From 6224a4f4f02fc1240004341d6a97ce59b87d2c8a Mon Sep 17 00:00:00 2001 From: SparkSnail Date: Sat, 29 Dec 2018 16:49:04 +0800 Subject: [PATCH 01/54] add trigger (#544) --- azure-pipelines.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 24f0e62a6f..95546c8ef9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,3 +1,7 @@ +trigger: +- master +- dev-remote-ci + jobs: - job: 'Ubuntu_16_04' From cb83ac0f3371210010a929f2c8946d31b9b00183 Mon Sep 17 00:00:00 2001 From: fishyds Date: Sat, 29 Dec 2018 17:56:44 +0800 Subject: [PATCH 02/54] NNI logging architecture improvement (#539) * Removed unused log code, refactor to rename some class name in nni sdk and trial_tools * Fix the regression bug that loca/remote mode doesnt work --- .../common/clusterJobRestServer.ts | 37 +++++ .../pai/paiTrainingService.ts | 1 + src/sdk/pynni/nni/common.py | 3 + src/sdk/pynni/nni/platform/local.py | 22 ++- tools/nni_trial_tool/constants.py | 8 +- tools/nni_trial_tool/log_utils.py | 145 +++++++++++++++++- tools/nni_trial_tool/metrics_reader.py | 7 +- tools/nni_trial_tool/rest_utils.py | 4 +- tools/nni_trial_tool/trial_keeper.py | 23 ++- tools/nni_trial_tool/url_utils.py | 10 +- 10 files changed, 234 insertions(+), 26 deletions(-) diff --git a/src/nni_manager/training_service/common/clusterJobRestServer.ts b/src/nni_manager/training_service/common/clusterJobRestServer.ts index 057d57ae9e..797eb59827 100644 --- a/src/nni_manager/training_service/common/clusterJobRestServer.ts +++ b/src/nni_manager/training_service/common/clusterJobRestServer.ts @@ -23,8 +23,12 @@ import * as assert from 'assert'; import { Request, Response, Router } from 'express'; import * as bodyParser from 'body-parser'; import * as component from '../../common/component'; +import * as fs from 'fs' +import * as path from 'path' import { getBasePort, getExperimentId } from '../../common/experimentStartupInfo'; import { RestServer } from '../../common/restServer' +import { getLogDir } from '../../common/utils'; +import { Writable } from 'stream'; /** * Cluster Job Training service Rest server, provides rest API to support Cluster job metrics update @@ -33,6 +37,7 @@ import { RestServer } from '../../common/restServer' @component.Singleton export abstract class ClusterJobRestServer extends RestServer{ private readonly API_ROOT_URL: string = '/api/v1/nni-pai'; + private readonly NNI_METRICS_PATTERN: string = `NNISDK_MEb'(?.*?)'`; private readonly expId: string = getExperimentId(); @@ -88,6 +93,38 @@ export abstract class ClusterJobRestServer extends RestServer{ } }); + router.post(`/stdout/${this.expId}/:trialId`, (req: Request, res: Response) => { + const trialLogPath: string = path.join(getLogDir(), `trial_${req.params.trialId}.log`); + try { + let skipLogging: boolean = false; + if(req.body.tag === 'trial' && req.body.msg !== undefined) { + const metricsContent = req.body.msg.match(this.NNI_METRICS_PATTERN); + if(metricsContent && metricsContent.groups) { + this.handleTrialMetrics(req.params.trialId, [metricsContent.groups['metrics']]); + skipLogging = true; + } + } + + if(!skipLogging){ + // Construct write stream to write remote trial's log into local file + const writeStream: Writable = fs.createWriteStream(trialLogPath, { + flags: 'a+', + encoding: 'utf8', + autoClose: true + }); + + writeStream.write(req.body.msg + '\n'); + writeStream.end(); + } + res.send(); + } + catch(err) { + this.log.error(`json parse stdout data error: ${err}`); + res.status(500); + 
res.send(err.message); + } + }); + return router; } diff --git a/src/nni_manager/training_service/pai/paiTrainingService.ts b/src/nni_manager/training_service/pai/paiTrainingService.ts index f415876a63..c1ef8ccb60 100644 --- a/src/nni_manager/training_service/pai/paiTrainingService.ts +++ b/src/nni_manager/training_service/pai/paiTrainingService.ts @@ -92,6 +92,7 @@ class PAITrainingService implements TrainingService { public async run(): Promise { const restServer: PAIJobRestServer = component.get(PAIJobRestServer); await restServer.start(); + this.log.info(`PAI Training service rest server listening on: ${restServer.endPoint}`); while (!this.stopping) { await this.updatePaiToken(); diff --git a/src/sdk/pynni/nni/common.py b/src/sdk/pynni/nni/common.py index cb21efda64..d71241a7f5 100644 --- a/src/sdk/pynni/nni/common.py +++ b/src/sdk/pynni/nni/common.py @@ -44,10 +44,13 @@ def _load_env_args(): class _LoggerFileWrapper(TextIOBase): def __init__(self, logger_file): self.file = logger_file + self.orig_stdout = sys.stdout def write(self, s): if s != '\n': time = datetime.now().strftime(_time_format) + self.orig_stdout.write(s + '\n') + self.orig_stdout.flush() self.file.write('[{}] PRINT '.format(time) + s + '\n') self.file.flush() return len(s) diff --git a/src/sdk/pynni/nni/platform/local.py b/src/sdk/pynni/nni/platform/local.py index cf4df736d7..d2303b0aa9 100644 --- a/src/sdk/pynni/nni/platform/local.py +++ b/src/sdk/pynni/nni/platform/local.py @@ -34,8 +34,11 @@ _outputdir = os.environ['NNI_OUTPUT_DIR'] if not os.path.exists(_outputdir): os.makedirs(_outputdir) -_log_file_path = os.path.join(_outputdir, 'trial.log') -init_logger(_log_file_path) + +_nni_platform = os.environ['NNI_PLATFORM'] +if _nni_platform != 'pai': + _log_file_path = os.path.join(_outputdir, 'trial.log') + init_logger(_log_file_path) _multiphase = os.environ.get('MULTI_PHASE') @@ -74,11 +77,16 @@ def get_next_parameter(): return params def send_metric(string): - data = (string + '\n').encode('utf8') - assert len(data) < 1000000, 'Metric too long' - _metric_file.write(b'ME%06d%b' % (len(data), data)) - _metric_file.flush() - subprocess.run(['touch', _metric_file.name], check = True) + if _nni_platform == 'pai': + data = (string).encode('utf8') + assert len(data) < 1000000, 'Metric too long' + print('NNISDK_ME%s' % (data)) + else: + data = (string + '\n').encode('utf8') + assert len(data) < 1000000, 'Metric too long' + _metric_file.write(b'ME%06d%b' % (len(data), data)) + _metric_file.flush() + subprocess.run(['touch', _metric_file.name], check = True) def get_sequence_id(): return os.environ['NNI_TRIAL_SEQ_ID'] \ No newline at end of file diff --git a/tools/nni_trial_tool/constants.py b/tools/nni_trial_tool/constants.py index 3ae30a3a33..c1f18b8fea 100644 --- a/tools/nni_trial_tool/constants.py +++ b/tools/nni_trial_tool/constants.py @@ -34,4 +34,10 @@ STDERR_FULL_PATH = os.path.join(LOG_DIR, 'stderr') -UPDATE_METRICS_API = '/update-metrics' \ No newline at end of file +UPDATE_METRICS_API = '/update-metrics' + +STDOUT_API = '/stdout' +NNI_SYS_DIR = os.environ['NNI_SYS_DIR'] +NNI_TRIAL_JOB_ID = os.environ['NNI_TRIAL_JOB_ID'] +NNI_EXP_ID = os.environ['NNI_EXP_ID'] + diff --git a/tools/nni_trial_tool/log_utils.py b/tools/nni_trial_tool/log_utils.py index 55b1b7ed99..e4e63731d7 100644 --- a/tools/nni_trial_tool/log_utils.py +++ b/tools/nni_trial_tool/log_utils.py @@ -18,8 +18,23 @@ # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE 
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+import os
+import sys
+import json
+import logging
+import logging.handlers
+import time
+import threading
+
 from datetime import datetime
 from enum import Enum, unique
+from logging import StreamHandler
+
+from queue import Queue
+
+from .rest_utils import rest_get, rest_post, rest_put, rest_delete
+from .constants import NNI_EXP_ID, NNI_TRIAL_JOB_ID, STDOUT_API
+from .url_utils import gen_send_stdout_url
 
 @unique
 class LogType(Enum):
@@ -29,7 +44,135 @@ class LogType(Enum):
     Error = 'ERROR'
     Critical = 'CRITICAL'
 
+@unique
+class StdOutputType(Enum):
+    Stdout = 'stdout'
+    Stderr = 'stderr'
+
 def nni_log(log_type, log_message):
     '''Log message into stdout'''
     dt = datetime.now()
-    print('[{0}] {1} {2}'.format(dt, log_type.value, log_message))
\ No newline at end of file
+    print('[{0}] {1} {2}'.format(dt, log_type.value, log_message))
+
+class NNIRestLogHanlder(StreamHandler):
+    def __init__(self, host, port, tag, std_output_type=StdOutputType.Stdout):
+        StreamHandler.__init__(self)
+        self.host = host
+        self.port = port
+        self.tag = tag
+        self.std_output_type = std_output_type
+        self.orig_stdout = sys.__stdout__
+        self.orig_stderr = sys.__stderr__
+
+    def emit(self, record):
+        log_entry = {}
+        log_entry['tag'] = self.tag
+        log_entry['stdOutputType'] = self.std_output_type.name
+        log_entry['msg'] = self.format(record)
+
+        try:
+            response = rest_post(gen_send_stdout_url(self.host, self.port), json.dumps(log_entry), 10, True)
+        except Exception as e:
+            self.orig_stderr.write(str(e) + '\n')
+            self.orig_stderr.flush()
+
+class RemoteLogger(object):
+    """
+    NNI remote logger
+    """
+    def __init__(self, syslog_host, syslog_port, tag, std_output_type, log_level=logging.INFO):
+        '''
+        constructor
+        '''
+        self.logger = logging.getLogger('nni_syslog_{}'.format(tag))
+        self.log_level = log_level
+        self.logger.setLevel(self.log_level)
+        handler = NNIRestLogHanlder(syslog_host, syslog_port, tag)
+        self.logger.addHandler(handler)
+        if std_output_type == StdOutputType.Stdout:
+            self.orig_stdout = sys.__stdout__
+        else:
+            self.orig_stdout = sys.__stderr__
+
+    def get_pipelog_reader(self):
+        '''
+        Get pipe for remote logger
+        '''
+        return PipeLogReader(self.logger, logging.INFO)
+
+    def write(self, buf):
+        '''
+        Write buffer data into logger/stdout
+        '''
+        for line in buf.rstrip().splitlines():
+            self.orig_stdout.write(line.rstrip() + '\n')
+            self.orig_stdout.flush()
+            try:
+                self.logger.log(self.log_level, line.rstrip())
+            except Exception as e:
+                pass
+
+class PipeLogReader(threading.Thread):
+    """
+    The reader thread reads log data from pipe
+    """
+    def __init__(self, logger, log_level=logging.INFO):
+        """Setup the object with a logger and a loglevel
+        and start the thread
+        """
+        threading.Thread.__init__(self)
+        self.queue = Queue()
+        self.logger = logger
+        self.daemon = False
+        self.log_level = log_level
+        self.fdRead, self.fdWrite = os.pipe()
+        self.pipeReader = os.fdopen(self.fdRead)
+        self.orig_stdout = sys.__stdout__
+        self._is_read_completed = False
+
+        def _populateQueue(stream, queue):
+            '''
+            Collect lines from 'stream' and put them in 'queue'.
+ ''' + time.sleep(5) + while True: + try: + line = self.queue.get(True, 5) + try: + self.logger.log(self.log_level, line.rstrip()) + self.orig_stdout.write(line.rstrip() + '\n') + self.orig_stdout.flush() + except Exception as e: + pass + except Exception as e: + self._is_read_completed = True + break + + self.pip_log_reader_thread = threading.Thread(target = _populateQueue, + args = (self.pipeReader, self.queue)) + self.pip_log_reader_thread.daemon = True + self.start() + self.pip_log_reader_thread.start() + + def fileno(self): + """Return the write file descriptor of the pipe + """ + return self.fdWrite + + def run(self): + """Run the thread, logging everything. + """ + for line in iter(self.pipeReader.readline, ''): + self.queue.put(line) + self.pipeReader.close() + + def close(self): + """Close the write end of the pipe. + """ + os.close(self.fdWrite) + + @property + def is_read_completed(self): + """Return if read is completed + """ + return self._is_read_completed \ No newline at end of file diff --git a/tools/nni_trial_tool/metrics_reader.py b/tools/nni_trial_tool/metrics_reader.py index 6827dbd033..9d7f24b96d 100644 --- a/tools/nni_trial_tool/metrics_reader.py +++ b/tools/nni_trial_tool/metrics_reader.py @@ -25,14 +25,11 @@ import requests from datetime import datetime -from .constants import BASE_URL +from .constants import BASE_URL, NNI_EXP_ID, NNI_TRIAL_JOB_ID, NNI_SYS_DIR from .log_utils import LogType, nni_log from .rest_utils import rest_get, rest_post, rest_put, rest_delete from .url_utils import gen_update_metrics_url -NNI_SYS_DIR = os.environ['NNI_SYS_DIR'] -NNI_TRIAL_JOB_ID = os.environ['NNI_TRIAL_JOB_ID'] -NNI_EXP_ID = os.environ['NNI_EXP_ID'] LEN_FIELD_SIZE = 6 MAGIC = 'ME' @@ -116,7 +113,7 @@ def read_experiment_metrics(nnimanager_ip, nnimanager_port): result['metrics'] = reader.read_trial_metrics() if len(result['metrics']) > 0: nni_log(LogType.Info, 'Result metrics is {}'.format(json.dumps(result))) - response = rest_post(gen_update_metrics_url(BASE_URL.format(nnimanager_ip), nnimanager_port, NNI_EXP_ID, NNI_TRIAL_JOB_ID), json.dumps(result), 10) + response = rest_post(gen_update_metrics_url(nnimanager_ip, nnimanager_port), json.dumps(result), 10) nni_log(LogType.Info,'Report metrics to NNI manager completed, http response code is {}'.format(response.status_code)) except Exception as e: #Error logging diff --git a/tools/nni_trial_tool/rest_utils.py b/tools/nni_trial_tool/rest_utils.py index d6abf0905e..71eb353614 100644 --- a/tools/nni_trial_tool/rest_utils.py +++ b/tools/nni_trial_tool/rest_utils.py @@ -31,13 +31,15 @@ def rest_get(url, timeout): print('Get exception {0} when sending http get to url {1}'.format(str(e), url)) return None -def rest_post(url, data, timeout): +def rest_post(url, data, timeout, rethrow_exception=False): '''Call rest post method''' try: response = requests.post(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\ data=data, timeout=timeout) return response except Exception as e: + if rethrow_exception is True: + raise print('Get exception {0} when sending http post to url {1}'.format(str(e), url)) return None diff --git a/tools/nni_trial_tool/trial_keeper.py b/tools/nni_trial_tool/trial_keeper.py index c4adeac434..29753585b9 100644 --- a/tools/nni_trial_tool/trial_keeper.py +++ b/tools/nni_trial_tool/trial_keeper.py @@ -25,11 +25,13 @@ import logging import shlex import re +import sys +import select from pyhdfs import HdfsClient from .constants import HOME_DIR, LOG_DIR, NNI_PLATFORM, STDOUT_FULL_PATH, 
STDERR_FULL_PATH
 from .hdfsClientUtility import copyDirectoryToHdfs, copyHdfsDirectoryToLocal
-from .log_utils import LogType, nni_log
+from .log_utils import LogType, nni_log, RemoteLogger, PipeLogReader, StdOutputType
 from .metrics_reader import read_experiment_metrics
 
 logger = logging.getLogger('trial_keeper')
 
@@ -42,6 +44,11 @@ def main_loop(args):
 
     stdout_file = open(STDOUT_FULL_PATH, 'a+')
     stderr_file = open(STDERR_FULL_PATH, 'a+')
+
+    trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper', StdOutputType.Stdout)
+    # redirect trial keeper's stdout and stderr to syslog
+    trial_syslogger_stdout = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial', StdOutputType.Stdout)
+    sys.stdout = sys.stderr = trial_keeper_syslogger
 
     if args.pai_hdfs_host is not None and args.nni_hdfs_exp_dir is not None:
         try:
@@ -52,15 +59,15 @@ def main_loop(args):
             copyHdfsDirectoryToLocal(args.nni_hdfs_exp_dir, os.getcwd(), hdfs_client)
 
     # Notice: We don't appoint env, which means subprocess will inherit current environment and that is expected behavior
-    process = Popen(args.trial_command, shell = True, stdout = stdout_file, stderr = stderr_file)
+    log_pipe_stdout = trial_syslogger_stdout.get_pipelog_reader()
+    process = Popen(args.trial_command, shell = True, stdout = log_pipe_stdout, stderr = log_pipe_stdout)
     nni_log(LogType.Info, 'Trial keeper spawns a subprocess (pid {0}) to run command: {1}'.format(process.pid, shlex.split(args.trial_command)))
-
+
     while True:
         retCode = process.poll()
         ## Read experiment metrics, to avoid missing metrics
-        read_experiment_metrics(args.nnimanager_ip, args.nnimanager_port)
-
-        if retCode is not None:
+        #read_experiment_metrics(args.nnimanager_ip, args.nnimanager_port)
+        if retCode is not None and log_pipe_stdout.is_read_completed == True:
             nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode))
             if args.pai_hdfs_output_dir is not None:
                 # Copy local directory to hdfs for OpenPAI
@@ -102,8 +109,8 @@ def trial_keeper_help_info(*args):
         main_loop(args)
     except SystemExit as se:
         nni_log(LogType.Info, 'NNI trial keeper exit with code {}'.format(se.code))
-        sys.exit(se.code)
+        os._exit(se.code)
     except Exception as e:
         nni_log(LogType.Error, 'Exit trial keeper with code 1 because Exception: {} is caught'.format(str(e)))
-        sys.exit(1)
+        os._exit(1)
 
diff --git a/tools/nni_trial_tool/url_utils.py b/tools/nni_trial_tool/url_utils.py
index 69ce14ecb2..d167098d35 100644
--- a/tools/nni_trial_tool/url_utils.py
+++ b/tools/nni_trial_tool/url_utils.py
@@ -18,8 +18,12 @@
 # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
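# ---------------------------------------------------------------------------
# Editor's note: the trial_keeper.py hunks above wire the trial subprocess's
# stdout/stderr through an OS pipe into a logger that POSTs each line to the
# NNI manager. Below is a minimal, self-contained sketch of that pattern. It
# is an illustration only, not NNI source: run_with_remote_logging and
# MANAGER_STDOUT_URL are hypothetical names, and the real implementation is
# RemoteLogger / PipeLogReader in log_utils.py above.
import os
import subprocess
import threading

import requests

MANAGER_STDOUT_URL = 'http://127.0.0.1:8081/api/v1/nni-pai/stdout/expId/trialId'  # placeholder

def run_with_remote_logging(command):
    read_fd, write_fd = os.pipe()
    reader = os.fdopen(read_fd)

    def drain():
        # readline() keeps yielding lines until every copy of the write end is closed
        for line in iter(reader.readline, ''):
            try:
                requests.post(MANAGER_STDOUT_URL,
                              json={'tag': 'trial', 'msg': line.rstrip()}, timeout=10)
            except requests.RequestException:
                pass  # logging failures must never kill the trial
        reader.close()

    drain_thread = threading.Thread(target=drain, daemon=True)
    drain_thread.start()

    # the child writes into the pipe; the drain thread ships lines to the manager
    process = subprocess.Popen(command, shell=True, stdout=write_fd, stderr=write_fd)
    ret_code = process.wait()
    os.close(write_fd)  # signals EOF to the drain thread once the child is gone
    drain_thread.join()
    return ret_code
# ---------------------------------------------------------------------------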
-from .constants import API_ROOT_URL, UPDATE_METRICS_API +from .constants import API_ROOT_URL, BASE_URL, UPDATE_METRICS_API, STDOUT_API, NNI_TRIAL_JOB_ID, NNI_EXP_ID -def gen_update_metrics_url(base_url, port, exp_id, trial_job_id): +def gen_update_metrics_url(ip, port): '''Generate update trial metrics url''' - return '{0}:{1}{2}{3}/{4}/:{5}'.format(base_url, port, API_ROOT_URL, UPDATE_METRICS_API, exp_id, trial_job_id) \ No newline at end of file + return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, UPDATE_METRICS_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) + +def gen_send_stdout_url(ip, port): + '''Generate send stdout url''' + return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) \ No newline at end of file From ce1bc481fddd7e3087b9e11415f46b320df5f12f Mon Sep 17 00:00:00 2001 From: Lijiao <35484733+lvybriage@users.noreply.github.com> Date: Sat, 29 Dec 2018 18:23:26 +0800 Subject: [PATCH 03/54] [WebUI] Fix issue#517 & issue#459 (#524) * [WebUI] Fix issue#517 & issue#459 * update --- src/webui/src/components/TrialsDetail.tsx | 208 ++++++++++++++---- .../src/components/trial-detail/TableList.tsx | 12 +- src/webui/src/static/style/search.scss | 17 +- src/webui/src/static/style/trialsDetail.scss | 4 + 4 files changed, 181 insertions(+), 60 deletions(-) diff --git a/src/webui/src/components/TrialsDetail.tsx b/src/webui/src/components/TrialsDetail.tsx index cbfe32a4ee..7b5d8760dc 100644 --- a/src/webui/src/components/TrialsDetail.tsx +++ b/src/webui/src/components/TrialsDetail.tsx @@ -1,8 +1,8 @@ import * as React from 'react'; import axios from 'axios'; import { MANAGER_IP } from '../static/const'; -import { Row, Col, Button, Tabs, Input } from 'antd'; -const Search = Input.Search; +import { Row, Col, Tabs, Input, Select } from 'antd'; +const Option = Select.Option; import { TableObj, Parameters, DetailAccurPoint, TooltipForAccuracy } from '../static/interface'; import { getFinalResult } from '../static/function'; import Accuracy from './overview/Accuracy'; @@ -17,8 +17,10 @@ interface TrialDetailState { accSource: object; accNodata: string; tableListSource: Array; - tableBaseSource: Array; + searchResultSource: Array; + isHasSearch: boolean; experimentStatus: string; + entriesTable: number; } class TrialsDetail extends React.Component<{}, TrialDetailState> { @@ -26,6 +28,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { public _isMounted = false; public interAccuracy = 0; public interTableList = 1; + public interAllTableList = 2; constructor(props: {}) { super(props); @@ -34,8 +37,10 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { accSource: {}, accNodata: '', tableListSource: [], - tableBaseSource: [], - experimentStatus: '' + searchResultSource: [], + experimentStatus: '', + entriesTable: 20, + isHasSearch: false }; } // trial accuracy graph @@ -122,6 +127,91 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } drawTableList = () => { + this.isOffIntervals(); + axios.get(`${MANAGER_IP}/trial-jobs`) + .then(res => { + if (res.status === 200) { + const trialJobs = res.data; + const trialTable: Array = []; + Object.keys(trialJobs).map(item => { + // only succeeded trials have finalMetricData + let desc: Parameters = { + parameters: {} + }; + let duration = 0; + const id = trialJobs[item].id !== undefined + ? trialJobs[item].id + : ''; + const status = trialJobs[item].status !== undefined + ? 
trialJobs[item].status + : ''; + const begin = trialJobs[item].startTime; + const end = trialJobs[item].endTime; + if (begin) { + if (end) { + duration = (end - begin) / 1000; + } else { + duration = (new Date().getTime() - begin) / 1000; + } + } + if (trialJobs[item].hyperParameters !== undefined) { + const getPara = JSON.parse(trialJobs[item].hyperParameters[0]).parameters; + if (typeof getPara === 'string') { + desc.parameters = JSON.parse(getPara); + } else { + desc.parameters = getPara; + } + } else { + desc.parameters = { error: 'This trial\'s parameters are not available.' }; + } + if (trialJobs[item].logPath !== undefined) { + desc.logPath = trialJobs[item].logPath; + } + const acc = getFinalResult(trialJobs[item].finalMetricData); + trialTable.push({ + key: trialTable.length, + sequenceId: trialJobs[item].sequenceId, + id: id, + status: status, + duration: duration, + acc: acc, + description: desc + }); + }); + // search part data + const { searchResultSource } = this.state; + if (searchResultSource.length !== 0) { + const temp: Array = []; + Object.keys(searchResultSource).map(index => { + temp.push(searchResultSource[index].id); + }); + const searchResultList: Array = []; + for (let i = 0; i < temp.length; i++) { + Object.keys(trialTable).map(key => { + const item = trialTable[key]; + if (item.id === temp[i]) { + searchResultList.push(item); + } + }); + } + + if (this._isMounted) { + this.setState(() => ({ + searchResultSource: searchResultList + })); + } + } + if (this._isMounted) { + this.setState(() => ({ + tableListSource: trialTable + })); + } + } + }); + } + + // update all data in table + drawAllTableList = () => { this.isOffIntervals(); axios.get(`${MANAGER_IP}/trial-jobs`) .then(res => { @@ -176,7 +266,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { if (this._isMounted) { this.setState(() => ({ tableListSource: trialTable, - tableBaseSource: trialTable + searchResultSource: trialTable })); } } @@ -209,31 +299,29 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } } - // search a specific trial by trial No. - searchTrial = (value: string) => { - window.clearInterval(this.interTableList); - const { tableBaseSource } = this.state; - const searchResultList: Array = []; - Object.keys(tableBaseSource).map(key => { - const item = tableBaseSource[key]; - if (item.sequenceId.toString() === value || item.id.includes(value)) { - searchResultList.push(item); + // search a trial by trial No. 
& trial id + searchTrial = (event: React.ChangeEvent) => { + const targetValue = event.target.value; + if (targetValue === '' || targetValue === ' ') { + this.drawAllTableList(); + this.interAllTableList = window.setInterval(this.drawAllTableList, 10000); + } else { + window.clearInterval(this.interAllTableList); + const { tableListSource } = this.state; + const searchResultList: Array = []; + Object.keys(tableListSource).map(key => { + const item = tableListSource[key]; + if (item.sequenceId.toString() === targetValue || item.id.includes(targetValue)) { + searchResultList.push(item); + } + }); + if (this._isMounted) { + this.setState(() => ({ + searchResultSource: searchResultList, + isHasSearch: true + })); } - }); - this.setState(() => ({ - tableListSource: searchResultList - })); - } - - // reset btn click: rerender table - resetRenderTable = () => { - - const searchInput = document.getElementById('searchTrial') as HTMLInputElement; - if (searchInput !== null) { - searchInput.value = ''; } - this.drawTableList(); - this.interTableList = window.setInterval(this.drawTableList, 10000); } isOffIntervals = () => { @@ -250,6 +338,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { window.clearInterval(this.interTableList); window.clearInterval(Duration.intervalDuration); window.clearInterval(Para.intervalIDPara); + window.clearInterval(this.interAllTableList); break; default: } @@ -257,13 +346,31 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { }); } + handleEntriesSelect = (value: string) => { + switch (value) { + case '20': + this.setState(() => ({ entriesTable: 20 })); + break; + case '50': + this.setState(() => ({ entriesTable: 50 })); + break; + case '100': + this.setState(() => ({ entriesTable: 100 })); + break; + case 'all': + this.setState(() => ({ entriesTable: 100000 })); + break; + default: + } + } + componentDidMount() { this._isMounted = true; this.drawTableList(); this.drawPointGraph(); - this.interAccuracy = window.setInterval(this.drawPointGraph, 10000); this.interTableList = window.setInterval(this.drawTableList, 10000); + this.interAccuracy = window.setInterval(this.drawPointGraph, 10000); } componentWillUnmount() { @@ -273,11 +380,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } render() { - const { - accSource, accNodata, - tableListSource - } = this.state; - + const { accSource, accNodata, tableListSource, entriesTable, searchResultSource, isHasSearch } = this.state; const titleOfacc = ( ); @@ -309,29 +412,38 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { {/* trial table list */} + - + show + + entries - - + {/* Search: */} + this.searchTrial(value)} - style={{ width: 200 }} - id="searchTrial" + onChange={this.searchTrial} + style={{ width: 200, marginLeft: 6 }} /> - ); diff --git a/src/webui/src/components/trial-detail/TableList.tsx b/src/webui/src/components/trial-detail/TableList.tsx index 9d5d07bc52..827259ab81 100644 --- a/src/webui/src/components/trial-detail/TableList.tsx +++ b/src/webui/src/components/trial-detail/TableList.tsx @@ -21,8 +21,11 @@ echarts.registerTheme('my_theme', { }); interface TableListProps { + entries: number; tableSource: Array; + searchResult: Array; updateList: Function; + isHasSearch: boolean; } interface TableListState { @@ -150,7 +153,7 @@ class TableList extends React.Component { render() { - const { tableSource } = this.props; + const { entries, tableSource, searchResult, isHasSearch } = this.props; const { intermediateOption, modalVisible } = 
this.state; let bgColor = ''; const trialJob: Array = []; @@ -160,6 +163,7 @@ class TableList extends React.Component { value: item }); }); + const columns = [{ title: 'Trial No.', dataIndex: 'sequenceId', @@ -328,7 +332,7 @@ class TableList extends React.Component { 'This trial's parameters are not available.' } - + ); }; @@ -339,9 +343,9 @@ class TableList extends React.Component { Date: Wed, 2 Jan 2019 09:47:00 +0800 Subject: [PATCH 04/54] [Logging architecture refactor] Remove unused metrics related code in nni trial_tools, support kubeflow mode for logging architecture refactor (#551) * Remove unused metrics related code in nni trial_tools, support kubeflow mode for logging architecture refactor --- src/sdk/pynni/nni/platform/local.py | 4 +- tools/nni_trial_tool/constants.py | 5 +- tools/nni_trial_tool/metrics_reader.py | 122 ------------------------- tools/nni_trial_tool/trial_keeper.py | 4 +- tools/nni_trial_tool/url_utils.py | 6 +- 5 files changed, 5 insertions(+), 136 deletions(-) delete mode 100644 tools/nni_trial_tool/metrics_reader.py diff --git a/src/sdk/pynni/nni/platform/local.py b/src/sdk/pynni/nni/platform/local.py index d2303b0aa9..afc7a878b0 100644 --- a/src/sdk/pynni/nni/platform/local.py +++ b/src/sdk/pynni/nni/platform/local.py @@ -36,7 +36,7 @@ os.makedirs(_outputdir) _nni_platform = os.environ['NNI_PLATFORM'] -if _nni_platform != 'pai': +if _nni_platform not in ['pai', 'kubeflow']: _log_file_path = os.path.join(_outputdir, 'trial.log') init_logger(_log_file_path) @@ -77,7 +77,7 @@ def get_next_parameter(): return params def send_metric(string): - if _nni_platform == 'pai': + if _nni_platform in ['pai', 'kubeflow']: data = (string).encode('utf8') assert len(data) < 1000000, 'Metric too long' print('NNISDK_ME%s' % (data)) diff --git a/tools/nni_trial_tool/constants.py b/tools/nni_trial_tool/constants.py index c1f18b8fea..f554aedb26 100644 --- a/tools/nni_trial_tool/constants.py +++ b/tools/nni_trial_tool/constants.py @@ -34,10 +34,7 @@ STDERR_FULL_PATH = os.path.join(LOG_DIR, 'stderr') -UPDATE_METRICS_API = '/update-metrics' - STDOUT_API = '/stdout' NNI_SYS_DIR = os.environ['NNI_SYS_DIR'] NNI_TRIAL_JOB_ID = os.environ['NNI_TRIAL_JOB_ID'] -NNI_EXP_ID = os.environ['NNI_EXP_ID'] - +NNI_EXP_ID = os.environ['NNI_EXP_ID'] \ No newline at end of file diff --git a/tools/nni_trial_tool/metrics_reader.py b/tools/nni_trial_tool/metrics_reader.py deleted file mode 100644 index 9d7f24b96d..0000000000 --- a/tools/nni_trial_tool/metrics_reader.py +++ /dev/null @@ -1,122 +0,0 @@ -# ============================================================================================================================== # -# Copyright (c) Microsoft Corporation -# All rights reserved. -# -# MIT License -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the "Software"), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -# to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# ============================================================================================================================== # - -import argparse -import errno -import json -import os -import re -import requests - -from datetime import datetime -from .constants import BASE_URL, NNI_EXP_ID, NNI_TRIAL_JOB_ID, NNI_SYS_DIR -from .log_utils import LogType, nni_log -from .rest_utils import rest_get, rest_post, rest_put, rest_delete -from .url_utils import gen_update_metrics_url - -LEN_FIELD_SIZE = 6 -MAGIC = 'ME' - -class TrialMetricsReader(): - ''' - Read metrics data from a trial job - ''' - def __init__(self): - metrics_base_dir = os.path.join(NNI_SYS_DIR, '.nni') - self.offset_filename = os.path.join(metrics_base_dir, 'metrics_offset') - self.metrics_filename = os.path.join(metrics_base_dir, 'metrics') - if not os.path.exists(metrics_base_dir): - os.makedirs(metrics_base_dir) - - def _metrics_file_is_empty(self): - if not os.path.isfile(self.metrics_filename): - return True - statinfo = os.stat(self.metrics_filename) - return statinfo.st_size == 0 - - def _get_offset(self): - offset = 0 - if os.path.isfile(self.offset_filename): - with open(self.offset_filename, 'r') as f: - offset = int(f.readline()) - return offset - - def _write_offset(self, offset): - statinfo = os.stat(self.metrics_filename) - if offset < 0 or offset > statinfo.st_size: - raise ValueError('offset value is invalid: {}'.format(offset)) - - with open(self.offset_filename, 'w') as f: - f.write(str(offset)+'\n') - - def _read_all_available_records(self, offset): - new_offset = offset - metrics = [] - with open(self.metrics_filename, 'r') as f: - f.seek(offset) - while True: - magic_string = f.read(len(MAGIC)) - # empty data means EOF - if not magic_string: - break - nni_log(LogType.Info, 'Metrics file offset is {}'.format(offset)) - strdatalen = f.read(LEN_FIELD_SIZE) - # empty data means EOF - if not strdatalen: - raise ValueError("metric file {} format error after offset: {}.".format(self.metrics_filename, new_offset)) - datalen = int(strdatalen) - data = f.read(datalen) - - if datalen > 0 and len(data) == datalen: - nni_log(LogType.Info, 'data is \'{}\''.format(data)) - new_offset = f.tell() - metrics.append(data) - else: - raise ValueError("metric file {} format error after offset: {}.".format(self.metrics_filename, new_offset)) - self._write_offset(new_offset) - return metrics - - def read_trial_metrics(self): - ''' - Read available metrics data for a trial - ''' - if self._metrics_file_is_empty(): - return [] - - offset = self._get_offset() - return self._read_all_available_records(offset) - -def read_experiment_metrics(nnimanager_ip, nnimanager_port): - ''' - Read metrics data for specified trial jobs - ''' - result = {} - try: - reader = TrialMetricsReader() - result['jobId'] = NNI_TRIAL_JOB_ID - result['metrics'] = reader.read_trial_metrics() - if len(result['metrics']) > 0: - nni_log(LogType.Info, 'Result metrics is {}'.format(json.dumps(result))) - response = rest_post(gen_update_metrics_url(nnimanager_ip, nnimanager_port), json.dumps(result), 10) - nni_log(LogType.Info,'Report metrics to NNI manager completed, http response code is {}'.format(response.status_code)) - except Exception as e: - #Error logging - nni_log(LogType.Error, 'Error when 
reading metrics data: ' + str(e)) - - return json.dumps(result) \ No newline at end of file diff --git a/tools/nni_trial_tool/trial_keeper.py b/tools/nni_trial_tool/trial_keeper.py index 29753585b9..079d7b58f3 100644 --- a/tools/nni_trial_tool/trial_keeper.py +++ b/tools/nni_trial_tool/trial_keeper.py @@ -32,7 +32,6 @@ from .constants import HOME_DIR, LOG_DIR, NNI_PLATFORM, STDOUT_FULL_PATH, STDERR_FULL_PATH from .hdfsClientUtility import copyDirectoryToHdfs, copyHdfsDirectoryToLocal from .log_utils import LogType, nni_log, RemoteLogger, PipeLogReader, StdOutputType -from .metrics_reader import read_experiment_metrics logger = logging.getLogger('trial_keeper') @@ -65,8 +64,7 @@ def main_loop(args): while True: retCode = process.poll() - ## Read experiment metrics, to avoid missing metrics - #read_experiment_metrics(args.nnimanager_ip, args.nnimanager_port) + # child worker process exits and all stdout data is read if retCode is not None and log_pipe_stdout.is_read_completed == True: nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode)) if args.pai_hdfs_output_dir is not None: diff --git a/tools/nni_trial_tool/url_utils.py b/tools/nni_trial_tool/url_utils.py index d167098d35..47284e1ce1 100644 --- a/tools/nni_trial_tool/url_utils.py +++ b/tools/nni_trial_tool/url_utils.py @@ -18,11 +18,7 @@ # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -from .constants import API_ROOT_URL, BASE_URL, UPDATE_METRICS_API, STDOUT_API, NNI_TRIAL_JOB_ID, NNI_EXP_ID - -def gen_update_metrics_url(ip, port): - '''Generate update trial metrics url''' - return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, UPDATE_METRICS_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) +from .constants import API_ROOT_URL, BASE_URL, STDOUT_API, NNI_TRIAL_JOB_ID, NNI_EXP_ID def gen_send_stdout_url(ip, port): '''Generate send stdout url''' From d5f808b09c8b38f8910b5f19f5b1a16755ae3da8 Mon Sep 17 00:00:00 2001 From: Chi Song Date: Thu, 3 Jan 2019 10:13:03 +0800 Subject: [PATCH 05/54] Doc typo and format fixes (#560) * fix incorrect document * fix doc format and typo --- docs/ExperimentConfig.md | 87 ++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/docs/ExperimentConfig.md b/docs/ExperimentConfig.md index 33a5f96633..8d98f413a9 100644 --- a/docs/ExperimentConfig.md +++ b/docs/ExperimentConfig.md @@ -5,6 +5,7 @@ The config file is written in yaml format, and need to be written correctly. This document describes the rule to write config file, and will provide some examples and templates. ## Template * __light weight(without Annotation and Assessor)__ + ``` authorName: experimentName: @@ -34,7 +35,9 @@ machineList: username: passwd: ``` + * __Use Assessor__ + ``` authorName: experimentName: @@ -71,7 +74,9 @@ machineList: username: passwd: ``` + * __Use Annotation__ + ``` authorName: experimentName: @@ -107,6 +112,7 @@ machineList: username: passwd: ``` + ## Configuration * __authorName__ * Description @@ -123,9 +129,9 @@ machineList: * __trialConcurrency__ * Description - __trialConcurrency__ specifies the max num of trial jobs run simultaneously. + __trialConcurrency__ specifies the max num of trial jobs run simultaneously. 
- Note: if trialGpuNum is bigger than the free gpu numbers, and the trial jobs running simultaneously can not reach trialConcurrency number, some trial jobs will be put into a queue to wait for gpu allocation. + Note: if trialGpuNum is bigger than the free gpu numbers, and the trial jobs running simultaneously can not reach trialConcurrency number, some trial jobs will be put into a queue to wait for gpu allocation. * __maxExecDuration__ * Description @@ -155,22 +161,22 @@ machineList: * __searchSpacePath__ * Description - __searchSpacePath__ specifies the path of search space file, which should be a valid path in the local linux machine. + __searchSpacePath__ specifies the path of search space file, which should be a valid path in the local linux machine. - Note: if set useAnnotation=True, the searchSpacePath field should be removed. + Note: if set useAnnotation=True, the searchSpacePath field should be removed. * __useAnnotation__ * Description __useAnnotation__ use annotation to analysis trial code and generate search space. - Note: if set useAnnotation=True, the searchSpacePath field should be removed. + Note: if set useAnnotation=True, the searchSpacePath field should be removed. * __nniManagerIp__ * Description __nniManagerIp__ set the IP address of the machine on which nni manager process runs. This field is optional, and if it's not set, eth0 device IP will be used instead. - Note: run ifconfig on NNI manager's machine to check if eth0 device exists. If not, we recommend to set nnimanagerIp explicitly. + Note: run ifconfig on NNI manager's machine to check if eth0 device exists. If not, we recommend to set nnimanagerIp explicitly. * __tuner__ @@ -181,68 +187,68 @@ machineList: * __builtinTunerName__ __builtinTunerName__ specifies the name of system tuner, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__, __BatchTuner__, __GridSearch__} - * __classArgs__ + * __classArgs__ __classArgs__ specifies the arguments of tuner algorithm. If the __builtinTunerName__ is in {__TPE__, __Random__, __Anneal__, __Evolution__}, user should set __optimize_mode__. * __codeDir__, __classFileName__, __className__ and __classArgs__ - * __codeDir__ + * __codeDir__ - __codeDir__ specifies the directory of tuner code. - * __classFileName__ + __codeDir__ specifies the directory of tuner code. + * __classFileName__ - __classFileName__ specifies the name of tuner file. + __classFileName__ specifies the name of tuner file. * __className__ - __className__ specifies the name of tuner class. + __className__ specifies the name of tuner class. * __classArgs__ - __classArgs__ specifies the arguments of tuner algorithm. + __classArgs__ specifies the arguments of tuner algorithm. * __gpuNum__ - __gpuNum__ specifies the gpu number to run the tuner process. The value of this field should be a positive number. + __gpuNum__ specifies the gpu number to run the tuner process. The value of this field should be a positive number. - Note: users could only specify one way to set tuner, for example, set {tunerName, optimizationMode} or {tunerCommand, tunerCwd}, and could not set them both. + Note: users could only specify one way to set tuner, for example, set {tunerName, optimizationMode} or {tunerCommand, tunerCwd}, and could not set them both. * __assessor__ * Description - __assessor__ specifies the assessor algorithm to run an experiment, there are two kinds of ways to set assessor. 
One way is to use assessor provided by nni sdk, users need to set __builtinAssessorName__ and __classArgs__. Another way is to use users' own tuner file, and need to set __codeDirectory__, __classFileName__, __className__ and __classArgs__. + __assessor__ specifies the assessor algorithm to run an experiment, there are two kinds of ways to set assessor. One way is to use assessor provided by nni sdk, users need to set __builtinAssessorName__ and __classArgs__. Another way is to use users' own assessor file, and need to set __codeDirectory__, __classFileName__, __className__ and __classArgs__. * __builtinAssessorName__ and __classArgs__ * __builtinAssessorName__ - __builtinAssessorName__ specifies the name of system assessor, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__} - * __classArgs__ - - __classArgs__ specifies the arguments of tuner algorithm + __builtinAssessorName__ specifies the name of system assessor, nni sdk provides one kind of assessor {__Medianstop__} + * __classArgs__ + + __classArgs__ specifies the arguments of assessor algorithm * __codeDir__, __classFileName__, __className__ and __classArgs__ - * __codeDir__ + * __codeDir__ - __codeDir__ specifies the directory of tuner code. - * __classFileName__ + __codeDir__ specifies the directory of assessor code. + * __classFileName__ - __classFileName__ specifies the name of tuner file. - * __className__ + __classFileName__ specifies the name of assessor file. + * __className__ - __className__ specifies the name of tuner class. - * __classArgs__ + __className__ specifies the name of assessor class. + * __classArgs__ - __classArgs__ specifies the arguments of tuner algorithm. + __classArgs__ specifies the arguments of assessor algorithm. * __gpuNum__ - __gpuNum__ specifies the gpu number to run the assessor process. The value of this field should be a positive number. + __gpuNum__ specifies the gpu number to run the assessor process. The value of this field should be a positive number. - Note: users' could only specify one way to set assessor, for example,set {assessorName, optimizationMode} or {assessorCommand, assessorCwd}, and users could not set them both.If users do not want to use assessor, assessor fileld should leave to empty. + Note: users' could only specify one way to set assessor, for example,set {assessorName, optimizationMode} or {assessorCommand, assessorCwd}, and users could not set them both.If users do not want to use assessor, assessor fileld should leave to empty. * __trial(local, remote)__ * __command__ __command__ specifies the command to run trial process. * __codeDir__ - __codeDir__ specifies the directory of your own trial file. + __codeDir__ specifies the directory of your own trial file. * __gpuNum__ - __gpuNum__ specifies the num of gpu to run the trial process. Default value is 0. + __gpuNum__ specifies the num of gpu to run the trial process. Default value is 0. * __trial(pai)__ * __command__ @@ -250,7 +256,7 @@ machineList: __command__ specifies the command to run trial process. * __codeDir__ - __codeDir__ specifies the directory of the own trial file. + __codeDir__ specifies the directory of the own trial file. * __gpuNum__ __gpuNum__ specifies the num of gpu to run the trial process. Default value is 0. @@ -306,7 +312,7 @@ machineList: * __image__ - __iamge__ set the image to be used in __ps__. + __image__ set the image to be used in __ps__. * __worker__ @@ -333,7 +339,7 @@ machineList: * __image__ - __iamge__ set the image to be used in __worker__. 
+ __image__ set the image to be used in __worker__. @@ -347,7 +353,7 @@ machineList: __port__ is the ssh port to be used to connect machine. - Note: if users set port empty, the default value will be 22. + Note: if users set port empty, the default value will be 22. * __username__ __username__ is the account of remote machine. @@ -359,7 +365,7 @@ machineList: If users use ssh key to login remote machine, could set __sshKeyPath__ in config file. __sshKeyPath__ is the path of ssh key file, which should be valid. - Note: if users set passwd and sshKeyPath simultaneously, nni will try passwd. + Note: if users set passwd and sshKeyPath simultaneously, nni will try passwd. * __passphrase__ @@ -426,6 +432,7 @@ machineList: * __local mode__ If users want to run trial jobs in local machine, and use annotation to generate search space, could use the following config: + ``` authorName: test experimentName: test_experiment @@ -450,6 +457,7 @@ trial: ``` Could add assessor configuration in config file if set assessor. + ``` authorName: test experimentName: test_experiment @@ -482,6 +490,7 @@ trial: ``` Or you could specify your own tuner and assessor file as following: + ``` authorName: test experimentName: test_experiment @@ -518,6 +527,7 @@ trial: * __remote mode__ If run trial jobs in remote machine, users could specify the remote mahcine information as fllowing format: + ``` authorName: test experimentName: test_experiment @@ -596,7 +606,6 @@ paiConfig: passWord: test #The host of restful server of pai host: 10.10.10.10 - ``` * __kubeflow mode__ @@ -635,7 +644,9 @@ kubeflowConfig: server: 10.10.10.10 path: /var/nfs/general ``` + kubeflow use azure storage + ``` authorName: default experimentName: example_mni From 1fec96c90c579c5e54e5b0eebac374e24084aa6d Mon Sep 17 00:00:00 2001 From: QuanluZhang Date: Thu, 3 Jan 2019 12:23:16 +0800 Subject: [PATCH 06/54] fix state transition (#504) --- src/nni_manager/common/manager.ts | 2 +- src/nni_manager/core/nnimanager.ts | 32 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/src/nni_manager/common/manager.ts b/src/nni_manager/common/manager.ts index 6c44f36c5f..4ac3d9b6b7 100644 --- a/src/nni_manager/common/manager.ts +++ b/src/nni_manager/common/manager.ts @@ -85,7 +85,7 @@ interface TrialJobStatistics { } interface NNIManagerStatus { - status: 'INITIALIZED' | 'RUNNING' | 'ERROR' | 'STOPPING' | 'STOPPED' | 'DONE' | 'NO_MORE_TRIAL'; + status: 'INITIALIZED' | 'RUNNING' | 'ERROR' | 'STOPPING' | 'STOPPED' | 'DONE' | 'NO_MORE_TRIAL' | 'TUNER_NO_MORE_TRIAL'; errors: string[]; } diff --git a/src/nni_manager/core/nnimanager.ts b/src/nni_manager/core/nnimanager.ts index 02afcd2418..462c67ae93 100644 --- a/src/nni_manager/core/nnimanager.ts +++ b/src/nni_manager/core/nnimanager.ts @@ -425,14 +425,10 @@ class NNIManager implements Manager { throw new Error('Error: tuner has not been setup'); } let allFinishedTrialJobNum: number = 0; + let waitSubmittedToFinish: number; while (this.status.status !== 'STOPPING' && this.status.status !== 'STOPPED') { const finishedTrialJobNum: number = await this.requestTrialJobsStatus(); - allFinishedTrialJobNum += finishedTrialJobNum; - if (allFinishedTrialJobNum >= this.experimentProfile.params.maxTrialNum) { - // write this log for travis CI - this.log.info('Experiment done.'); - } // requestTrialNum is the number of trials that will be requested from tuner. // If trialConcurrency does not change, requestTrialNum equals finishedTrialJobNum. 
@@ -467,21 +463,29 @@
             // as still running. DONE could be transferred from RUNNING or NO_MORE_TRIAL.
             assert(this.status.status === 'RUNNING' ||
                 this.status.status === 'DONE' ||
-                this.status.status === 'NO_MORE_TRIAL');
+                this.status.status === 'NO_MORE_TRIAL' ||
+                this.status.status === 'TUNER_NO_MORE_TRIAL');
             if (this.experimentProfile.execDuration > this.experimentProfile.params.maxExecDuration ||
                 this.currSubmittedTrialNum >= this.experimentProfile.params.maxTrialNum) {
-                if (this.status.status === 'RUNNING' ||
-                    this.status.status === 'NO_MORE_TRIAL') {
-                    this.experimentProfile.endTime = Date.now();
-                    await this.storeExperimentProfile();
+                if (this.status.status !== 'DONE') {
+                    this.status.status = 'NO_MORE_TRIAL';
+                    waitSubmittedToFinish = this.currSubmittedTrialNum;
+
+                    assert(allFinishedTrialJobNum <= waitSubmittedToFinish);
+                    if (allFinishedTrialJobNum >= waitSubmittedToFinish) {
+                        this.status.status = 'DONE';
+                        this.experimentProfile.endTime = Date.now();
+                        await this.storeExperimentProfile();
+                        // write this log for travis CI
+                        this.log.info('Experiment done.');
+                    }
                 }
-                this.status.status = 'DONE';
             } else {
                 if (this.status.status === 'DONE') {
                     delete this.experimentProfile.endTime;
                     await this.storeExperimentProfile();
                 }
-                if (this.status.status !== 'NO_MORE_TRIAL') {
+                if (this.status.status !== 'TUNER_NO_MORE_TRIAL') {
                     this.status.status = 'RUNNING';
                 }
                 for (let i: number = this.trialJobs.size; i < this.experimentProfile.params.trialConcurrency; i++) {
@@ -602,7 +606,7 @@
                 this.requestTrialJobs(this.experimentProfile.params.trialConcurrency);
                 break;
             case NEW_TRIAL_JOB:
-                if (this.status.status === 'NO_MORE_TRIAL') {
+                if (this.status.status === 'TUNER_NO_MORE_TRIAL') {
                     this.log.warning('It is not supposed to receive more trials after NO_MORE_TRIAL is set');
                     this.status.status = 'RUNNING';
                 }
@@ -625,7 +629,7 @@
                         'ADD_HYPERPARAMETER', tunerCommand.trial_job_id, content, undefined);
                 break;
             case NO_MORE_TRIAL_JOBS:
-                this.status.status = 'NO_MORE_TRIAL';
+                this.status.status = 'TUNER_NO_MORE_TRIAL';
                 break;
             case KILL_TRIAL_JOB:
                 await this.trainingService.cancelTrialJob(JSON.parse(content), true);

From 17a39d4c529cb9ea3c9be739afc53c54aab83063 Mon Sep 17 00:00:00 2001
From: xuehui
Date: Thu, 3 Jan 2019 15:58:32 +0800
Subject: [PATCH 07/54] Add enas nni version from contributor (#557)

* update readme in ga_squad
* update readme
* fix typo
* Update README.md
* Update README.md
* Update README.md
* update readme
* update
* fix path
* update reference
* fix bug in config file
* update nni_arch_overview.png
* update
* update
* update
* add enas_nni
---
 examples/tuners/enas_nni/README.md | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 examples/tuners/enas_nni/README.md

diff --git a/examples/tuners/enas_nni/README.md b/examples/tuners/enas_nni/README.md
new file mode 100644
index 0000000000..ef6948330a
--- /dev/null
+++ b/examples/tuners/enas_nni/README.md
@@ -0,0 +1,6 @@
+ **Run ENAS in NNI**
+ ===
+
+ Now we have an ENAS example, [enas-nni](https://github.com/countif/enas_nni), from our contributors, which runs in NNI.
+ Thanks to our lovely contributors!
+ And we welcome more and more people to join us!
\ No newline at end of file

From 50697a2ff5e710fe87cf21aed8565a413b5043d1 Mon Sep 17 00:00:00 2001
From: chicm-ms <38930155+chicm-ms@users.noreply.github.com>
Date: Thu, 3 Jan 2019 16:37:53 +0800
Subject: [PATCH 08/54] Code coverage report (#559)

* Add UT code coverage report
* updates
* updates
* updates
* updates
* updates
* updates
* integration test python code coverage report
---
 src/nni_manager/core/test/import_all.test.ts |   15 +
 src/nni_manager/package.json                 |   35 +-
 src/nni_manager/yarn.lock                    | 1524 ++++++++++++++----
 src/sdk/pynni/nni/__main__.py                |    4 +
 src/sdk/pynni/nni/msg_dispatcher_base.py     |    7 +-
 src/sdk/pynni/setup.py                       |    3 +-
 src/sdk/pynni/ut.sh                          |    3 +
 test/.coveragerc                             |   31 +
 test/it.sh                                   |   18 +
 tools/nni_cmd/nnictl.py                      |    4 +
 10 files changed, 1305 insertions(+), 339 deletions(-)
 create mode 100644 src/nni_manager/core/test/import_all.test.ts
 create mode 100644 src/sdk/pynni/ut.sh
 create mode 100644 test/.coveragerc
 create mode 100644 test/it.sh

diff --git a/src/nni_manager/core/test/import_all.test.ts b/src/nni_manager/core/test/import_all.test.ts
new file mode 100644
index 0000000000..f4ef2dd646
--- /dev/null
+++ b/src/nni_manager/core/test/import_all.test.ts
@@ -0,0 +1,15 @@
+import * as glob from 'glob';
+
+// Istanbul only generates reports for used/imported files; files that are not used/imported
+// by test cases are not included in code coverage reports.
+// This is a workaround to import all files in order to show all source files in code coverage reports.
+
+glob.sync('**/*.ts').forEach((file) => {
+    if (file.indexOf('node_modules/') < 0 && file.indexOf('types/') < 0
+        && file.indexOf('.test.ts') < 0 && file.indexOf('main.ts') < 0) {
+        try {
+            import('../../' + file);
+        } catch(err) {
+        }
+    }
+})

diff --git a/src/nni_manager/package.json b/src/nni_manager/package.json
index 08824ba619..0d27e36461 100644
--- a/src/nni_manager/package.json
+++ b/src/nni_manager/package.json
@@ -5,16 +5,19 @@
   "scripts": {
     "postbuild": "cp -rf scripts ./dist/ && cp -rf config ./dist/",
     "build": "tsc",
-    "test": "mocha -r ts-node/register -t 15000 --recursive **/*.test.ts --exclude node_modules/**/**/*.test.ts --exclude core/test/nnimanager.test.ts --colors",
+    "test": "nyc mocha -r ts-node/register -t 15000 --recursive **/*.test.ts --exclude node_modules/**/**/*.test.ts --exclude core/test/nnimanager.test.ts --colors",
     "start": "node dist/main.js",
     "tslint": "tslint -p ."
}, "license": "MIT", "dependencies": { + "azure-storage": "^2.10.2", "chai-as-promised": "^7.1.1", "child-process-promise": "^2.2.1", "express": "^4.16.3", "express-joi-validator": "^2.0.0", + "js-base64": "^2.4.9", + "kubernetes-client": "^6.5.0", "node-nvidia-smi": "^1.0.0", "rx": "^4.1.0", "sqlite3": "^4.0.2", @@ -25,15 +28,13 @@ "ts-deferred": "^1.0.4", "typescript-ioc": "^1.2.4", "typescript-string-operations": "^1.3.1", - "webhdfs": "^1.2.0", - "azure-storage": "^2.10.2", - "kubernetes-client": "^6.5.0", - "js-base64": "^2.4.9" + "webhdfs": "^1.2.0" }, "devDependencies": { "@types/chai": "^4.1.4", "@types/chai-as-promised": "^7.1.0", "@types/express": "^4.16.0", + "@types/glob": "^7.1.1", "@types/mocha": "^5.2.5", "@types/node": "^10.12.18", "@types/request": "^2.47.1", @@ -43,7 +44,9 @@ "@types/stream-buffers": "^3.0.2", "@types/tmp": "^0.0.33", "chai": "^4.1.2", + "glob": "^7.1.3", "mocha": "^5.2.0", + "nyc": "^13.1.0", "request": "^2.87.0", "rmdir": "^1.2.0", "tmp": "^0.0.33", @@ -54,5 +57,27 @@ }, "engines": { "node": ">=10.0.0" + }, + "nyc": { + "include": [ + "**/*.ts" + ], + "exclude": [ + "**/test/*", + "./node_modules/" + ], + "extension": [ + ".ts", + ".tsx" + ], + "require": [ + "ts-node/register" + ], + "reporter": [ + "text-summary", + "html" + ], + "sourceMap": true, + "instrument": true } } diff --git a/src/nni_manager/yarn.lock b/src/nni_manager/yarn.lock index b568f7d179..2c3b5a3525 100644 --- a/src/nni_manager/yarn.lock +++ b/src/nni_manager/yarn.lock @@ -2,10 +2,91 @@ # yarn lockfile v1 +"@babel/code-frame@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0.tgz#06e2ab19bdb535385559aabb5ba59729482800f8" + dependencies: + "@babel/highlight" "^7.0.0" + +"@babel/generator@^7.0.0", "@babel/generator@^7.2.2": + version "7.2.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.2.2.tgz#18c816c70962640eab42fe8cae5f3947a5c65ccc" + dependencies: + "@babel/types" "^7.2.2" + jsesc "^2.5.1" + lodash "^4.17.10" + source-map "^0.5.0" + trim-right "^1.0.1" + +"@babel/helper-function-name@^7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53" + dependencies: + "@babel/helper-get-function-arity" "^7.0.0" + "@babel/template" "^7.1.0" + "@babel/types" "^7.0.0" + +"@babel/helper-get-function-arity@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/helper-split-export-declaration@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.0.0.tgz#3aae285c0311c2ab095d997b8c9a94cad547d813" + dependencies: + "@babel/types" "^7.0.0" + +"@babel/highlight@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0.tgz#f710c38c8d458e6dd9a201afb637fcb781ce99e4" + dependencies: + chalk "^2.0.0" + esutils "^2.0.2" + js-tokens "^4.0.0" + +"@babel/parser@^7.0.0", "@babel/parser@^7.2.2", "@babel/parser@^7.2.3": + version "7.2.3" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.2.3.tgz#32f5df65744b70888d17872ec106b02434ba1489" + +"@babel/template@^7.0.0", "@babel/template@^7.1.0": + version "7.2.2" + resolved 
"https://registry.yarnpkg.com/@babel/template/-/template-7.2.2.tgz#005b3fdf0ed96e88041330379e0da9a708eb2907" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/parser" "^7.2.2" + "@babel/types" "^7.2.2" + +"@babel/traverse@^7.0.0": + version "7.2.3" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.2.3.tgz#7ff50cefa9c7c0bd2d81231fdac122f3957748d8" + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/generator" "^7.2.2" + "@babel/helper-function-name" "^7.1.0" + "@babel/helper-split-export-declaration" "^7.0.0" + "@babel/parser" "^7.2.3" + "@babel/types" "^7.2.2" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.10" + +"@babel/types@^7.0.0", "@babel/types@^7.2.2": + version "7.2.2" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.2.2.tgz#44e10fc24e33af524488b716cdaee5360ea8ed1e" + dependencies: + esutils "^2.0.2" + lodash "^4.17.10" + to-fast-properties "^2.0.0" + +"@sindresorhus/is@^0.7.0": + version "0.7.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" + "@types/body-parser@*": version "1.17.0" resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.17.0.tgz#9f5c9d9bd04bb54be32d5eb9fc0d8c974e6cf58c" - integrity sha512-a2+YeUjPkztKJu5aIF2yArYFQQp8d51wZ7DavSHjFuY1mqVgidGyzEQ41JIVNy82fXj8yPgy2vJmfIywgESW6w== dependencies: "@types/connect" "*" "@types/node" "*" @@ -13,36 +94,30 @@ "@types/caseless@*": version "0.12.1" resolved "https://registry.yarnpkg.com/@types/caseless/-/caseless-0.12.1.tgz#9794c69c8385d0192acc471a540d1f8e0d16218a" - integrity sha512-FhlMa34NHp9K5MY1Uz8yb+ZvuX0pnvn3jScRSNAb75KHGB8d3rEU6hqMs3Z2vjuytcMfRg6c5CHMc3wtYyD2/A== "@types/chai-as-promised@^7.1.0": version "7.1.0" resolved "https://registry.yarnpkg.com/@types/chai-as-promised/-/chai-as-promised-7.1.0.tgz#010b04cde78eacfb6e72bfddb3e58fe23c2e78b9" - integrity sha512-MFiW54UOSt+f2bRw8J7LgQeIvE/9b4oGvwU7XW30S9QGAiHGnU/fmiOprsyMkdmH2rl8xSPc0/yrQw8juXU6bQ== dependencies: "@types/chai" "*" "@types/chai@*", "@types/chai@^4.1.4": version "4.1.4" resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.1.4.tgz#5ca073b330d90b4066d6ce18f60d57f2084ce8ca" - integrity sha512-h6+VEw2Vr3ORiFCyyJmcho2zALnUq9cvdB/IO8Xs9itrJVCenC7o26A6+m7D0ihTTr65eS259H5/Ghl/VjYs6g== "@types/connect@*": version "3.4.32" resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.32.tgz#aa0e9616b9435ccad02bc52b5b454ffc2c70ba28" - integrity sha512-4r8qa0quOvh7lGD0pre62CAb1oni1OO6ecJLGCezTmhQ8Fz50Arx9RUszryR8KlgK6avuSXvviL6yWyViQABOg== dependencies: "@types/node" "*" "@types/events@*": version "1.2.0" resolved "https://registry.yarnpkg.com/@types/events/-/events-1.2.0.tgz#81a6731ce4df43619e5c8c945383b3e62a89ea86" - integrity sha512-KEIlhXnIutzKwRbQkGWb/I4HFqBuUykAdHgDED6xqwXJfONCjF5VoE0cXEiurh3XauygxzeDzgtXUqvLkxFzzA== "@types/express-serve-static-core@*": version "4.16.0" resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.16.0.tgz#fdfe777594ddc1fe8eb8eccce52e261b496e43e7" - integrity sha512-lTeoCu5NxJU4OD9moCgm0ESZzweAx0YqsAcab6OB0EB3+As1OaHtKnaGJvcngQxYsi9UNv0abn4/DRavrRxt4w== dependencies: "@types/events" "*" "@types/node" "*" @@ -51,7 +126,6 @@ "@types/express@^4.16.0": version "4.16.0" resolved "https://registry.yarnpkg.com/@types/express/-/express-4.16.0.tgz#6d8bc42ccaa6f35cf29a2b7c3333cb47b5a32a19" - integrity sha512-TtPEYumsmSTtTetAPXlJVf3kEqb6wZK0bZojpJQrnD/djV4q1oB6QQ8aKvKqwNPACoe02GNiy5zDzcYivR5Z2w== dependencies: "@types/body-parser" "*" 
"@types/express-serve-static-core" "*" @@ -60,39 +134,44 @@ "@types/form-data@*": version "2.2.1" resolved "https://registry.yarnpkg.com/@types/form-data/-/form-data-2.2.1.tgz#ee2b3b8eaa11c0938289953606b745b738c54b1e" - integrity sha512-JAMFhOaHIciYVh8fb5/83nmuO/AHwmto+Hq7a9y8FzLDcC1KCU344XDOMEmahnrTFlHjgh4L0WJFczNIX2GxnQ== dependencies: "@types/node" "*" +"@types/glob@^7.1.1": + version "7.1.1" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.1.tgz#aa59a1c6e3fbc421e07ccd31a944c30eba521575" + dependencies: + "@types/events" "*" + "@types/minimatch" "*" + "@types/node" "*" + "@types/mime@*": version "2.0.0" resolved "https://registry.yarnpkg.com/@types/mime/-/mime-2.0.0.tgz#5a7306e367c539b9f6543499de8dd519fac37a8b" - integrity sha512-A2TAGbTFdBw9azHbpVd+/FkdW2T6msN1uct1O9bH3vTerEHKZhTXJUQXy+hNq1B0RagfU8U+KBdqiZpxjhOUQA== + +"@types/minimatch@*": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" "@types/mocha@^5.2.5": version "5.2.5" resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-5.2.5.tgz#8a4accfc403c124a0bafe8a9fc61a05ec1032073" - integrity sha512-lAVp+Kj54ui/vLUFxsJTMtWvZraZxum3w3Nwkble2dNuV5VnPA+Mi2oGX9XYJAaIvZi3tn3cbjS/qcJXRb6Bww== "@types/node@*": version "10.5.2" resolved "https://registry.yarnpkg.com/@types/node/-/node-10.5.2.tgz#f19f05314d5421fe37e74153254201a7bf00a707" - integrity sha512-m9zXmifkZsMHZBOyxZWilMwmTlpC8x5Ty360JKTiXvlXZfBWYpsg9ZZvP/Ye+iZUh+Q+MxDLjItVTWIsfwz+8Q== "@types/node@^10.5.5": version "10.5.5" resolved "https://registry.yarnpkg.com/@types/node/-/node-10.5.5.tgz#8e84d24e896cd77b0d4f73df274027e3149ec2ba" - integrity sha512-6Qnb1gXbp3g1JX9QVJj3A6ORzc9XCyhokxUKaoonHgNXcQhmk8adhotxfkeK8El9TnFeUuH72yI6jQ5nDJKS6w== "@types/range-parser@*": version "1.2.2" resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.2.tgz#fa8e1ad1d474688a757140c91de6dace6f4abc8d" - integrity sha512-HtKGu+qG1NPvYe1z7ezLsyIaXYyi8SoAVqWDZgDQ8dLrsZvSzUNCwZyfX33uhWxL/SU0ZDQZ3nwZ0nimt507Kw== "@types/request@^2.47.1": version "2.47.1" resolved "https://registry.yarnpkg.com/@types/request/-/request-2.47.1.tgz#25410d3afbdac04c91a94ad9efc9824100735824" - integrity sha512-TV3XLvDjQbIeVxJ1Z3oCTDk/KuYwwcNKVwz2YaT0F5u86Prgc4syDAp6P96rkTQQ4bIdh+VswQIC9zS6NjY7/g== dependencies: "@types/caseless" "*" "@types/form-data" "*" @@ -102,82 +181,70 @@ "@types/rx-core-binding@*": version "4.0.4" resolved "https://registry.yarnpkg.com/@types/rx-core-binding/-/rx-core-binding-4.0.4.tgz#d969d32f15a62b89e2862c17b3ee78fe329818d3" - integrity sha512-5pkfxnC4w810LqBPUwP5bg7SFR/USwhMSaAeZQQbEHeBp57pjKXRlXmqpMrLJB4y1oglR/c2502853uN0I+DAQ== dependencies: "@types/rx-core" "*" "@types/rx-core@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-core/-/rx-core-4.0.3.tgz#0b3354b1238cedbe2b74f6326f139dbc7a591d60" - integrity sha1-CzNUsSOM7b4rdPYybxOdvHpZHWA= "@types/rx-lite-aggregates@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-lite-aggregates/-/rx-lite-aggregates-4.0.3.tgz#6efb2b7f3d5f07183a1cb2bd4b1371d7073384c2" - integrity sha512-MAGDAHy8cRatm94FDduhJF+iNS5//jrZ/PIfm+QYw9OCeDgbymFHChM8YVIvN2zArwsRftKgE33QfRWvQk4DPg== dependencies: "@types/rx-lite" "*" "@types/rx-lite-async@*": version "4.0.2" resolved "https://registry.yarnpkg.com/@types/rx-lite-async/-/rx-lite-async-4.0.2.tgz#27fbf0caeff029f41e2d2aae638b05e91ceb600c" - integrity sha512-vTEv5o8l6702ZwfAM5aOeVDfUwBSDOs+ARoGmWAKQ6LOInQ8J4/zjM7ov12fuTpktUKdMQjkeCp07Vd73mPkxw== dependencies: 
"@types/rx-lite" "*" "@types/rx-lite-backpressure@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-lite-backpressure/-/rx-lite-backpressure-4.0.3.tgz#05abb19bdf87cc740196c355e5d0b37bb50b5d56" - integrity sha512-Y6aIeQCtNban5XSAF4B8dffhIKu6aAy/TXFlScHzSxh6ivfQBQw6UjxyEJxIOt3IT49YkS+siuayM2H/Q0cmgA== dependencies: "@types/rx-lite" "*" "@types/rx-lite-coincidence@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-lite-coincidence/-/rx-lite-coincidence-4.0.3.tgz#80bd69acc4054a15cdc1638e2dc8843498cd85c0" - integrity sha512-1VNJqzE9gALUyMGypDXZZXzR0Tt7LC9DdAZQ3Ou/Q0MubNU35agVUNXKGHKpNTba+fr8GdIdkC26bRDqtCQBeQ== dependencies: "@types/rx-lite" "*" "@types/rx-lite-experimental@*": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/rx-lite-experimental/-/rx-lite-experimental-4.0.1.tgz#c532f5cbdf3f2c15da16ded8930d1b2984023cbd" - integrity sha1-xTL1y98/LBXaFt7Ykw0bKYQCPL0= dependencies: "@types/rx-lite" "*" "@types/rx-lite-joinpatterns@*": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/rx-lite-joinpatterns/-/rx-lite-joinpatterns-4.0.1.tgz#f70fe370518a8432f29158cc92ffb56b4e4afc3e" - integrity sha1-9w/jcFGKhDLykVjMkv+1a05K/D4= dependencies: "@types/rx-lite" "*" "@types/rx-lite-testing@*": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/rx-lite-testing/-/rx-lite-testing-4.0.1.tgz#21b19d11f4dfd6ffef5a9d1648e9c8879bfe21e9" - integrity sha1-IbGdEfTf1v/vWp0WSOnIh5v+Iek= dependencies: "@types/rx-lite-virtualtime" "*" "@types/rx-lite-time@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-lite-time/-/rx-lite-time-4.0.3.tgz#0eda65474570237598f3448b845d2696f2dbb1c4" - integrity sha512-ukO5sPKDRwCGWRZRqPlaAU0SKVxmWwSjiOrLhoQDoWxZWg6vyB9XLEZViKOzIO6LnTIQBlk4UylYV0rnhJLxQw== dependencies: "@types/rx-lite" "*" "@types/rx-lite-virtualtime@*": version "4.0.3" resolved "https://registry.yarnpkg.com/@types/rx-lite-virtualtime/-/rx-lite-virtualtime-4.0.3.tgz#4b30cacd0fe2e53af29f04f7438584c7d3959537" - integrity sha512-3uC6sGmjpOKatZSVHI2xB1+dedgml669ZRvqxy+WqmGJDVusOdyxcKfyzjW0P3/GrCiN4nmRkLVMhPwHCc5QLg== dependencies: "@types/rx-lite" "*" "@types/rx-lite@*": version "4.0.5" resolved "https://registry.yarnpkg.com/@types/rx-lite/-/rx-lite-4.0.5.tgz#b3581525dff69423798daa9a0d33c1e66a5e8c4c" - integrity sha512-KZk5XTR1dm/kNgBx8iVpjno6fRYtAUQWBOmj+O8j724+nk097sz4fOoHJNpCkOJUtHUurZlJC7QvSFCZHbkC+w== dependencies: "@types/rx-core" "*" "@types/rx-core-binding" "*" @@ -185,7 +252,6 @@ "@types/rx@^4.1.1": version "4.1.1" resolved "https://registry.yarnpkg.com/@types/rx/-/rx-4.1.1.tgz#598fc94a56baed975f194574e0f572fd8e627a48" - integrity sha1-WY/JSla67ZdfGUV04PVy/Y5iekg= dependencies: "@types/rx-core" "*" "@types/rx-core-binding" "*" @@ -203,7 +269,6 @@ "@types/serve-static@*": version "1.13.2" resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.2.tgz#f5ac4d7a6420a99a6a45af4719f4dcd8cd907a48" - integrity sha512-/BZ4QRLpH/bNYgZgwhKEh+5AsboDBcUdlBYgzoLX0fpj3Y2gp6EApyOlM3bK53wQS/OE1SrdSYBAbux2D1528Q== dependencies: "@types/express-serve-static-core" "*" "@types/mime" "*" @@ -211,7 +276,6 @@ "@types/sqlite3@^3.1.3": version "3.1.3" resolved "https://registry.yarnpkg.com/@types/sqlite3/-/sqlite3-3.1.3.tgz#580d547203b8ad6e11aa6a6769c8ca5d7e197d13" - integrity sha512-BgGToABnI/8/HnZtZz2Qac6DieU2Dm/j3rtbMmUlDVo4T/uLu8cuVfU/n2UkHowiiwXb6/7h/CmSqBIVKgcTMA== dependencies: "@types/events" "*" "@types/node" "*" @@ -219,14 +283,12 @@ "@types/ssh2-streams@*": version "0.1.2" resolved 
"https://registry.yarnpkg.com/@types/ssh2-streams/-/ssh2-streams-0.1.2.tgz#7aa18b8c2450f17699e9ea18a76efc838188d58d" - integrity sha1-eqGLjCRQ8XaZ6eoYp278g4GI1Y0= dependencies: "@types/node" "*" "@types/ssh2@^0.5.35": version "0.5.35" resolved "https://registry.yarnpkg.com/@types/ssh2/-/ssh2-0.5.35.tgz#d6e60d59b7fc22db10abf4730aa7448babde7e3b" - integrity sha1-1uYNWbf8ItsQq/RzCqdEi6vefjs= dependencies: "@types/node" "*" "@types/ssh2-streams" "*" @@ -234,37 +296,38 @@ "@types/stream-buffers@^3.0.2": version "3.0.2" resolved "https://registry.yarnpkg.com/@types/stream-buffers/-/stream-buffers-3.0.2.tgz#b73bfcceae39ecb259750b44ef38a36cfc20e370" - integrity sha1-tzv8zq457LJZdQtE7zijbPwg43A= dependencies: "@types/node" "*" "@types/tmp@^0.0.33": version "0.0.33" resolved "https://registry.yarnpkg.com/@types/tmp/-/tmp-0.0.33.tgz#1073c4bc824754ae3d10cfab88ab0237ba964e4d" - integrity sha1-EHPEvIJHVK49EM+riKsCN7qWTk0= "@types/tough-cookie@*": version "2.3.3" resolved "https://registry.yarnpkg.com/@types/tough-cookie/-/tough-cookie-2.3.3.tgz#7f226d67d654ec9070e755f46daebf014628e9d9" - integrity sha512-MDQLxNFRLasqS4UlkWMSACMKeSm1x4Q3TxzUC7KQUsh6RK1ZrQ0VEyE3yzXcBu+K8ejVj4wuX32eUG02yNp+YQ== abbrev@1: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" - integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== accepts@~1.3.5: version "1.3.5" resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.5.tgz#eb777df6011723a3b14e8a72c0805c8e86746bd2" - integrity sha1-63d99gEXI6OxTopywIBcjoZ0a9I= dependencies: mime-types "~2.1.18" negotiator "0.6.1" +aggregate-error@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" + dependencies: + clean-stack "^1.0.0" + indent-string "^3.0.0" + ajv@^5.1.0, ajv@^5.3.0: version "5.5.2" resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" - integrity sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU= dependencies: co "^4.6.0" fast-deep-equal "^1.0.0" @@ -274,34 +337,38 @@ ajv@^5.1.0, ajv@^5.3.0: ansi-regex@^2.0.0: version "2.1.1" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" - integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= ansi-regex@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" - integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= ansi-styles@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" - integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= ansi-styles@^3.2.1: version "3.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== dependencies: color-convert "^1.9.0" +append-transform@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-1.0.0.tgz#046a52ae582a228bd72f58acfbe2967c678759ab" + dependencies: + default-require-extensions "^2.0.0" + aproba@^1.0.3: version "1.2.0" resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" - integrity 
sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== + +archy@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40" are-we-there-yet@~1.1.2: version "1.1.5" resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" - integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w== dependencies: delegates "^1.0.0" readable-stream "^2.0.6" @@ -309,71 +376,84 @@ are-we-there-yet@~1.1.2: argparse@^1.0.7: version "1.0.10" resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" - integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== dependencies: sprintf-js "~1.0.2" array-flatten@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" - integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= array-union@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" - integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= dependencies: array-uniq "^1.0.1" array-uniq@^1.0.1: version "1.0.3" resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" - integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= -arrify@^1.0.0: +arrify@^1.0.0, arrify@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" - integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= asn1@~0.2.0, asn1@~0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" - integrity sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y= assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= assertion-error@^1.0.1: version "1.1.0" resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.1.0.tgz#e60b6b0e8f301bd97e5375215bda406c85118c0b" - integrity sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw== + +async-limiter@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8" + +async@^2.5.0: + version "2.6.1" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.1.tgz#b245a23ca71930044ec53fa46aa00a3e87c6a610" + dependencies: + lodash "^4.17.10" asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= aws-sign2@~0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= aws4@^1.6.0: version "1.7.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" - integrity sha512-32NDda82rhwD9/JBCCkB+MRYDp0oSvlo2IL6rQWA10PQi7tDUM3eqMSltXmY+Oyl/7N3P3qNtAlv7X0d9bI28w== aws4@^1.8.0: version "1.8.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" - integrity 
sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ== + +azure-storage@^2.10.2: + version "2.10.2" + resolved "https://registry.yarnpkg.com/azure-storage/-/azure-storage-2.10.2.tgz#3bcabdbf10e72fd0990db81116e49023c4a675b6" + dependencies: + browserify-mime "~1.2.9" + extend "^3.0.2" + json-edm-parser "0.1.2" + md5.js "1.3.4" + readable-stream "~2.0.0" + request "^2.86.0" + underscore "~1.8.3" + uuid "^3.0.0" + validator "~9.4.1" + xml2js "0.2.8" + xmlbuilder "^9.0.7" babel-code-frame@^6.22.0: version "6.26.0" resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" - integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= dependencies: chalk "^1.1.3" esutils "^2.0.2" @@ -382,19 +462,20 @@ babel-code-frame@^6.22.0: balanced-match@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + +base64url@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/base64url/-/base64url-3.0.1.tgz#6399d572e2bc3f90a9a8b22d5dbb0a32d33f788d" bcrypt-pbkdf@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= dependencies: tweetnacl "^0.14.3" body-parser@1.18.2: version "1.18.2" resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.18.2.tgz#87678a19d84b47d859b83199bd59bce222b10454" - integrity sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ= dependencies: bytes "3.0.0" content-type "~1.0.4" @@ -410,14 +491,12 @@ body-parser@1.18.2: boom@2.6.x: version "2.6.1" resolved "https://registry.yarnpkg.com/boom/-/boom-2.6.1.tgz#4dc8ef9b6dfad9c43bbbfbe71fa4c21419f22753" - integrity sha1-Tcjvm2362cQ7u/vnH6TCFBnyJ1M= dependencies: hoek "2.x.x" brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== dependencies: balanced-match "^1.0.0" concat-map "0.0.1" @@ -425,49 +504,69 @@ brace-expansion@^1.1.7: browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" - integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +browserify-mime@~1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/browserify-mime/-/browserify-mime-1.2.9.tgz#aeb1af28de6c0d7a6a2ce40adb68ff18422af31f" buffer-from@^1.0.0, buffer-from@^1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" - integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== buffer-stream-reader@^0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/buffer-stream-reader/-/buffer-stream-reader-0.1.1.tgz#ca8bf93631deedd8b8f8c3bb44991cc30951e259" - integrity sha1-yov5NjHe7di4+MO7RJkcwwlR4lk= -builtin-modules@^1.1.1: +builtin-modules@^1.0.0, builtin-modules@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" - integrity sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8= bytes@3.0.0: version "3.0.0" resolved 
"https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" - integrity sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg= + +cacheable-request@^2.1.1: + version "2.1.4" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" + dependencies: + clone-response "1.0.2" + get-stream "3.0.0" + http-cache-semantics "3.8.1" + keyv "3.0.0" + lowercase-keys "1.0.0" + normalize-url "2.0.1" + responselike "1.0.2" + +caching-transform@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caching-transform/-/caching-transform-2.0.0.tgz#e1292bd92d35b6e8b1ed7075726724b3bd64eea0" + dependencies: + make-dir "^1.0.0" + md5-hex "^2.0.0" + package-hash "^2.0.0" + write-file-atomic "^2.0.0" callsites@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/callsites/-/callsites-1.0.1.tgz#c14c24188ce8e1d6a030b4c3c942e6ba895b6a1a" - integrity sha1-wUwkGIzo4dagMLTDyULmuolbaho= + +camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" caseless@~0.12.0: version "0.12.0" resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= chai-as-promised@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.1.tgz#08645d825deb8696ee61725dbf590c012eb00ca0" - integrity sha512-azL6xMoi+uxu6z4rhWQ1jbdUhOMhis2PvscD/xjLqNMkv3BPPp2JyyuTHOrf9BOosGpNQ11v6BKv/g57RXbiaA== dependencies: check-error "^1.0.2" chai@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chai/-/chai-4.1.2.tgz#0f64584ba642f0f2ace2806279f4f06ca23ad73c" - integrity sha1-D2RYS6ZC8PKs4oBiefTwbKI61zw= dependencies: assertion-error "^1.0.1" check-error "^1.0.1" @@ -479,7 +578,6 @@ chai@^4.1.2: chalk@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" - integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= dependencies: ansi-styles "^2.2.1" escape-string-regexp "^1.0.2" @@ -487,10 +585,9 @@ chalk@^1.1.3: strip-ansi "^3.0.0" supports-color "^2.0.0" -chalk@^2.3.0: +chalk@^2.0.0, chalk@^2.3.0: version "2.4.1" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" - integrity sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ== dependencies: ansi-styles "^3.2.1" escape-string-regexp "^1.0.5" @@ -499,12 +596,10 @@ chalk@^2.3.0: check-error@^1.0.1, check-error@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.2.tgz#574d312edd88bb5dd8912e9286dd6c0aed4aac82" - integrity sha1-V00xLt2Iu13YkS6Sht1sCu1KrII= child-process-promise@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/child-process-promise/-/child-process-promise-2.2.1.tgz#4730a11ef610fad450b8f223c79d31d7bdad8074" - integrity sha1-RzChHvYQ+tRQuPIjx50x172tgHQ= dependencies: cross-spawn "^4.0.2" node-version "^1.0.0" @@ -513,211 +608,283 @@ child-process-promise@^2.2.1: chownr@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181" - integrity sha1-4qdQQqlVGQi+vSW4Uj1fl2nXkYE= + +clean-stack@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-1.3.0.tgz#9e821501ae979986c46b1d66d2d432db2fd4ae31" + +cliui@^4.0.0: + version "4.1.0" + 
resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49" + dependencies: + string-width "^2.1.1" + strip-ansi "^4.0.0" + wrap-ansi "^2.0.0" + +clone-response@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + dependencies: + mimic-response "^1.0.0" co@^4.6.0: version "4.6.0" resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" - integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= code-point-at@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" - integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= color-convert@^1.9.0: version "1.9.2" resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.2.tgz#49881b8fba67df12a96bdf3f56c0aab9e7913147" - integrity sha512-3NUJZdhMhcdPn8vJ9v2UQJoH0qqoGUkYTgFEPZaPjEtwmmKUfNV46zZmgB2M5M4DCEQHMaCfWHCxiBflLm04Tg== dependencies: color-name "1.1.1" color-name@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" - integrity sha1-SxQVMEz1ACjqgWQ2Q72C6gWANok= combined-stream@1.0.6, combined-stream@~1.0.5: version "1.0.6" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.6.tgz#723e7df6e801ac5613113a7e445a9b69cb632818" - integrity sha1-cj599ugBrFYTETp+RFqbactjKBg= dependencies: delayed-stream "~1.0.0" combined-stream@~1.0.6: version "1.0.7" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.7.tgz#2d1d24317afb8abe95d6d2c0b07b57813539d828" - integrity sha512-brWl9y6vOB1xYPZcpZde3N9zDByXTosAeMDo4p1wzo6UMOX4vumB+TP1RZ76sfE6Md68Q0NJSrE/gbezd4Ul+w== dependencies: delayed-stream "~1.0.0" commander@2.15.1: version "2.15.1" resolved "https://registry.yarnpkg.com/commander/-/commander-2.15.1.tgz#df46e867d0fc2aec66a34662b406a9ccafff5b0f" - integrity sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag== commander@^2.12.1: version "2.16.0" resolved "https://registry.yarnpkg.com/commander/-/commander-2.16.0.tgz#f16390593996ceb4f3eeb020b31d78528f7f8a50" - integrity sha512-sVXqklSaotK9at437sFlFpyOcJonxe0yST/AG9DkQKUdIE6IqGIMv4SfAQSKaJbSdVEJYItASCrBiVQHq1HQew== + +commander@~2.17.1: + version "2.17.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf" + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" concat-map@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= console-control-strings@^1.0.0, console-control-strings@~1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" - integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= content-disposition@0.5.2: version "0.5.2" resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" - integrity sha1-DPaLud318r55YcOoUXjLhdunjLQ= content-type@~1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" - integrity 
sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== + +convert-source-map@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20" + dependencies: + safe-buffer "~5.1.1" cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" - integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw= cookie@0.3.1: version "0.3.1" resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb" - integrity sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s= core-util-is@1.0.2, core-util-is@~1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= -cross-spawn@^4.0.2: +cross-spawn@^4, cross-spawn@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-4.0.2.tgz#7b9247621c23adfdd3856004a823cbe397424d41" - integrity sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE= dependencies: lru-cache "^4.0.1" which "^1.2.9" +cross-spawn@^5.0.1: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + dashdash@^1.12.0: version "1.14.1" resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= dependencies: assert-plus "^1.0.0" +debug-log@^1.0.1: + version "1.0.1" + resolved "http://registry.npmjs.org/debug-log/-/debug-log-1.0.1.tgz#2307632d4c04382b8df8a32f70b895046d52745f" + debug@2.6.9, debug@^2.1.2: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== dependencies: ms "2.0.0" debug@3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" - integrity sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g== dependencies: ms "2.0.0" +debug@^3.1.0: + version "3.2.6" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" + dependencies: + ms "^2.1.1" + +debug@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + dependencies: + ms "^2.1.1" + +decamelize@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + dependencies: + mimic-response "^1.0.0" + deep-eql@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-3.0.1.tgz#dfc9404400ad1c8fe023e7da1df1c147c4b444df" - integrity sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw== 
dependencies: type-detect "^4.0.0" deep-extend@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" - integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deepmerge@^2.1.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-2.2.1.tgz#5d3ff22a01c00f645405a2fbc17d0778a1801170" + +default-require-extensions@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/default-require-extensions/-/default-require-extensions-2.0.0.tgz#f5f8fbb18a7d6d50b21f641f649ebb522cfe24f7" + dependencies: + strip-bom "^3.0.0" delayed-stream@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= delegates@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" - integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= depd@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.1.tgz#5783b4e1c459f06fa5ca27f991f3d06e7a310359" - integrity sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k= depd@~1.1.1, depd@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" - integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= destroy@~1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" - integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA= detect-libc@^1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" - integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups= diff@3.5.0, diff@^3.1.0, diff@^3.2.0: version "3.5.0" resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" - integrity sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" ecc-jsbn@~0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" - integrity sha1-D8c6ntXw1Tw4GTOYUj735UN3dQU= dependencies: jsbn "~0.1.0" ee-first@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" - integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" - integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k= + +error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +es6-error@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/es6-error/-/es6-error-4.1.1.tgz#9e3af407459deed47e9a91f9b885a84eb05c561d" + +es6-promise@^4.2.5: + version "4.2.5" + resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.5.tgz#da6d0d5692efb461e082c14817fe2427d8f5d054" escape-html@~1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" - integrity 
sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg= escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= esprima@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" - integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== esutils@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" - integrity sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs= etag@~1.8.1: version "1.8.1" resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" - integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= + +execa@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.7.0.tgz#944becd34cc41ee32a63a9faf27ad5a65fc59777" + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" express-joi-validator@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/express-joi-validator/-/express-joi-validator-2.0.0.tgz#24e26e6a8327f69985ed72588f00e295dc3e3234" - integrity sha1-JOJuaoMn9pmF7XJYjwDildw+MjQ= dependencies: boom "2.6.x" extend "2.0.x" @@ -726,7 +893,6 @@ express-joi-validator@^2.0.0: express@^4.16.3: version "4.16.3" resolved "https://registry.yarnpkg.com/express/-/express-4.16.3.tgz#6af8a502350db3246ecc4becf6b5a34d22f7ed53" - integrity sha1-avilAjUNsyRuzEvs9rWjTSL37VM= dependencies: accepts "~1.3.5" array-flatten "1.1.1" @@ -762,37 +928,30 @@ express@^4.16.3: extend@2.0.x: version "2.0.2" resolved "https://registry.yarnpkg.com/extend/-/extend-2.0.2.tgz#1b74985400171b85554894459c978de6ef453ab7" - integrity sha512-AgFD4VU+lVLP6vjnlNfF7OeInLTyeyckCNPEsuxz1vi786UuK/nk6ynPuhn/h+Ju9++TQyr5EpLRI14fc1QtTQ== -extend@^3.0.0, extend@~3.0.1, extend@~3.0.2: +extend@^3.0.0, extend@^3.0.2, extend@~3.0.1, extend@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== extsprintf@1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= extsprintf@^1.2.0: version "1.4.0" resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" - integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= fast-deep-equal@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" - integrity sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ= fast-json-stable-stringify@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" - integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= finalhandler@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.1.tgz#eebf4ed840079c83f4249038c9d703008301b105" - integrity sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg== dependencies: 
debug "2.6.9" encodeurl "~1.0.2" @@ -802,15 +961,46 @@ finalhandler@1.1.1: statuses "~1.4.0" unpipe "~1.0.0" +find-cache-dir@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.0.0.tgz#4c1faed59f45184530fb9d7fa123a4d04a98472d" + dependencies: + commondir "^1.0.1" + make-dir "^1.0.0" + pkg-dir "^3.0.0" + +find-up@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + dependencies: + locate-path "^3.0.0" + +fluent-openapi@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fluent-openapi/-/fluent-openapi-1.0.0.tgz#57ba662c70cfcee3fcc2f62e0e0672001981bc5c" + dependencies: + lodash.merge "^4.6.1" + +foreground-child@^1.5.6: + version "1.5.6" + resolved "http://registry.npmjs.org/foreground-child/-/foreground-child-1.5.6.tgz#4fd71ad2dfde96789b980a5c0a295937cb2f5ce9" + dependencies: + cross-spawn "^4" + signal-exit "^3.0.0" + forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= form-data@~2.3.1, form-data@~2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.2.tgz#4970498be604c20c005d4f5c23aecd21d6b49099" - integrity sha1-SXBJi+YEwgwAXU9cI67NIda0kJk= dependencies: asynckit "^0.4.0" combined-stream "1.0.6" @@ -819,29 +1009,31 @@ form-data@~2.3.1, form-data@~2.3.2: forwarded@~0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84" - integrity sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ= fresh@0.5.2: version "0.5.2" resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" - integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= + +from2@^2.1.1: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" fs-minipass@^1.2.5: version "1.2.5" resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" - integrity sha512-JhBl0skXjUPCFH7x6x61gQxrKyXsxB5gcgePLZCwfyCGGsTISMoIeObbrvVeP6Xmyaudw4TT43qV2Gz+iyd2oQ== dependencies: minipass "^2.2.1" fs.realpath@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= gauge@~2.7.3: version "2.7.4" resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" - integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c= dependencies: aproba "^1.0.3" console-control-strings "^1.0.0" @@ -852,22 +1044,27 @@ gauge@~2.7.3: strip-ansi "^3.0.1" wide-align "^1.1.0" +get-caller-file@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" + get-func-name@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/get-func-name/-/get-func-name-2.0.0.tgz#ead774abee72e20409433a066366023dd6887a41" - integrity sha1-6td0q+5y4gQJQzoGY2YCPdaIekE= + +get-stream@3.0.0, get-stream@^3.0.0: + version "3.0.0" + resolved 
"http://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" getpass@^0.1.1: version "0.1.7" resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= dependencies: assert-plus "^1.0.0" glob-parent@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" - integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4= dependencies: is-glob "^3.1.0" path-dirname "^1.0.0" @@ -875,7 +1072,6 @@ glob-parent@^3.0.0: glob@7.1.2, glob@^7.0.3, glob@^7.0.5, glob@^7.1.1: version "7.1.2" resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" - integrity sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ== dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" @@ -884,10 +1080,24 @@ glob@7.1.2, glob@^7.0.3, glob@^7.0.5, glob@^7.1.1: once "^1.3.0" path-is-absolute "^1.0.0" +glob@^7.1.3: + version "7.1.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.1.0: + version "11.9.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.9.0.tgz#bde236808e987f290768a93d065060d78e6ab249" + globby@^6.0.0: version "6.1.0" resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" - integrity sha1-9abXDoOV4hyFj7BInWTfAkJNUGw= dependencies: array-union "^1.0.1" glob "^7.0.3" @@ -895,20 +1105,53 @@ globby@^6.0.0: pify "^2.0.0" pinkie-promise "^2.0.0" +got@^8.3.2: + version "8.3.2" + resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" + dependencies: + "@sindresorhus/is" "^0.7.0" + cacheable-request "^2.1.1" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + into-stream "^3.1.0" + is-retry-allowed "^1.1.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + mimic-response "^1.0.0" + p-cancelable "^0.4.0" + p-timeout "^2.0.1" + pify "^3.0.0" + safe-buffer "^5.1.1" + timed-out "^4.0.1" + url-parse-lax "^3.0.0" + url-to-options "^1.0.1" + +graceful-fs@^4.1.11, graceful-fs@^4.1.2: + version "4.1.15" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" + growl@1.10.5: version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" - integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== + +handlebars@^4.0.11: + version "4.0.12" + resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.12.tgz#2c15c8a96d46da5e266700518ba8cb8d919d5bc5" + dependencies: + async "^2.5.0" + optimist "^0.6.1" + source-map "^0.6.1" + optionalDependencies: + uglify-js "^3.1.4" har-schema@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" - integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= har-validator@~5.0.3: version "5.0.3" resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" - integrity sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0= dependencies: ajv "^5.1.0" har-schema "^2.0.0" @@ -916,7 +1159,6 @@ 
har-validator@~5.0.3: har-validator@~5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.0.tgz#44657f5688a22cfd4b72486e81b3a3fb11742c29" - integrity sha512-+qnmNjI4OfH2ipQ9VQOw23bBd/ibtfbVdK2fYbY4acTDqKTW/YDp9McimZdDbG8iV9fZizUqQMD5xvriB146TA== dependencies: ajv "^5.3.0" har-schema "^2.0.0" @@ -924,34 +1166,53 @@ har-validator@~5.1.0: has-ansi@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" - integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= dependencies: ansi-regex "^2.0.0" has-flag@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + dependencies: + has-symbol-support-x "^1.4.1" has-unicode@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" - integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= + +hash-base@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" he@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" - integrity sha1-k0EP0hsAlzUVH4howvJx80J+I/0= hoek@2.x.x: version "2.16.3" resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed" - integrity sha1-ILt0A9POo5jpHcRxCo/xuCdKJe0= + +hosted-git-info@^2.1.4: + version "2.7.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.7.1.tgz#97f236977bd6e125408930ff6de3eec6281ec047" + +http-cache-semantics@3.8.1: + version "3.8.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" http-errors@1.6.2: version "1.6.2" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.2.tgz#0a002cc85707192a7e7946ceedc11155f60ec736" - integrity sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY= dependencies: depd "1.1.1" inherits "2.0.3" @@ -961,7 +1222,6 @@ http-errors@1.6.2: http-errors@~1.6.2: version "1.6.3" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" - integrity sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0= dependencies: depd "~1.1.2" inherits "2.0.3" @@ -971,7 +1231,6 @@ http-errors@~1.6.2: http-signature@~1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" - integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE= dependencies: assert-plus "^1.0.0" jsprim "^1.2.2" @@ -980,118 +1239,204 @@ http-signature@~1.2.0: iconv-lite@0.4.19: version "0.4.19" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.19.tgz#f7468f60135f5e5dad3399c0a81be9a1603a082b" - integrity sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ== iconv-lite@^0.4.4: version "0.4.23" resolved 
"https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" - integrity sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA== dependencies: safer-buffer ">= 2.1.2 < 3" ignore-walk@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" - integrity sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ== dependencies: minimatch "^3.0.4" +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + +indent-string@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + inflight@^1.0.4: version "1.0.6" resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= dependencies: once "^1.3.0" wrappy "1" -inherits@2, inherits@2.0.3, inherits@~2.0.3: +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@~2.0.1, inherits@~2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" - integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= ini@~1.3.0: version "1.3.5" resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" - integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw== + +into-stream@^3.1.0: + version "3.1.0" + resolved "http://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" + dependencies: + from2 "^2.1.1" + p-is-promise "^1.1.0" + +invert-kv@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" ipaddr.js@1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.6.0.tgz#e3fa357b773da619f26e95f049d055c72796f86b" - integrity sha1-4/o1e3c9phnybpXwSdBVxyeW+Gs= + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved "http://registry.npmjs.org/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" is-extglob@^2.1.0: version "2.1.1" resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" - integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= is-fullwidth-code-point@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" - integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= dependencies: number-is-nan "^1.0.0" is-fullwidth-code-point@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" - integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= is-glob@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" - integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo= dependencies: is-extglob "^2.1.0" +is-object@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/is-object/-/is-object-1.0.1.tgz#8952688c5ec2ffd6b03ecc85e769e02903083470" + +is-plain-obj@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-retry-allowed@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34" + +is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + is-typedarray@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= is@~0.2.6: version "0.2.7" resolved "http://registry.npmjs.org/is/-/is-0.2.7.tgz#3b34a2c48f359972f35042849193ae7264b63562" - integrity sha1-OzSixI81mXLzUEKEkZOucmS2NWI= isarray@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" - integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= isemail@1.x.x: version "1.2.0" resolved "https://registry.yarnpkg.com/isemail/-/isemail-1.2.0.tgz#be03df8cc3e29de4d2c5df6501263f1fa4595e9a" - integrity sha1-vgPfjMPineTSxd9lASY/H6RZXpo= isexe@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= isstream@~0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= + +istanbul-lib-coverage@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.1.tgz#2aee0e073ad8c5f6a0b00e0dfbf52b4667472eda" + +istanbul-lib-hook@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-hook/-/istanbul-lib-hook-2.0.1.tgz#918a57b75a0f951d552a08487ca1fa5336433d72" + dependencies: + append-transform "^1.0.0" + +istanbul-lib-instrument@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-3.0.0.tgz#b5f066b2a161f75788be17a9d556f40a0cf2afc9" + dependencies: + "@babel/generator" "^7.0.0" + "@babel/parser" "^7.0.0" + "@babel/template" "^7.0.0" + "@babel/traverse" "^7.0.0" + "@babel/types" "^7.0.0" + istanbul-lib-coverage "^2.0.1" + semver "^5.5.0" + +istanbul-lib-report@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-2.0.2.tgz#430a2598519113e1da7af274ba861bd42dd97535" + dependencies: + istanbul-lib-coverage "^2.0.1" + make-dir "^1.3.0" + supports-color "^5.4.0" + +istanbul-lib-source-maps@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-2.0.1.tgz#ce8b45131d8293fdeaa732f4faf1852d13d0a97e" + dependencies: + debug "^3.1.0" + istanbul-lib-coverage "^2.0.1" + make-dir "^1.3.0" + rimraf "^2.6.2" + source-map "^0.6.1" + +istanbul-reports@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-2.0.1.tgz#fb8d6ea850701a3984350b977a969e9a556116a7" + dependencies: + handlebars "^4.0.11" + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + dependencies: + has-to-string-tag-x "^1.2.0" + 
is-object "^1.0.1" joi@6.x.x: version "6.10.1" resolved "https://registry.yarnpkg.com/joi/-/joi-6.10.1.tgz#4d50c318079122000fe5f16af1ff8e1917b77e06" - integrity sha1-TVDDGAeRIgAP5fFq8f+OGRe3fgY= dependencies: hoek "2.x.x" isemail "1.x.x" moment "2.x.x" topo "1.x.x" +js-base64@^2.4.9: + version "2.5.0" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.5.0.tgz#42255ba183ab67ce59a0dee640afdc00ab5ae93e" + js-tokens@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" - integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= -js-yaml@^3.7.0: +js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + +js-yaml@^3.10.0, js-yaml@^3.7.0: version "3.12.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" - integrity sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A== dependencies: argparse "^1.0.7" esprima "^4.0.0" @@ -1099,111 +1444,275 @@ js-yaml@^3.7.0: jsbn@~0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + +json-edm-parser@0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/json-edm-parser/-/json-edm-parser-0.1.2.tgz#1e60b0fef1bc0af67bc0d146dfdde5486cd615b4" + dependencies: + jsonparse "~1.2.0" + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" json-schema-traverse@^0.3.0: version "0.3.1" resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" - integrity sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A= json-schema@0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" - integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM= json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= + +jsonparse@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.2.0.tgz#5c0c5685107160e72fe7489bddea0b44c2bc67bd" jsprim@^1.2.2: version "1.4.1" resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" - integrity sha1-MT5mvB5cwG5Di8G3SZwuXFastqI= dependencies: assert-plus "1.0.0" extsprintf "1.3.0" json-schema "0.2.3" verror "1.10.0" +keyv@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" + dependencies: + json-buffer "3.0.0" + +kubernetes-client@^6.5.0: + version "6.5.1" + resolved "https://registry.yarnpkg.com/kubernetes-client/-/kubernetes-client-6.5.1.tgz#40dc285b7d30d8ddd083b16a6d06068521f1d017" + dependencies: + deepmerge "^2.1.1" + fluent-openapi "^1.0.0" + js-yaml "^3.10.0" + openid-client 
"^2.4.4" + qs "^6.5.2" + request "^2.88.0" + ws "^6.0.0" + +lcid@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" + dependencies: + invert-kv "^1.0.0" + +load-json-file@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" + dependencies: + graceful-fs "^4.1.2" + parse-json "^4.0.0" + pify "^3.0.0" + strip-bom "^3.0.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +lodash.assign@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7" + +lodash.clone@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clone/-/lodash.clone-4.5.0.tgz#195870450f5a13192478df4bc3d23d2dea1907b6" + +lodash.fill@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/lodash.fill/-/lodash.fill-3.4.0.tgz#a3c74ae640d053adf0dc2079f8720788e8bfef85" + +lodash.flatten@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" + +lodash.flattendeep@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz#fb030917f86a3134e5bc9bec0d69e0013ddfedb2" + +lodash.intersection@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.intersection/-/lodash.intersection-4.4.0.tgz#0a11ba631d0e95c23c7f2f4cbb9a692ed178e705" + +lodash.merge@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.1.tgz#adc25d9cb99b9391c59624f379fbba60d7111d54" + +lodash.omit@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.omit/-/lodash.omit-4.5.0.tgz#6eb19ae5a1ee1dd9df0b969e66ce0b7fa30b5e60" + +lodash.partialright@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/lodash.partialright/-/lodash.partialright-4.2.1.tgz#0130d80e83363264d40074f329b8a3e7a8a1cc4b" + +lodash.pick@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.pick/-/lodash.pick-4.4.0.tgz#52f05610fff9ded422611441ed1fc123a03001b3" + +lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + +lodash@^4.17.10, lodash@^4.17.11: + version "4.17.11" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d" + +long@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" + +lowercase-keys@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" + +lowercase-keys@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + lru-cache@^4.0.1: version "4.1.3" resolved 
"https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.3.tgz#a1175cf3496dfc8436c156c334b4955992bce69c" - integrity sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA== dependencies: pseudomap "^1.0.2" yallist "^2.1.2" +lru-cache@^4.1.3: + version "4.1.5" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd" + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +make-dir@^1.0.0, make-dir@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" + dependencies: + pify "^3.0.0" + make-error@^1.1.1: version "1.3.4" resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.4.tgz#19978ed575f9e9545d2ff8c13e33b5d18a67d535" - integrity sha512-0Dab5btKVPhibSalc9QGXb559ED7G7iLjFXBaj9Wq8O3vorueR5K5jaE3hkG6ZQINyhA/JgG6Qk4qdFQjsYV6g== + +md5-hex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/md5-hex/-/md5-hex-2.0.0.tgz#d0588e9f1c74954492ecd24ac0ac6ce997d92e33" + dependencies: + md5-o-matic "^0.1.1" + +md5-o-matic@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/md5-o-matic/-/md5-o-matic-0.1.1.tgz#822bccd65e117c514fab176b25945d54100a03c3" + +md5.js@1.3.4: + version "1.3.4" + resolved "http://registry.npmjs.org/md5.js/-/md5.js-1.3.4.tgz#e9bdbde94a20a5ac18b04340fc5764d5b09d901d" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" media-typer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" - integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g= + +mem@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mem/-/mem-1.1.0.tgz#5edd52b485ca1d900fe64895505399a0dfa45f76" + dependencies: + mimic-fn "^1.0.0" merge-descriptors@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" - integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E= + +merge-source-map@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/merge-source-map/-/merge-source-map-1.1.0.tgz#2fdde7e6020939f70906a68f2d7ae685e4c8c646" + dependencies: + source-map "^0.6.1" methods@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" - integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= mime-db@~1.35.0: version "1.35.0" resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.35.0.tgz#0569d657466491283709663ad379a99b90d9ab47" - integrity sha512-JWT/IcCTsB0Io3AhWUMjRqucrHSPsSf2xKLaRldJVULioggvkJvggZ3VXNNSRkCddE6D+BUI4HEIZIA2OjwIvg== mime-db@~1.36.0: version "1.36.0" resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.36.0.tgz#5020478db3c7fe93aad7bbcc4dcf869c43363397" - integrity sha512-L+xvyD9MkoYMXb1jAmzI/lWYAxAMCPvIBSWur0PZ5nOf5euahRLVqH//FKW9mWp2lkqUgYiXPgkzfMUFi4zVDw== mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.18: version "2.1.19" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.19.tgz#71e464537a7ef81c15f2db9d97e913fc0ff606f0" - integrity sha512-P1tKYHVSZ6uFo26mtnve4HQFE3koh1UWVkp8YUC+ESBHe945xWSoXuHHiGarDqcEZ+whpCDnlNw5LON0kLo+sw== dependencies: mime-db "~1.35.0" mime-types@~2.1.19: version "2.1.20" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.20.tgz#930cb719d571e903738520f8470911548ca2cc19" - integrity 
sha512-HrkrPaP9vGuWbLK1B1FfgAkbqNjIuy4eHlIYnFi7kamZyLLrGlo2mpcx0bBmNpKqBtYtAfGbodDddIgddSJC2A== dependencies: mime-db "~1.36.0" mime@1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/mime/-/mime-1.4.1.tgz#121f9ebc49e3766f311a76e1fa1c8003c4b03aa6" - integrity sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ== + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + +mimic-response@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" minimatch@3.0.4, minimatch@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== dependencies: brace-expansion "^1.1.7" minimist@0.0.8: version "0.0.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" - integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= minimist@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" - integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ= + +minimist@~0.0.1: + version "0.0.10" + resolved "http://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf" minipass@^2.2.1, minipass@^2.3.3: version "2.3.3" resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.3.tgz#a7dcc8b7b833f5d368759cce544dccb55f50f233" - integrity sha512-/jAn9/tEX4gnpyRATxgHEOV6xbcyxgT7iUnxo9Y3+OB0zX00TgKIv/2FZCf5brBbICcwbLqVv2ImjvWWrQMSYw== dependencies: safe-buffer "^5.1.2" yallist "^3.0.0" @@ -1211,21 +1720,18 @@ minipass@^2.2.1, minipass@^2.3.3: minizlib@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.1.0.tgz#11e13658ce46bc3a70a267aac58359d1e0c29ceb" - integrity sha512-4T6Ur/GctZ27nHfpt9THOdRZNgyJ9FZchYO1ceg5S8Q3DNLCKYy44nCZzgCJgcvx2UM8czmqak5BCxJMrq37lA== dependencies: minipass "^2.2.1" mkdirp@0.5.1, mkdirp@^0.5.0, mkdirp@^0.5.1: version "0.5.1" resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" - integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= dependencies: minimist "0.0.8" mocha@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/mocha/-/mocha-5.2.0.tgz#6d8ae508f59167f940f2b5b3c4a612ae50c90ae6" - integrity sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ== dependencies: browser-stdout "1.3.1" commander "2.15.1" @@ -1242,22 +1748,22 @@ mocha@^5.2.0: moment@2.x.x: version "2.22.2" resolved "https://registry.yarnpkg.com/moment/-/moment-2.22.2.tgz#3c257f9839fc0e93ff53149632239eb90783ff66" - integrity sha1-PCV/mDn8DpP/UxSWMiOeuQeD/2Y= ms@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" - integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + +ms@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" nan@~2.10.0: version "2.10.0" resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" - integrity sha512-bAdJv7fBLhWC+/Bls0Oza+mvTaNQtP+1RyhhhvD95pgUJz6XM5IzgmxOkItJ9tkoCiplvAnXI1tNmmUD/eScyA== needle@^2.2.1: version "2.2.1" resolved 
"https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" - integrity sha512-t/ZswCM9JTWjAdXS9VpvqhI2Ct2sL2MdY4fUXqGJaGBk13ge99ObqRksRTbBE56K+wxUXwwfZYOuZHifFW9q+Q== dependencies: debug "^2.1.2" iconv-lite "^0.4.4" @@ -1266,19 +1772,40 @@ needle@^2.2.1: negotiator@0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" - integrity sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk= + +node-forge@^0.7.6: + version "0.7.6" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.7.6.tgz#fdf3b418aee1f94f0ef642cd63486c77ca9724ac" + +node-jose@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/node-jose/-/node-jose-1.1.0.tgz#15b0808ed7cf40cc6114c95c1a4d7394051c472a" + dependencies: + base64url "^3.0.0" + es6-promise "^4.2.5" + lodash.assign "^4.2.0" + lodash.clone "^4.5.0" + lodash.fill "^3.4.0" + lodash.flatten "^4.4.0" + lodash.intersection "^4.4.0" + lodash.merge "^4.6.1" + lodash.omit "^4.5.0" + lodash.partialright "^4.2.1" + lodash.pick "^4.4.0" + lodash.uniq "^4.5.0" + long "^4.0.0" + node-forge "^0.7.6" + uuid "^3.3.2" node-nvidia-smi@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/node-nvidia-smi/-/node-nvidia-smi-1.0.0.tgz#6aa57574540b2bed91c9a80218516ffa686e5ac7" - integrity sha1-aqV1dFQLK+2RyagCGFFv+mhuWsc= dependencies: xml2js "^0.4.17" node-pre-gyp@^0.10.3: version "0.10.3" resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.3.tgz#3070040716afdc778747b61b6887bf78880b80fc" - integrity sha512-d1xFs+C/IPS8Id0qPTZ4bUT8wWryfR/OzzAFxweG+uLN85oPzyo2Iw6bVlLQ/JOdgNonXLCoRyqDzDWq4iw72A== dependencies: detect-libc "^1.0.2" mkdirp "^0.5.1" @@ -1294,12 +1821,10 @@ node-pre-gyp@^0.10.3: node-version@^1.0.0: version "1.2.0" resolved "https://registry.yarnpkg.com/node-version/-/node-version-1.2.0.tgz#34fde3ffa8e1149bd323983479dda620e1b5060d" - integrity sha512-ma6oU4Sk0qOoKEAymVoTvk8EdXEobdS7m/mAGhDJ8Rouugho48crHBORAmy5BoOcv8wraPM6xumapQp5hl4iIQ== node.extend@1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/node.extend/-/node.extend-1.0.8.tgz#bab04379f7383f4587990c9df07b6a7f65db772b" - integrity sha1-urBDefc4P0WHmQyd8Htqf2Xbdys= dependencies: is "~0.2.6" object-keys "~0.4.0" @@ -1307,35 +1832,53 @@ node.extend@1.0.8: node.flow@1.2.3: version "1.2.3" resolved "https://registry.yarnpkg.com/node.flow/-/node.flow-1.2.3.tgz#e1c44a82aeca8d78b458a77fb3dc642f2eba2649" - integrity sha1-4cRKgq7KjXi0WKd/s9xkLy66Jkk= dependencies: node.extend "1.0.8" nopt@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" - integrity sha1-0NRoWv1UFRk8jHUFYC0NF81kR00= dependencies: abbrev "1" osenv "^0.1.4" +normalize-package-data@^2.3.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-url@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" + dependencies: + prepend-http "^2.0.0" + query-string "^5.0.1" + sort-keys "^2.0.0" + npm-bundled@^1.0.1: version "1.0.3" resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" - integrity 
sha512-ByQ3oJ/5ETLyglU2+8dBObvhfWXX8dtPZDMePCahptliFX2iIuhyEszyFk401PZUNQH20vvdW5MLjJxkwU80Ow== npm-packlist@^1.1.6: version "1.1.11" resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.11.tgz#84e8c683cbe7867d34b1d357d893ce29e28a02de" - integrity sha512-CxKlZ24urLkJk+9kCm48RTQ7L4hsmgSVzEk0TLGPzzyuFxD7VNgy5Sl24tOLMzQv773a/NeJ1ce1DKeacqffEA== dependencies: ignore-walk "^3.0.1" npm-bundled "^1.0.1" +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + dependencies: + path-key "^2.0.0" + npmlog@^4.0.2: version "4.1.2" resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" - integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== dependencies: are-we-there-yet "~1.1.2" console-control-strings "~1.1.0" @@ -1345,133 +1888,282 @@ npmlog@^4.0.2: number-is-nan@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" - integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= + +nyc@^13.1.0: + version "13.1.0" + resolved "https://registry.yarnpkg.com/nyc/-/nyc-13.1.0.tgz#463665c7ff6b5798e322624a5eb449a678db90e3" + dependencies: + archy "^1.0.0" + arrify "^1.0.1" + caching-transform "^2.0.0" + convert-source-map "^1.6.0" + debug-log "^1.0.1" + find-cache-dir "^2.0.0" + find-up "^3.0.0" + foreground-child "^1.5.6" + glob "^7.1.3" + istanbul-lib-coverage "^2.0.1" + istanbul-lib-hook "^2.0.1" + istanbul-lib-instrument "^3.0.0" + istanbul-lib-report "^2.0.2" + istanbul-lib-source-maps "^2.0.1" + istanbul-reports "^2.0.1" + make-dir "^1.3.0" + merge-source-map "^1.1.0" + resolve-from "^4.0.0" + rimraf "^2.6.2" + signal-exit "^3.0.2" + spawn-wrap "^1.4.2" + test-exclude "^5.0.0" + uuid "^3.3.2" + yargs "11.1.0" + yargs-parser "^9.0.2" oauth-sign@~0.8.2: version "0.8.2" resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" - integrity sha1-Rqarfwrq2N6unsBWV4C31O/rnUM= oauth-sign@~0.9.0: version "0.9.0" resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" - integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== object-assign@^4.0.1, object-assign@^4.1.0: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" - integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= object-keys@~0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-0.4.0.tgz#28a6aae7428dd2c3a92f3d95f21335dd204e0336" - integrity sha1-KKaq50KN0sOpLz2V8hM13SBOAzY= + +oidc-token-hash@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/oidc-token-hash/-/oidc-token-hash-3.0.1.tgz#f9e2496a3eea5f755671be54a97f57170a74081d" + dependencies: + base64url "^3.0.0" on-finished@~2.3.0: version "2.3.0" resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" - integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc= dependencies: ee-first "1.1.1" once@^1.3.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= dependencies: wrappy "1" -os-homedir@^1.0.0: +openid-client@^2.4.4: + version "2.4.5" + resolved 
"https://registry.yarnpkg.com/openid-client/-/openid-client-2.4.5.tgz#675e42dc50ab25bc20a625533f5da7e0f573b8cb" + dependencies: + base64url "^3.0.0" + got "^8.3.2" + lodash "^4.17.11" + lru-cache "^4.1.3" + node-jose "^1.1.0" + oidc-token-hash "^3.0.1" + p-any "^1.1.0" + +optimist@^0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686" + dependencies: + minimist "~0.0.1" + wordwrap "~0.0.2" + +os-homedir@^1.0.0, os-homedir@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" - integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= + +os-locale@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-2.1.0.tgz#42bc2900a6b5b8bd17376c8e882b65afccf24bf2" + dependencies: + execa "^0.7.0" + lcid "^1.0.0" + mem "^1.1.0" os-tmpdir@^1.0.0, os-tmpdir@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" - integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= osenv@^0.1.4: version "0.1.5" resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" - integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== dependencies: os-homedir "^1.0.0" os-tmpdir "^1.0.0" +p-any@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-any/-/p-any-1.1.0.tgz#1d03835c7eed1e34b8e539c47b7b60d0d015d4e1" + dependencies: + p-some "^2.0.0" + +p-cancelable@^0.4.0: + version "0.4.1" + resolved "http://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-is-promise@^1.1.0: + version "1.1.0" + resolved "http://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + dependencies: + p-try "^1.0.0" + +p-limit@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.0.0.tgz#e624ed54ee8c460a778b3c9f3670496ff8a57aec" + dependencies: + p-try "^2.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + dependencies: + p-limit "^2.0.0" + +p-some@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-some/-/p-some-2.0.1.tgz#65d87c8b154edbcf5221d167778b6d2e150f6f06" + dependencies: + aggregate-error "^1.0.0" + +p-timeout@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" + dependencies: + p-finally "^1.0.0" + +p-try@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + +p-try@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.0.0.tgz#85080bb87c64688fa47996fe8f7dfbe8211760b1" + +package-hash@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/package-hash/-/package-hash-2.0.0.tgz#78ae326c89e05a4d813b68601977af05c00d2a0d" + dependencies: + graceful-fs "^4.1.11" + lodash.flattendeep "^4.4.0" + md5-hex "^2.0.0" + release-zalgo "^1.0.0" + parent-module@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-0.1.0.tgz#b5292863a1e8c476ecf857e7d75c98920b24b8a6" - integrity sha1-tSkoY6HoxHbs+Ffn11yYkgskuKY= dependencies: callsites "^1.0.0" +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + parseurl@~1.3.2: version "1.3.2" resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.2.tgz#fc289d4ed8993119460c156253262cdc8de65bf3" - integrity sha1-/CidTtiZMRlGDBViUyYs3I3mW/M= path-dirname@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" - integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA= + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-key@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" path-parse@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1" - integrity sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME= path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w= + +path-type@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + dependencies: + pify "^3.0.0" pathval@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.0.tgz#b942e6d4bde653005ef6b71361def8727d0645e0" - integrity sha1-uULm1L3mUwBe9rcTYd74cn0GReA= performance-now@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" - integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= pify@^2.0.0: version "2.3.0" resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" - integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" pinkie-promise@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" - integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= dependencies: pinkie "^2.0.0" pinkie@^2.0.0: version "2.0.4" resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" - integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pkg-dir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3" + dependencies: + find-up "^3.0.0" + +prepend-http@^2.0.0: + version 
"2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + +process-nextick-args@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" process-nextick-args@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" - integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw== promise-polyfill@^6.0.1: version "6.1.0" resolved "https://registry.yarnpkg.com/promise-polyfill/-/promise-polyfill-6.1.0.tgz#dfa96943ea9c121fca4de9b5868cb39d3472e057" - integrity sha1-36lpQ+qcEh/KTem1hoyznTRy4Fc= proxy-addr@~2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.3.tgz#355f262505a621646b3130a728eb647e22055341" - integrity sha512-jQTChiCJteusULxjBp8+jftSQE5Obdl3k4cnmLA6WXtK6XFuWRnvVL7aCiBqaLPM8c4ph0S4tKna8XvmIwEnXQ== dependencies: forwarded "~0.1.2" ipaddr.js "1.6.0" @@ -1479,37 +2171,42 @@ proxy-addr@~2.0.3: pseudomap@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" - integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= psl@^1.1.24: version "1.1.29" resolved "https://registry.yarnpkg.com/psl/-/psl-1.1.29.tgz#60f580d360170bb722a797cc704411e6da850c67" - integrity sha512-AeUmQ0oLN02flVHXWh9sSJF7mcdFq0ppid/JkErufc3hGIV/AMa8Fo9VgDo/cT2jFdOWoFvHp90qqBH54W+gjQ== punycode@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" - integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= qs@6.5.1: version "6.5.1" resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.1.tgz#349cdf6eef89ec45c12d7d5eb3fc0c870343a6d8" - integrity sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A== + +qs@^6.5.2: + version "6.6.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.6.0.tgz#a99c0f69a8d26bf7ef012f871cdabb0aee4424c2" qs@~6.5.1, qs@~6.5.2: version "6.5.2" resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" - integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== + +query-string@^5.0.1: + version "5.1.1" + resolved "http://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" range-parser@~1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" - integrity sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4= raw-body@2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.3.2.tgz#bcd60c77d3eb93cde0050295c3f379389bc88f89" - integrity sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k= dependencies: bytes "3.0.0" http-errors "1.6.2" @@ -1519,17 +2216,30 @@ raw-body@2.3.2: rc@^1.2.7: version "1.2.8" resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" - integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== dependencies: deep-extend "^0.6.0" ini "~1.3.0" minimist "^1.2.0" strip-json-comments "~2.0.1" -readable-stream@^2.0.6: +read-pkg-up@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-4.0.0.tgz#1b221c6088ba7799601c808f91161c66e58f8978" + dependencies: + find-up "^3.0.0" + read-pkg "^3.0.0" + +read-pkg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" + dependencies: + load-json-file "^4.0.0" + normalize-package-data "^2.3.2" + path-type "^3.0.0" + +readable-stream@^2.0.0, readable-stream@^2.0.6: version "2.3.6" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" - integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== + resolved "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" dependencies: core-util-is "~1.0.0" inherits "~2.0.3" @@ -1539,15 +2249,30 @@ readable-stream@^2.0.6: string_decoder "~1.1.1" util-deprecate "~1.0.1" +readable-stream@~2.0.0: + version "2.0.6" + resolved "http://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + string_decoder "~0.10.x" + util-deprecate "~1.0.1" + reflect-metadata@^0.1.10: version "0.1.12" resolved "https://registry.yarnpkg.com/reflect-metadata/-/reflect-metadata-0.1.12.tgz#311bf0c6b63cd782f228a81abe146a2bfa9c56f2" - integrity sha512-n+IyV+nGz3+0q3/Yf1ra12KpCyi001bi4XFxSjbiWWjfqb52iTTtpGXmCCAOWWIAn9KEuFZKGqBERHmrtScZ3A== -request@^2.74.0: +release-zalgo@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/release-zalgo/-/release-zalgo-1.0.0.tgz#09700b7e5074329739330e535c5a90fb67851730" + dependencies: + es6-error "^4.0.1" + +request@^2.74.0, request@^2.86.0, request@^2.88.0: version "2.88.0" resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" - integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== dependencies: aws-sign2 "~0.7.0" aws4 "^1.8.0" @@ -1573,7 +2298,6 @@ request@^2.74.0: request@^2.87.0: version "2.87.0" resolved "https://registry.yarnpkg.com/request/-/request-2.87.0.tgz#32f00235cd08d482b4d0d68db93a829c0ed5756e" - integrity sha512-fcogkm7Az5bsS6Sl0sibkbhcKsnyon/jV1kF3ajGmF0c8HrttdKTPRT9hieOaQHA5HEq6r8OyWOo/o781C1tNw== dependencies: aws-sign2 "~0.7.0" aws4 "^1.6.0" @@ -1596,70 +2320,85 @@ request@^2.87.0: tunnel-agent "^0.6.0" uuid "^3.1.0" +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + require-glob@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/require-glob/-/require-glob-3.2.0.tgz#90bfe2c8efb4b9f972eb9a3f5e580832e04f64d3" - integrity sha1-kL/iyO+0ufly65o/XlgIMuBPZNM= dependencies: glob-parent "^3.0.0" globby "^6.0.0" parent-module "^0.1.0" +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + resolve@^1.3.2: version "1.8.1" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.8.1.tgz#82f1ec19a423ac1fbd080b0bab06ba36e84a7a26" - integrity 
sha512-AicPrAC7Qu1JxPCZ9ZgCZlY35QgFnNqc+0LtbRNxnVw4TXvjQ72wnuL9JQcEBgXkI9JM8MsT9kaQoHcpCRJOYA== dependencies: path-parse "^1.0.5" -rimraf@^2.6.1: +responselike@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + dependencies: + lowercase-keys "^1.0.0" + +rimraf@^2.6.1, rimraf@^2.6.2: version "2.6.2" resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" - integrity sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w== dependencies: glob "^7.0.5" rmdir@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/rmdir/-/rmdir-1.2.0.tgz#4fe0357cb06168c258e73e968093dc4e8a0f3253" - integrity sha1-T+A1fLBhaMJY5z6WgJPcTooPMlM= dependencies: node.flow "1.2.3" rx@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/rx/-/rx-4.1.0.tgz#a5f13ff79ef3b740fe30aa803fb09f98805d4782" - integrity sha1-pfE/957zt0D+MKqAP7CfmIBdR4I= safe-buffer@5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.1.tgz#893312af69b2123def71f57889001671eeb2c853" - integrity sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg== safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== "safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2: version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sax@0.5.x: + version "0.5.8" + resolved "http://registry.npmjs.org/sax/-/sax-0.5.8.tgz#d472db228eb331c2506b0e8c15524adb939d12c1" sax@>=0.6.0, sax@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" - integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +"semver@2 || 3 || 4 || 5", semver@^5.5.0: + version "5.6.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.6.0.tgz#7e74256fbaa49c75aa7c7a205cc22799cac80004" semver@^5.1.0, semver@^5.3.0: version "5.5.0" resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" - integrity sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA== send@0.16.2: version "0.16.2" resolved "https://registry.yarnpkg.com/send/-/send-0.16.2.tgz#6ecca1e0f8c156d141597559848df64730a6bbc1" - integrity sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw== dependencies: debug "2.6.9" depd "~1.1.2" @@ -1678,55 +2417,99 @@ send@0.16.2: serve-static@1.13.2: version "1.13.2" resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.13.2.tgz#095e8472fd5b46237db50ce486a43f4b86c6cec1" - integrity sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw== dependencies: encodeurl "~1.0.2" escape-html "~1.0.3" parseurl "~1.3.2" send "0.16.2" -set-blocking@~2.0.0: +set-blocking@^2.0.0, set-blocking@~2.0.0: version "2.0.0" resolved 
"https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" - integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= setprototypeof@1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04" - integrity sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ= setprototypeof@1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" - integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ== -signal-exit@^3.0.0: +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + dependencies: + shebang-regex "^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + +signal-exit@^3.0.0, signal-exit@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" - integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= + +sort-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" + dependencies: + is-plain-obj "^1.0.0" source-map-support@^0.5.6: version "0.5.6" resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.6.tgz#4435cee46b1aab62b8e8610ce60f788091c51c13" - integrity sha512-N4KXEz7jcKqPf2b2vZF11lQIz9W5ZMuUcIOGj243lduidkf2fjkVKJS9vNxVWn3u/uxX38AcE8U9nnH9FPcq+g== dependencies: buffer-from "^1.0.0" source-map "^0.6.0" -source-map@^0.6.0: +source-map@^0.5.0: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" - integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +spawn-wrap@^1.4.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/spawn-wrap/-/spawn-wrap-1.4.2.tgz#cff58e73a8224617b6561abdc32586ea0c82248c" + dependencies: + foreground-child "^1.5.6" + mkdirp "^0.5.0" + os-homedir "^1.0.1" + rimraf "^2.6.2" + signal-exit "^3.0.2" + which "^1.3.0" + +spdx-correct@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz#2ea450aee74f2a89bfb94519c07fcd6f41322977" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.3.tgz#81c0ce8f21474756148bbb5f3bfc0f36bf15d76e" sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" 
- integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= sqlite3@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/sqlite3/-/sqlite3-4.0.2.tgz#1bbeb68b03ead5d499e42a3a1b140064791c5a64" - integrity sha512-51ferIRwYOhzUEtogqOa/y9supADlAht98bF/gbIi6WkzRJX6Yioldxbzj1MV4yV+LgdKD/kkHwFTeFXOG4htA== dependencies: nan "~2.10.0" node-pre-gyp "^0.10.3" @@ -1735,7 +2518,6 @@ sqlite3@^4.0.2: ssh2-streams@~0.2.0: version "0.2.1" resolved "https://registry.yarnpkg.com/ssh2-streams/-/ssh2-streams-0.2.1.tgz#9c9c9964be60e9644575af328677f64b1e5cbd79" - integrity sha512-3zCOsmunh1JWgPshfhKmBCL3lUtHPoh+a/cyQ49Ft0Q0aF7xgN06b76L+oKtFi0fgO57FLjFztb1GlJcEZ4a3Q== dependencies: asn1 "~0.2.0" semver "^5.1.0" @@ -1744,14 +2526,12 @@ ssh2-streams@~0.2.0: ssh2@^0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/ssh2/-/ssh2-0.6.1.tgz#5dde1a7394bb978b1f9c2f014affee2f5493bd40" - integrity sha512-fNvocq+xetsaAZtBG/9Vhh0GDjw1jQeW7Uq/DPh4fVrJd0XxSfXAqBjOGVk4o2jyWHvyC6HiaPFpfHlR12coDw== dependencies: ssh2-streams "~0.2.0" sshpk@^1.7.0: version "1.14.2" resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.14.2.tgz#c6fc61648a3d9c4e764fd3fcdf4ea105e492ba98" - integrity sha1-xvxhZIo9nE52T9P8306hBeSSupg= dependencies: asn1 "~0.2.3" assert-plus "^1.0.0" @@ -1767,87 +2547,95 @@ sshpk@^1.7.0: "statuses@>= 1.3.1 < 2", "statuses@>= 1.4.0 < 2": version "1.5.0" resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" - integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= statuses@~1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.4.0.tgz#bb73d446da2796106efcc1b601a253d6c46bd087" - integrity sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew== stream-buffers@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/stream-buffers/-/stream-buffers-3.0.2.tgz#5249005a8d5c2d00b3a32e6e0a6ea209dc4f3521" - integrity sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ== streamsearch@~0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" - integrity sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo= + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" string-width@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" - integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= dependencies: code-point-at "^1.0.0" is-fullwidth-code-point "^1.0.0" strip-ansi "^3.0.0" -"string-width@^1.0.2 || 2": +"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" - integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== dependencies: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string_decoder@~0.10.x: + version "0.10.31" + resolved "http://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + string_decoder@~1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" - integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== 
dependencies: safe-buffer "~5.1.0" strip-ansi@^3.0.0, strip-ansi@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" - integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= dependencies: ansi-regex "^2.0.0" strip-ansi@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" - integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= dependencies: ansi-regex "^3.0.0" +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "http://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + strip-json-comments@~2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= supports-color@5.4.0, supports-color@^5.3.0: version "5.4.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" - integrity sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w== dependencies: has-flag "^3.0.0" supports-color@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" - integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= + +supports-color@^5.4.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + dependencies: + has-flag "^3.0.0" tail-stream@^0.3.4: version "0.3.4" resolved "https://registry.yarnpkg.com/tail-stream/-/tail-stream-0.3.4.tgz#bc675a20e92732b1a6a7cc65d6be66f7817fd5c1" - integrity sha1-vGdaIOknMrGmp8xl1r5m94F/1cE= tar@^4: version "4.4.4" resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" - integrity sha512-mq9ixIYfNF9SK0IS/h2HKMu8Q2iaCuhDDsZhdEag/FHv8fOaYld4vN7ouMgcSSt5WKZzPs8atclTcJm36OTh4w== dependencies: chownr "^1.0.1" fs-minipass "^1.2.5" @@ -1857,31 +2645,44 @@ tar@^4: safe-buffer "^5.1.2" yallist "^3.0.2" +test-exclude@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-5.0.0.tgz#cdce7cece785e0e829cd5c2b27baf18bc583cfb7" + dependencies: + arrify "^1.0.1" + minimatch "^3.0.4" + read-pkg-up "^4.0.0" + require-main-filename "^1.0.1" + +timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + tmp@^0.0.33: version "0.0.33" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" - integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== dependencies: os-tmpdir "~1.0.2" +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + topo@1.x.x: version "1.1.0" resolved "https://registry.yarnpkg.com/topo/-/topo-1.1.0.tgz#e9d751615d1bb87dc865db182fa1ca0a5ef536d5" - integrity sha1-6ddRYV0buH3IZdsYL6HKCl71NtU= dependencies: hoek "2.x.x" tough-cookie@~2.3.3: version "2.3.4" resolved 
"https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" - integrity sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA== dependencies: punycode "^1.4.1" tough-cookie@~2.4.3: version "2.4.3" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" - integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ== dependencies: psl "^1.1.24" punycode "^1.4.1" @@ -1889,17 +2690,18 @@ tough-cookie@~2.4.3: tree-kill@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/tree-kill/-/tree-kill-1.2.0.tgz#5846786237b4239014f05db156b643212d4c6f36" - integrity sha512-DlX6dR0lOIRDFxI0mjL9IYg6OTncLm/Zt+JiBhE5OlFcAR8yc9S7FFXU9so0oda47frdM/JFsk7UjNt9vscKcg== + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" ts-deferred@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/ts-deferred/-/ts-deferred-1.0.4.tgz#58145ebaeef5b8f2a290b8cec3d060839f9489c7" - integrity sha1-WBReuu71uPKikLjOw9Bgg5+Uicc= ts-node@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-7.0.0.tgz#a94a13c75e5e1aa6b82814b84c68deb339ba7bff" - integrity sha512-klJsfswHP0FuOLsvBZ/zzCfUvakOSSxds78mVeK7I+qP76YWtxf16hEZsp3U+b0kIo82R5UatGFeblYMqabb2Q== dependencies: arrify "^1.0.0" buffer-from "^1.1.0" @@ -1913,19 +2715,16 @@ ts-node@^7.0.0: tslib@^1.8.0, tslib@^1.8.1: version "1.9.3" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.9.3.tgz#d7e4dd79245d85428c4d7e4822a79917954ca286" - integrity sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ== tslint-microsoft-contrib@^5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/tslint-microsoft-contrib/-/tslint-microsoft-contrib-5.1.0.tgz#777c32d51aba16f4565e47aac749a1631176cd9f" - integrity sha512-p7xN6cN6y2REFT/11Xl4OAPdhPLHcsZk2IfA8rFS9wi3hhkY6Shz+yoJ61Z+GJ8L4TsRhIbG/09w3e1sdOHs9g== dependencies: tsutils "^2.12.1" tslint@^5.11.0: version "5.11.0" resolved "https://registry.yarnpkg.com/tslint/-/tslint-5.11.0.tgz#98f30c02eae3cde7006201e4c33cb08b48581eed" - integrity sha1-mPMMAurjzecAYgHkwzywi0hYHu0= dependencies: babel-code-frame "^6.22.0" builtin-modules "^1.1.1" @@ -1943,31 +2742,26 @@ tslint@^5.11.0: tsutils@^2.12.1, tsutils@^2.27.2: version "2.29.0" resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-2.29.0.tgz#32b488501467acbedd4b85498673a0812aca0b99" - integrity sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA== dependencies: tslib "^1.8.1" tunnel-agent@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= dependencies: safe-buffer "^5.0.1" tweetnacl@^0.14.3, tweetnacl@~0.14.0: version "0.14.5" resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= type-detect@^4.0.0: version "4.0.8" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" - integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== type-is@~1.6.15, type-is@~1.6.16: version "1.6.16" resolved 
"https://registry.yarnpkg.com/type-is/-/type-is-1.6.16.tgz#f89ce341541c672b25ee7ae3c73dee3b2be50194" - integrity sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q== dependencies: media-typer "0.3.0" mime-types "~2.1.18" @@ -1975,7 +2769,6 @@ type-is@~1.6.15, type-is@~1.6.16: typescript-ioc@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/typescript-ioc/-/typescript-ioc-1.2.4.tgz#21290097b163632de58a3abba7553daef8651f49" - integrity sha512-KO+isZO1tmhgKL5RWMU+AZvFGzyk0LnUMBcSLVm2Xo/iZlIyu/HD2o5vdg5kXJTJMs8otbDzOUsPt8/JFr96cw== dependencies: reflect-metadata "^0.1.10" require-glob "^3.2.0" @@ -1983,42 +2776,66 @@ typescript-ioc@^1.2.4: typescript-string-operations@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/typescript-string-operations/-/typescript-string-operations-1.3.1.tgz#461b886cc9ccd4dd16810b1f248b2e6f6580956b" - integrity sha512-DsT4kq8k3WT48EhdI/6DanReYGbX4Wg18z8vSeHH2wMfSFqdjiI40jrVABDH2WZ1RhCt7WoN/iY+LPhxrUHCqw== typescript@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.0.1.tgz#43738f29585d3a87575520a4b93ab6026ef11fdb" - integrity sha512-zQIMOmC+372pC/CCVLqnQ0zSBiY7HHodU7mpQdjiZddek4GMj31I3dUJ7gAs9o65X7mnRma6OokOkc6f9jjfBg== + +uglify-js@^3.1.4: + version "3.4.9" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.4.9.tgz#af02f180c1207d76432e473ed24a28f4a782bae3" + dependencies: + commander "~2.17.1" + source-map "~0.6.1" + +underscore@~1.8.3: + version "1.8.3" + resolved "http://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz#4f3fb53b106e6097fcf9cb4109f2a5e9bdfa5022" unpipe@1.0.0, unpipe@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" - integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + dependencies: + prepend-http "^2.0.0" + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" util-deprecate@~1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" - integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= utils-merge@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" - integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= -uuid@^3.1.0, uuid@^3.3.2: +uuid@^3.0.0, uuid@^3.1.0, uuid@^3.3.2: version "3.3.2" resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" - integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== + +validate-npm-package-license@^3.0.1: + version "3.0.4" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +validator@~9.4.1: + version "9.4.1" + resolved "http://registry.npmjs.org/validator/-/validator-9.4.1.tgz#abf466d398b561cd243050112c6ff1de6cc12663" vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" - integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= verror@1.10.0: version 
"1.10.0" resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= dependencies: assert-plus "^1.0.0" core-util-is "1.0.2" @@ -2027,55 +2844,108 @@ verror@1.10.0: webhdfs@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/webhdfs/-/webhdfs-1.2.0.tgz#c41b08ae33944a0220863bfd4b6719b9aaec1d37" - integrity sha512-h8D/NT7ruDMuGCdJNEJHJh8vDTEtZ5hBL+eRzXTq/INTd92LKOhsTCwlQI+8kTt79qPZq5O8ev7j/Y19VeYCHQ== dependencies: buffer-stream-reader "^0.1.1" extend "^3.0.0" request "^2.74.0" -which@^1.2.9: +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + +which@^1.2.9, which@^1.3.0: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" - integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== dependencies: isexe "^2.0.0" wide-align@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" - integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== dependencies: string-width "^1.0.2 || 2" +wordwrap@~0.0.2: + version "0.0.3" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" + +wrap-ansi@^2.0.0: + version "2.1.0" + resolved "http://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +write-file-atomic@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.3.0.tgz#1ff61575c2e2a4e8e510d6fa4e243cce183999ab" + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +ws@^6.0.0: + version "6.1.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-6.1.2.tgz#3cc7462e98792f0ac679424148903ded3b9c3ad8" + dependencies: + async-limiter "~1.0.0" + +xml2js@0.2.8: + version "0.2.8" + resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.2.8.tgz#9b81690931631ff09d1957549faf54f4f980b3c2" + dependencies: + sax "0.5.x" xml2js@^0.4.17: version "0.4.19" resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.19.tgz#686c20f213209e94abf0d1bcf1efaa291c7827a7" - integrity sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q== dependencies: sax ">=0.6.0" xmlbuilder "~9.0.1" -xmlbuilder@~9.0.1: +xmlbuilder@^9.0.7, xmlbuilder@~9.0.1: version "9.0.7" - resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-9.0.7.tgz#132ee63d2ec5565c557e20f4c22df9aca686b10d" - integrity sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0= + resolved "http://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz#132ee63d2ec5565c557e20f4c22df9aca686b10d" + +y18n@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" yallist@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" - integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI= yallist@^3.0.0, yallist@^3.0.2: version "3.0.2" resolved 
"https://registry.yarnpkg.com/yallist/-/yallist-3.0.2.tgz#8452b4bb7e83c7c188d8041c1a837c773d6d8bb9" - integrity sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k= + +yargs-parser@^9.0.2: + version "9.0.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-9.0.2.tgz#9ccf6a43460fe4ed40a9bb68f48d43b8a68cc077" + dependencies: + camelcase "^4.1.0" + +yargs@11.1.0: + version "11.1.0" + resolved "http://registry.npmjs.org/yargs/-/yargs-11.1.0.tgz#90b869934ed6e871115ea2ff58b03f4724ed2d77" + dependencies: + cliui "^4.0.0" + decamelize "^1.1.1" + find-up "^2.1.0" + get-caller-file "^1.0.1" + os-locale "^2.0.0" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1" + yargs-parser "^9.0.2" yn@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/yn/-/yn-2.0.0.tgz#e5adabc8acf408f6385fc76495684c88e6af689a" - integrity sha1-5a2ryKz0CPY4X8dklWhMiOavaJo= diff --git a/src/sdk/pynni/nni/__main__.py b/src/sdk/pynni/nni/__main__.py index d69baa195b..46edf8661a 100644 --- a/src/sdk/pynni/nni/__main__.py +++ b/src/sdk/pynni/nni/__main__.py @@ -34,6 +34,10 @@ logger = logging.getLogger('nni.main') logger.debug('START') +if os.environ.get('COVERAGE_PROCESS_START'): + import coverage + coverage.process_startup() + def augment_classargs(input_class_args, classname): if classname in ClassArgs: for key, value in ClassArgs[classname].items(): diff --git a/src/sdk/pynni/nni/msg_dispatcher_base.py b/src/sdk/pynni/nni/msg_dispatcher_base.py index bcb8cc1a3a..d366ac2b50 100644 --- a/src/sdk/pynni/nni/msg_dispatcher_base.py +++ b/src/sdk/pynni/nni/msg_dispatcher_base.py @@ -46,7 +46,7 @@ def run(self): while True: _logger.debug('waiting receive_message') command, data = receive() - if command is None: + if command is None or command is CommandType.Terminate: break if multi_thread_enabled(): self.pool.map_async(self.handle_request, [(command, data)]) @@ -64,11 +64,6 @@ def handle_request(self, request): _logger.debug('handle request: command: [{}], data: [{}]'.format(command, data)) - if command is CommandType.Terminate: - # if receive Terminate command, exit process - _logger.info('Receive Terminate command from NNI manager, terminating') - exit(0) - data = json_tricks.loads(data) command_handlers = { diff --git a/src/sdk/pynni/setup.py b/src/sdk/pynni/setup.py index 622192bfbe..e7ba29dad2 100644 --- a/src/sdk/pynni/setup.py +++ b/src/sdk/pynni/setup.py @@ -35,7 +35,8 @@ def read(fname): 'hyperopt', 'json_tricks', 'numpy', - 'scipy' + 'scipy', + 'coverage' ], package_data = {'nni': ['**/requirements.txt']}, diff --git a/src/sdk/pynni/ut.sh b/src/sdk/pynni/ut.sh new file mode 100644 index 0000000000..4ee1e38070 --- /dev/null +++ b/src/sdk/pynni/ut.sh @@ -0,0 +1,3 @@ +#!/bin/bash +coverage run setup.py test +coverage html diff --git a/test/.coveragerc b/test/.coveragerc new file mode 100644 index 0000000000..fdcb4e097d --- /dev/null +++ b/test/.coveragerc @@ -0,0 +1,31 @@ +# .coveragerc to control coverage.py +[run] +branch = True +parallel = True +data_file = ${COVERAGE_DATA_FILE} +source = nni, nni_cmd, nni_trial_tool + +concurrency = multiprocessing + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if 
non-runnable code isn't run:
+    if 0:
+    if __name__ == .__main__.:
+
+ignore_errors = True
+
+[html]
+directory = ${COVERAGE_HTML_DIR}
diff --git a/test/it.sh b/test/it.sh
new file mode 100644
index 0000000000..bb6abd6a44
--- /dev/null
+++ b/test/it.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+CWD=${PWD}
+
+## Export certain environment variables for the unittest code to work
+export COVERAGE_PROCESS_START=${CWD}/.coveragerc
+export COVERAGE_DATA_FILE=${CWD}/coverage/data
+export COVERAGE_HTML_DIR=${CWD}/coverhtml
+
+rm ${COVERAGE_DATA_FILE}*
+rm -rf ${COVERAGE_HTML_DIR}/*
+mkdir ${CWD}/coverage
+mkdir ${COVERAGE_HTML_DIR}
+
+## ------Run integration test------
+echo "===========================Testing: integration test==========================="
+coverage run sdk_test.py
+coverage combine
+coverage html
diff --git a/tools/nni_cmd/nnictl.py b/tools/nni_cmd/nnictl.py
index e76c86a5cc..94b1dbdf62 100644
--- a/tools/nni_cmd/nnictl.py
+++ b/tools/nni_cmd/nnictl.py
@@ -28,6 +28,10 @@ from .constants import *
 from .tensorboard_utils import *
 
+if os.environ.get('COVERAGE_PROCESS_START'):
+    import coverage
+    coverage.process_startup()
+
 def nni_info(*args):
     if args[0].version:
         print(pkg_resources.get_distribution('nni').version)

From 949aacbdd97562fd7f77ca16c7ba56f6e16c7536 Mon Sep 17 00:00:00 2001
From: Scarlett Li <39592018+scarlett2018@users.noreply.github.com>
Date: Fri, 4 Jan 2019 12:32:40 +0800
Subject: [PATCH 09/54] Updating Readme to add the Related Projects like PAI, KubeLauncher and MMdnn (#565)

* Adding related projects to Readme
---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 4d57b720a0..e114a572b3 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,13 @@ The tool dispatches and runs trial jobs generated by tuning algorithms to search
 * Researchers and data scientists who want to implement their own AutoML algorithms and compare it with other algorithms.
 * ML Platform owners who want to support AutoML in their platform.
 
+## Related Projects
+Aiming at openness and advancing state-of-the-art technology, [Microsoft Research (MSR)](https://www.microsoft.com/en-us/research/group/systems-research-group-asia/) has also released a few other open source projects.
+* [OpenPAI](https://github.com/Microsoft/pai) : an open source platform that provides complete AI model training and resource management capabilities; it is easy to extend and supports on-premise, cloud, and hybrid environments at various scales.
+* [FrameworkController](https://github.com/Microsoft/frameworkcontroller) : an open source general-purpose Kubernetes Pod Controller that orchestrates all kinds of applications on Kubernetes with a single controller.
+* [MMdnn](https://github.com/Microsoft/MMdnn) : a comprehensive, cross-framework solution to convert, visualize, and diagnose deep neural network models. The "MM" in MMdnn stands for model management, and "dnn" is an acronym for deep neural network.
+We encourage researchers and students to leverage these projects to accelerate AI development and research.
+
 ## **Install & Verify**
 
 **Install through pip**

From e3332641fd2805fd789edf0215a538e38142bfca Mon Sep 17 00:00:00 2001
From: SparkSnail
Date: Fri, 4 Jan 2019 16:59:38 +0800
Subject: [PATCH 10/54] Fix remote TrainingService bug, change forEach to "for of" (#564)

Trial jobs could not be stopped on the remote machine when the experiment was stopped, because async/await does not work as expected inside forEach; see https://codeburst.io/javascript-async-await-with-foreach-b6ba62bbf404.
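For readers unfamiliar with the pitfall this commit fixes, here is a minimal, standalone sketch (illustrative only — `fetchJob`, `listWithForEach`, `listWithForOf`, and the 10 ms delay are hypothetical names, not code from this repository) of why `await` inside a `forEach` callback never blocks the caller, while `for...of` does:

```typescript
// A toy async lookup standing in for something like getTrialJob().
const fetchJob = (id: string): Promise<string> =>
    new Promise((resolve) => setTimeout(() => resolve(`job-${id}`), 10));

async function listWithForEach(ids: string[]): Promise<string[]> {
    const jobs: string[] = [];
    ids.forEach(async (id) => {
        // This await suspends only the callback; forEach discards the
        // promise the callback returns, so nothing waits for the push.
        jobs.push(await fetchJob(id));
    });
    return jobs; // Resolves before any push has run: [].
}

async function listWithForOf(ids: string[]): Promise<string[]> {
    const jobs: string[] = [];
    for (const id of ids) {
        // Here await suspends the enclosing async function itself.
        jobs.push(await fetchJob(id));
    }
    return jobs; // Resolves with one entry per id, in order.
}

listWithForEach(['a', 'b']).then((r) => console.log(r)); // []
listWithForOf(['a', 'b']).then((r) => console.log(r));   // [ 'job-a', 'job-b' ]
```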
--- .../kubernetes/kubernetesTrainingService.ts | 8 ++++---- .../training_service/pai/paiTrainingService.ts | 6 +++--- .../remote_machine/remoteMachineTrainingService.ts | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts b/src/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts index 631b0e98ea..0000ab09a2 100644 --- a/src/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts +++ b/src/nni_manager/training_service/kubernetes/kubernetesTrainingService.ts @@ -81,19 +81,19 @@ abstract class KubernetesTrainingService { } } - public listTrialJobs(): Promise { + public async listTrialJobs(): Promise { const jobs: TrialJobDetail[] = []; - this.trialJobsMap.forEach(async (value: KubernetesTrialJobDetail, key: string) => { + for (const [key, value] of this.trialJobsMap) { if (value.form.jobType === 'TRIAL') { jobs.push(await this.getTrialJob(key)); } - }); + }; return Promise.resolve(jobs); } - public getTrialJob(trialJobId: string): Promise { + public async getTrialJob(trialJobId: string): Promise { const kubernetesTrialJob: TrialJobDetail | undefined = this.trialJobsMap.get(trialJobId); diff --git a/src/nni_manager/training_service/pai/paiTrainingService.ts b/src/nni_manager/training_service/pai/paiTrainingService.ts index c1ef8ccb60..17ac31836e 100644 --- a/src/nni_manager/training_service/pai/paiTrainingService.ts +++ b/src/nni_manager/training_service/pai/paiTrainingService.ts @@ -104,16 +104,16 @@ class PAITrainingService implements TrainingService { public async listTrialJobs(): Promise { const jobs: TrialJobDetail[] = []; - this.trialJobsMap.forEach(async (value: PAITrialJobDetail, key: string) => { + for (const [key, value] of this.trialJobsMap) { if (value.form.jobType === 'TRIAL') { jobs.push(await this.getTrialJob(key)); } - }); + }; return Promise.resolve(jobs); } - public getTrialJob(trialJobId: string): Promise { + public async getTrialJob(trialJobId: string): Promise { if(!this.paiClusterConfig) { throw new Error('PAI Cluster config is not initialized'); } diff --git a/src/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts b/src/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts index 5302d90bbb..9efc231772 100644 --- a/src/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts +++ b/src/nni_manager/training_service/remote_machine/remoteMachineTrainingService.ts @@ -110,15 +110,15 @@ class RemoteMachineTrainingService implements TrainingService { /** * List submitted trial jobs */ - public listTrialJobs(): Promise { + public async listTrialJobs(): Promise { const jobs: TrialJobDetail[] = []; const deferred: Deferred = new Deferred(); - this.trialJobsMap.forEach(async (value: RemoteMachineTrialJobDetail, key: string) => { + for (const [key, value] of this.trialJobsMap) { if (value.form.jobType === 'TRIAL') { jobs.push(await this.getTrialJob(key)); } - }); + }; deferred.resolve(jobs); return deferred.promise; From 1159c855aafbd99d4c0cf1afa8f0a258219ad82e Mon Sep 17 00:00:00 2001 From: Zejun Lin <871886504@qq.com> Date: Sun, 6 Jan 2019 16:51:22 +0800 Subject: [PATCH 11/54] To install the whole nni in an virtual environment (#538) * support venv * adapt venv * adapt venv * adapt venv * adapt venv * new test * new test * new test * support venv * support venv * support venv * support venv * support venv * support venv * support venv * colorful output for mac * colorful output for mac * 
permission denied in /tmp * permission denied in /tmp * permission denied in /tmp * remove unused variable * final * remove build python --- Makefile | 38 +++++++++++++++++++++----------------- README.md | 2 +- install.sh | 1 - tools/nni_cmd/launcher.py | 26 ++++++++++++++++++-------- 4 files changed, 40 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index a00900788f..b26f2c95df 100644 --- a/Makefile +++ b/Makefile @@ -1,32 +1,38 @@ # Setting variables - +SHELL := /bin/bash PIP_INSTALL := python3 -m pip install --no-cache-dir PIP_UNINSTALL := python3 -m pip uninstall ## Colorful output -_INFO := $(shell echo -e '\e[1;36m') -_WARNING := $(shell echo -e '\e[1;33m') -_END := $(shell echo -e '\e[0m') +_INFO := $(shell echo -e '\033[1;36m') +_WARNING := $(shell echo -e '\033[1;33m') +_END := $(shell echo -e '\033[0m') ## Detect OS UNAME_S := $(shell uname -s) ifeq ($(UNAME_S), Linux) - OS_SPEC := linux + OS_SPEC := linux else ifeq ($(UNAME_S), Darwin) - OS_SPEC := darwin + OS_SPEC := darwin else $(error platform $(UNAME_S) not supported) endif ## Install directories +ROOT_FOLDER ?= $(shell python3 -c 'import site; from pathlib import Path; print(Path(site.getsitepackages()[0]).parents[2])') +IS_SYS_PYTHON ?= $(shell [[ $(ROOT_FOLDER) == /usr* || $(ROOT_FOLDER) == /Library* ]] && echo TRUE || echo FALSE) + ifeq ($(shell id -u), 0) # is root _ROOT := 1 - ROOT_FOLDER ?= $(shell python3 -c 'import site; from pathlib import Path; print(Path(site.getsitepackages()[0]).parents[2])') BASH_COMP_PREFIX ?= /usr/share/bash-completion/completions else # is normal user - ROOT_FOLDER ?= $(shell python3 -c 'import site; from pathlib import Path; print(Path(site.getusersitepackages()).parents[2])') + ifeq (TRUE, $(IS_SYS_PYTHON)) + ROOT_FOLDER := $(shell python3 -c 'import site; from pathlib import Path; print(Path(site.getusersitepackages()).parents[2])') + endif ifndef VIRTUAL_ENV - PIP_MODE ?= --user + ifeq (, $(shell echo $$PATH | grep 'conda')) + PIP_MODE ?= --user + endif endif BASH_COMP_PREFIX ?= ${HOME}/.bash_completion.d endif @@ -38,11 +44,13 @@ BIN_FOLDER ?= $(ROOT_FOLDER)/bin NNI_PKG_FOLDER ?= $(ROOT_FOLDER)/nni ## Dependency information -NNI_NODE_TARBALL ?= /tmp/nni-node-$(OS_SPEC)-x64.tar.xz -NNI_NODE_FOLDER = /tmp/nni-node-$(OS_SPEC)-x64 +NNI_DEPENDENCY_FOLDER = /tmp/$(USER) +$(shell mkdir -p $(NNI_DEPENDENCY_FOLDER)) +NNI_NODE_TARBALL ?= $(NNI_DEPENDENCY_FOLDER)/nni-node-$(OS_SPEC)-x64.tar.xz +NNI_NODE_FOLDER = $(NNI_DEPENDENCY_FOLDER)/nni-node-$(OS_SPEC)-x64 NNI_NODE ?= $(BIN_FOLDER)/node -NNI_YARN_TARBALL ?= /tmp/nni-yarn.tar.gz -NNI_YARN_FOLDER ?= /tmp/nni-yarn +NNI_YARN_TARBALL ?= $(NNI_DEPENDENCY_FOLDER)/nni-yarn.tar.gz +NNI_YARN_FOLDER ?= $(NNI_DEPENDENCY_FOLDER)/nni-yarn NNI_YARN := PATH=$(BIN_FOLDER):$${PATH} $(NNI_YARN_FOLDER)/bin/yarn ## Version number @@ -57,10 +65,6 @@ build: cd src/nni_manager && $(NNI_YARN) && $(NNI_YARN) build #$(_INFO) Building WebUI $(_END) cd src/webui && $(NNI_YARN) && $(NNI_YARN) build - #$(_INFO) Building Python SDK $(_END) - cd src/sdk/pynni && python3 setup.py build - #$(_INFO) Building nnictl $(_END) - cd tools && python3 setup.py build # All-in-one target for non-expert users # Installs NNI as well as its dependencies, and update bashrc to set PATH diff --git a/README.md b/README.md index e114a572b3..58b034fc78 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ We encourage researchers and students leverage these projects to accelerate the **Install through pip** * We support Linux and MacOS in current stage, Ubuntu 16.04 or 
higher, along with MacOS 10.14.1 are tested and supported. Simply run the following `pip install` in an environment that has `python >= 3.5`. ```bash - python3 -m pip install --user --upgrade nni + python3 -m pip install --upgrade nni ``` * Note: * If you are in docker container (as root), please remove `--user` from the installation command. diff --git a/install.sh b/install.sh index 3d5199e187..5862f7da3d 100644 --- a/install.sh +++ b/install.sh @@ -1,3 +1,2 @@ #!/bin/bash make easy-install -source ~/.bashrc diff --git a/tools/nni_cmd/launcher.py b/tools/nni_cmd/launcher.py index 22984f3fa4..c41d178b42 100644 --- a/tools/nni_cmd/launcher.py +++ b/tools/nni_cmd/launcher.py @@ -32,9 +32,9 @@ from .config_utils import Config, Experiments from .common_utils import get_yml_content, get_json_content, print_error, print_normal, print_warning, detect_process, detect_port from .constants import * -import time import random import site +import time from pathlib import Path def get_log_path(config_file_name): @@ -72,16 +72,26 @@ def start_rest_server(port, platform, mode, config_file_name, experiment_id=None exit(1) print_normal('Starting restful server...') - python_dir = str(Path(site.getusersitepackages()).parents[2]) - entry_file = os.path.join(python_dir, 'nni', 'main.js') - entry_dir = os.path.join(python_dir, 'nni') - local_entry_dir = entry_dir - if not os.path.isfile(entry_file): - python_dir = str(Path(site.getsitepackages()[0]).parents[2]) + # Find nni lib from the following locations in order + sys_wide_python = True + python_sitepackage = site.getsitepackages()[0] + # If system-wide python is used, we will give priority to using user-sitepackage given that nni exists there + if python_sitepackage.startswith('/usr') or python_sitepackage.startswith('/Library'): + local_python_dir = str(Path(site.getusersitepackages()).parents[2]) + entry_file = os.path.join(local_python_dir, 'nni', 'main.js') + entry_dir = os.path.join(local_python_dir, 'nni') + else: + # If this python is not system-wide python, we will use its site-package directly + sys_wide_python = False + + if not sys_wide_python or not os.path.isfile(entry_file): + python_dir = str(Path(python_sitepackage).parents[2]) entry_file = os.path.join(python_dir, 'nni', 'main.js') entry_dir = os.path.join(python_dir, 'nni') + # Nothing is found if not os.path.isfile(entry_file): - raise Exception('Fail to find main.js under both %s and %s!' 
% (local_entry_dir, entry_dir)) + raise Exception('Fail to find nni under both "%s" and "%s"' % (local_python_dir, python_dir)) + cmds = ['node', entry_file, '--port', str(port), '--mode', platform, '--start_mode', mode] if mode == 'resume': cmds += ['--experiment_id', experiment_id] From b7729f0a56ee703f2ae3dda414b8e58f6dfb749d Mon Sep 17 00:00:00 2001 From: Zejun Lin <871886504@qq.com> Date: Sun, 6 Jan 2019 16:53:19 +0800 Subject: [PATCH 12/54] Make it feasible for annotation whether to add an extra line "nni.get_next_parameter()" or not (#526) * fix bug * add docs * add ut * add ut * add to ci * update doc * update doc * update ut * add ut to ci * add ut to ci * add ut to ci * add ut to ci * add ut to ci * add ut to ci * add ut to ci * add ut to ci * test * test * test * test * test * test * test * test * test * test * revert * refactor * refactor * s * merge * fix annotation for extra line * add deprecation warning --- src/sdk/pynni/nni/smartparam.py | 2 ++ tools/nni_annotation/code_generator.py | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/sdk/pynni/nni/smartparam.py b/src/sdk/pynni/nni/smartparam.py index 6d1086c78e..2b2bba5812 100644 --- a/src/sdk/pynni/nni/smartparam.py +++ b/src/sdk/pynni/nni/smartparam.py @@ -128,4 +128,6 @@ def _get_param(func, name): if name is None: name = '__line{:d}'.format(lineno) key = '{}/{}/{}'.format(module, name, func) + if trial._params is None: + trial.get_next_parameter() return trial.get_current_parameter(key) diff --git a/tools/nni_annotation/code_generator.py b/tools/nni_annotation/code_generator.py index 88e3d91745..05cf2a922e 100644 --- a/tools/nni_annotation/code_generator.py +++ b/tools/nni_annotation/code_generator.py @@ -20,8 +20,8 @@ import ast - import astor +from nni_cmd.common_utils import print_warning # pylint: disable=unidiomatic-typecheck @@ -218,6 +218,10 @@ def _visit_string(self, node): else: return node # not an annotation, ignore it + if string.startswith('@nni.get_next_parameter('): + deprecated_message = "'@nni.get_next_parameter' is deprecated in annotation due to inconvenience. Please remove this line in the trial code." 
+ print_warning(deprecated_message) + if string.startswith('@nni.report_intermediate_result(') \ or string.startswith('@nni.report_final_result(') \ or string.startswith('@nni.get_next_parameter('): From 573f23cee5a46d01fc58486f38b9c11ffd8525f8 Mon Sep 17 00:00:00 2001 From: Zejun Lin <871886504@qq.com> Date: Sun, 6 Jan 2019 16:55:04 +0800 Subject: [PATCH 13/54] fix permision deny (#567) --- tools/nni_cmd/launcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/nni_cmd/launcher.py b/tools/nni_cmd/launcher.py index c41d178b42..2b4b24f0f5 100644 --- a/tools/nni_cmd/launcher.py +++ b/tools/nni_cmd/launcher.py @@ -287,7 +287,7 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen nni_config.set_config('restServerPid', rest_process.pid) # Deal with annotation if experiment_config.get('useAnnotation'): - path = os.path.join(tempfile.gettempdir(), 'nni', 'annotation') + path = os.path.join(tempfile.gettempdir(), os.environ['USER'], 'nni', 'annotation') if not os.path.isdir(path): os.makedirs(path) path = tempfile.mkdtemp(dir=path) From 816dd6035c88c80dafe3eeabaea90f32cb883273 Mon Sep 17 00:00:00 2001 From: xuehui Date: Mon, 7 Jan 2019 17:23:28 +0800 Subject: [PATCH 14/54] Add Metis Tuner (#534) * update readme in ga_squad * update readme * fix typo * Update README.md * Update README.md * Update README.md * update readme * update * fix path * update reference * fix bug in config file * update nni_arch_overview.png * update * update * update * add metis tuner code * 1. fix bug about import 2.update other sdk file * add auto-gbdt-example and remove unused code * add metis_tuner into README * update the README * update README | remove unused variable * fix typo * add sklearn into requirments * Update src/sdk/pynni/nni/metis_tuner/metis_tuner.py add default value in __init__ Co-Authored-By: xuehui1991 * Update docs/HowToChooseTuner.md Co-Authored-By: xuehui1991 * Update docs/HowToChooseTuner.md Co-Authored-By: xuehui1991 * fix typo | add more comments --- docs/HowToChooseTuner.md | 42 +- examples/trials/auto-gbdt/config_metis.yml | 21 + examples/trials/auto-gbdt/requirments.txt | 3 +- .../trials/auto-gbdt/search_space_metis.json | 5 + .../rest_server/restValidationSchemas.ts | 2 +- src/sdk/pynni/nni/constants.py | 4 +- .../metis_tuner/Regression_GMM/CreateModel.py | 58 +++ .../metis_tuner/Regression_GMM/Selection.py | 104 +++++ .../metis_tuner/Regression_GMM/__init__.py | 0 .../metis_tuner/Regression_GP/CreateModel.py | 52 +++ .../Regression_GP/OutlierDetection.py | 99 ++++ .../metis_tuner/Regression_GP/Prediction.py | 37 ++ .../metis_tuner/Regression_GP/Selection.py | 114 +++++ .../nni/metis_tuner/Regression_GP/__init__.py | 0 .../metis_tuner/lib_acquisition_function.py | 202 ++++++++ .../metis_tuner/lib_constraint_summation.py | 116 +++++ src/sdk/pynni/nni/metis_tuner/lib_data.py | 67 +++ src/sdk/pynni/nni/metis_tuner/metis_tuner.py | 440 ++++++++++++++++++ src/sdk/pynni/nni/metis_tuner/requirments.txt | 1 + src/sdk/pynni/requirements.txt | 5 +- tools/nni_cmd/config_schema.py | 10 + 21 files changed, 1377 insertions(+), 5 deletions(-) create mode 100644 examples/trials/auto-gbdt/config_metis.yml create mode 100644 examples/trials/auto-gbdt/search_space_metis.json create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GMM/CreateModel.py create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GMM/__init__.py create mode 100644 
src/sdk/pynni/nni/metis_tuner/Regression_GP/CreateModel.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/Regression_GP/__init__.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/lib_data.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/metis_tuner.py
 create mode 100644 src/sdk/pynni/nni/metis_tuner/requirments.txt
diff --git a/docs/HowToChooseTuner.md b/docs/HowToChooseTuner.md
index 48e2ee15d0..e1dc531095 100644
--- a/docs/HowToChooseTuner.md
+++ b/docs/HowToChooseTuner.md
@@ -11,6 +11,8 @@ For now, NNI has supported the following tuner algorithms. Note that NNI install
   - [Grid Search](#Grid)
   - [Hyperband](#Hyperband)
   - [Network Morphism](#NetworkMorphism) (require pyTorch)
+  - [Metis Tuner](#MetisTuner) (require sklearn)
+
 ## Supported tuner algorithms
@@ -178,7 +180,7 @@ _Usage_:
 
 **Network Morphism**
 
-[Network Morphism](7) provides functions to automatically search for architecture of deep learning models. Every child network inherits the knowledge from its parent network and morphs into diverse types of networks, including changes of depth, width and skip-connection. Next, it estimates the value of child network using the history architecture and metric pairs. Then it selects the most promising one to train. More detail can be referred to [here](../src/sdk/pynni/nni/networkmorphism_tuner/README.md).
+[Network Morphism][7] provides functions to automatically search for architecture of deep learning models. Every child network inherits the knowledge from its parent network and morphs into diverse types of networks, including changes of depth, width and skip-connection. Next, it estimates the value of child network using the history architecture and metric pairs. Then it selects the most promising one to train. More detail can be referred to [here](../src/sdk/pynni/nni/networkmorphism_tuner/README.md).
 
 _Installation_:
 NetworkMorphism requires [pyTorch](https://pytorch.org/get-started/locally), so users should install it first.
@@ -205,6 +207,43 @@ _Usage_:
 ```
+
+**Metis Tuner**
+
+[Metis][10] offers the following benefits when it comes to tuning parameters:
+While most tools only predict the optimal configuration, Metis gives you two outputs: (a) a current prediction of the optimal configuration, and (b) a suggestion for the next trial. No more guesswork!
+
+While most tools assume training datasets do not have noisy data, Metis actually tells you if you need to re-sample a particular hyper-parameter.
+
+While most tools have problems of being exploitation-heavy, Metis' search strategy balances exploration, exploitation, and (optional) re-sampling.
+
+Metis belongs to the class of sequential model-based optimization (SMBO), and it is based on the Bayesian Optimization framework. To model the parameter-vs-performance space, Metis uses both a Gaussian Process and a GMM. Since each trial can impose a high time cost, Metis heavily trades inference computation for naive trials. At each iteration, Metis does two tasks:
+* It finds the global optimal point in the Gaussian Process space. This point represents the optimal configuration.
+* It identifies the next hyper-parameter candidate. This is achieved by inferring the potential information gain of exploration, exploitation, and re-sampling.
+
+Note that the only acceptable types of search space are `choice`, `quniform`, `uniform` and `randint`.
+
+More details can be found in our paper: https://www.microsoft.com/en-us/research/publication/metis-robustly-tuning-tail-latencies-cloud-systems/
+
+
+_Installation_:
+Metis Tuner requires [sklearn](https://scikit-learn.org/), so users should install it first, e.g., with `pip3 install sklearn`.
+
+
+_Suggested scenario_:
+Similar to TPE and SMAC, Metis is a black-box tuner. If your system takes a long time to finish each trial, Metis is more favorable than other approaches such as random search. Furthermore, Metis provides guidance on the subsequent trial. Here is an [example](../examples/trials/auto-gbdt/search_space_metis.json) of using Metis. Users only need to send the final result, such as `accuracy`, to the tuner by calling the NNI SDK.
+
+_Usage_:
+```yaml
+  # config.yaml
+  tuner:
+    builtinTunerName: MetisTuner
+    classArgs:
+      #choice: maximize, minimize
+      optimize_mode: maximize
+```
+
+
 # How to use Assessor that NNI supports?
 For now, NNI has supported the following assessor algorithms.
@@ -273,3 +312,4 @@ _Usage_:
 [7]: https://arxiv.org/abs/1806.10282
 [8]: https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf
 [9]: http://aad.informatik.uni-freiburg.de/papers/15-IJCAI-Extrapolation_of_Learning_Curves.pdf
+[10]: https://www.microsoft.com/en-us/research/publication/metis-robustly-tuning-tail-latencies-cloud-systems/
diff --git a/examples/trials/auto-gbdt/config_metis.yml b/examples/trials/auto-gbdt/config_metis.yml
new file mode 100644
index 0000000000..b52d53e69f
--- /dev/null
+++ b/examples/trials/auto-gbdt/config_metis.yml
@@ -0,0 +1,21 @@
+authorName: default
+experimentName: example_auto-gbdt-metis
+trialConcurrency: 1
+maxExecDuration: 10h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: local
+searchSpacePath: search_space_metis.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner
+  #SMAC (SMAC should be installed through nnictl)
+  builtinTunerName: MetisTuner
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: minimize
+trial:
+  command: python3 main.py
+  codeDir: .
+ gpuNum: 0 diff --git a/examples/trials/auto-gbdt/requirments.txt b/examples/trials/auto-gbdt/requirments.txt index 87509da343..182230bed8 100644 --- a/examples/trials/auto-gbdt/requirments.txt +++ b/examples/trials/auto-gbdt/requirments.txt @@ -1 +1,2 @@ -pip install lightgbm +lightgbm +pandas diff --git a/examples/trials/auto-gbdt/search_space_metis.json b/examples/trials/auto-gbdt/search_space_metis.json new file mode 100644 index 0000000000..6bfbc32afa --- /dev/null +++ b/examples/trials/auto-gbdt/search_space_metis.json @@ -0,0 +1,5 @@ +{ + "num_leaves":{"_type":"choice","_value":[31, 28, 24, 20]}, + "learning_rate":{"_type":"choice","_value":[0.01, 0.05, 0.1, 0.2]}, + "bagging_freq":{"_type":"choice","_value":[1, 2, 4, 8, 10]} +} diff --git a/src/nni_manager/rest_server/restValidationSchemas.ts b/src/nni_manager/rest_server/restValidationSchemas.ts index bfb1ff24d2..5b6bd1dab7 100644 --- a/src/nni_manager/rest_server/restValidationSchemas.ts +++ b/src/nni_manager/rest_server/restValidationSchemas.ts @@ -148,7 +148,7 @@ export namespace ValidationSchemas { checkpointDir: joi.string().allow('') }), tuner: joi.object({ - builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism'), + builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism', 'MetisTuner'), codeDir: joi.string(), classFileName: joi.string(), className: joi.string(), diff --git a/src/sdk/pynni/nni/constants.py b/src/sdk/pynni/nni/constants.py index ba6d27144f..f6cce5adba 100644 --- a/src/sdk/pynni/nni/constants.py +++ b/src/sdk/pynni/nni/constants.py @@ -28,7 +28,8 @@ 'Medianstop': 'nni.medianstop_assessor.medianstop_assessor', 'GridSearch': 'nni.gridsearch_tuner.gridsearch_tuner', 'NetworkMorphism': 'nni.networkmorphism_tuner.networkmorphism_tuner', - 'Curvefitting': 'nni.curvefitting_assessor.curvefitting_assessor' + 'Curvefitting': 'nni.curvefitting_assessor.curvefitting_assessor', + 'MetisTuner': 'nni.metis_tuner.metis_tuner' } ClassName = { @@ -40,6 +41,7 @@ 'BatchTuner': 'BatchTuner', 'GridSearch': 'GridSearchTuner', 'NetworkMorphism':'NetworkMorphismTuner', + 'MetisTuner':'MetisTuner', 'Medianstop': 'MedianstopAssessor', 'Curvefitting': 'CurvefittingAssessor' diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GMM/CreateModel.py b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/CreateModel.py new file mode 100644 index 0000000000..3ed39e0cf8 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/CreateModel.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import sys +from operator import itemgetter + +import sklearn.mixture as mm + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def create_model(samples_x, samples_y_aggregation, percentage_goodbatch=0.34): + ''' + Create the Gaussian Mixture Model + ''' + samples = [samples_x[i] + [samples_y_aggregation[i]] for i in range(0, len(samples_x))] + + # Sorts so that we can get the top samples + samples = sorted(samples, key=itemgetter(-1)) + samples_goodbatch_size = int(len(samples) * percentage_goodbatch) + samples_goodbatch = samples[0:samples_goodbatch_size] + samples_badbatch = samples[samples_goodbatch_size:] + + samples_x_goodbatch = [sample_goodbatch[0:-1] for sample_goodbatch in samples_goodbatch] + #samples_y_goodbatch = [sample_goodbatch[-1] for sample_goodbatch in samples_goodbatch] + samples_x_badbatch = [sample_badbatch[0:-1] for sample_badbatch in samples_badbatch] + + # === Trains GMM clustering models === # + #sys.stderr.write("[%s] Train GMM's GMM model\n" % (os.path.basename(__file__))) + bgmm_goodbatch = mm.BayesianGaussianMixture(n_components=max(1, samples_goodbatch_size - 1)) + bad_n_components = max(1, len(samples_x) - samples_goodbatch_size - 1) + bgmm_badbatch = mm.BayesianGaussianMixture(n_components=bad_n_components) + bgmm_goodbatch.fit(samples_x_goodbatch) + bgmm_badbatch.fit(samples_x_badbatch) + + model = {} + model['clusteringmodel_good'] = bgmm_goodbatch + model['clusteringmodel_bad'] = bgmm_badbatch + return model + \ No newline at end of file diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py new file mode 100644 index 0000000000..4507e30886 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
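Before the body of Regression_GMM/Selection.py continues below, it may help to see the model that `create_model` above builds, in isolation. A self-contained sketch on synthetic data; the sample values are invented, while the 34% good/bad split and the `BayesianGaussianMixture` usage mirror the patch:

```python
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.default_rng(0)
samples_x = rng.uniform(0, 1, size=(30, 2)).tolist()
samples_y = [x[0] ** 2 + x[1] for x in samples_x]  # pretend trial losses

# Sort by loss and split into a "good" batch (best 34%) and a "bad" batch,
# as create_model does with percentage_goodbatch=0.34
order = np.argsort(samples_y)
cut = int(len(samples_x) * 0.34)
good = [samples_x[i] for i in order[:cut]]
bad = [samples_x[i] for i in order[cut:]]

gmm_good = BayesianGaussianMixture(n_components=max(1, len(good) - 1)).fit(good)
gmm_bad = BayesianGaussianMixture(n_components=max(1, len(bad) - 1)).fit(bad)

# score() returns the average log-likelihood; Selection.py's _ratio_scores
# (below) treats a smaller good/bad score ratio as a more promising candidate
candidate = [[0.1, 0.9]]
print(gmm_good.score(candidate) / gmm_bad.score(candidate))
```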
+ +import os +import random +import sys + +import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function +import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation +import nni.metis_tuner.lib_data as lib_data + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +CONSTRAINT_LOWERBOUND = None +CONSTRAINT_UPPERBOUND = None +CONSTRAINT_PARAMS_IDX = [] + + +def _ratio_scores(parameters_value, clusteringmodel_gmm_good, clusteringmodel_gmm_bad): + ''' + The ratio is smaller the better + ''' + ratio = clusteringmodel_gmm_good.score([parameters_value]) / clusteringmodel_gmm_bad.score([parameters_value]) + sigma = 0 + return ratio, sigma + +def selection_r(x_bounds, + x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + num_starting_points=100, + minimize_constraints_fun=None): + ''' + Call selection + ''' + minimize_starting_points = [lib_data.rand(x_bounds, x_types)\ + for i in range(0, num_starting_points)] + outputs = selection(x_bounds, x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + minimize_starting_points, + minimize_constraints_fun) + return outputs + +def selection(x_bounds, + x_types, + clusteringmodel_gmm_good, + clusteringmodel_gmm_bad, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + Select the lowest mu value + ''' + results = lib_acquisition_function.next_hyperparameter_lowest_mu(\ + _ratio_scores, [clusteringmodel_gmm_good, clusteringmodel_gmm_bad],\ + x_bounds, x_types, minimize_starting_points, \ + minimize_constraints_fun=minimize_constraints_fun) + + return results + +def _rand_with_constraints(x_bounds, x_types): + ''' + Random generate the variable with constraints + ''' + outputs = None + x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX] + x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX] + x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints, + x_types_withconstraints, + CONSTRAINT_LOWERBOUND, + CONSTRAINT_UPPERBOUND) + if x_val_withconstraints is not None: + outputs = [None] * len(x_bounds) + for i, _ in enumerate(CONSTRAINT_PARAMS_IDX): + outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i] + for i, _ in enumerate(outputs): + if outputs[i] is None: + outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1]) + return outputs + +def _minimize_constraints_fun_summation(x): + ''' + Minimize constraints fun summation + ''' + summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX]) + return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GMM/__init__.py b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GP/CreateModel.py b/src/sdk/pynni/nni/metis_tuner/Regression_GP/CreateModel.py new file mode 100644 index 0000000000..c1d16475c8 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/Regression_GP/CreateModel.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import sys +import numpy + +import sklearn.gaussian_process as gp + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def create_model(samples_x, samples_y_aggregation, + n_restarts_optimizer=250, is_white_kernel=False): + ''' + Trains GP regression model + ''' + kernel = gp.kernels.ConstantKernel(constant_value=1, + constant_value_bounds=(1e-12, 1e12)) * \ + gp.kernels.Matern(nu=1.5) + if is_white_kernel is True: + kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12)) + regressor = gp.GaussianProcessRegressor(kernel=kernel, + n_restarts_optimizer=n_restarts_optimizer, + normalize_y=True, + alpha=0) + regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation)) + + model = {} + model['model'] = regressor + model['kernel_prior'] = str(kernel) + model['kernel_posterior'] = str(regressor.kernel_) + model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta) + + return model diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py b/src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py new file mode 100644 index 0000000000..353c56f2b0 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/Regression_GP/OutlierDetection.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
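A toy illustration of the GP surrogate that `create_model` above trains. The 1-D objective is invented; a tiny `alpha` jitter is used here for numerical stability where the patch passes `alpha=0`, and fewer optimizer restarts are used than the patch's 250 to keep the sketch fast:

```python
import numpy as np
import sklearn.gaussian_process as gp

X = np.array([[0.1], [0.4], [0.5], [0.9]])
y = np.sin(3 * X).ravel()  # pretend trial results at four sampled points

# Same kernel family as create_model: constant * Matern(nu=1.5)
kernel = gp.kernels.ConstantKernel(constant_value=1,
                                   constant_value_bounds=(1e-12, 1e12)) \
         * gp.kernels.Matern(nu=1.5)
regressor = gp.GaussianProcessRegressor(kernel=kernel,
                                        n_restarts_optimizer=10,
                                        normalize_y=True,
                                        alpha=1e-10)
regressor.fit(X, y)

# predict() with return_std=True yields the (mu, sigma) pair that
# Prediction.py exposes and the acquisition functions consume
mu, sigma = regressor.predict(np.array([[0.7]]), return_std=True)
print(mu[0], sigma[0])
```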
+
+
+import os
+import sys
+from multiprocessing.dummy import Pool as ThreadPool
+
+import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model
+import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
+import nni.metis_tuner.lib_data as lib_data
+
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+
+
+def _outlierDetection_threaded(inputs):
+    '''
+    Detect whether one sample is an outlier, using a GP trained on all other samples
+    '''
+    [samples_idx, samples_x, samples_y_aggregation] = inputs
+    sys.stderr.write("[%s] DEBUG: Evaluating %dth of %d samples\n"\
+                     % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
+    outlier = None
+
+    # Create a diagnostic regression model which removes the sample that we want to evaluate
+    diagnostic_regressor_gp = gp_create_model.create_model(\
+                                samples_x[0:samples_idx] + samples_x[samples_idx + 1:],\
+                                samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:])
+    mu, sigma = gp_prediction.predict(samples_x[samples_idx], diagnostic_regressor_gp['model'])
+
+    # 2.33 is the z-score for 98% confidence level
+    if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma):
+        outlier = {"samples_idx": samples_idx,
+                   "expected_mu": mu,
+                   "expected_sigma": sigma,
+                   "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}
+    return outlier
+
+def outlierDetection_threaded(samples_x, samples_y_aggregation):
+    '''
+    Use multiple threads to detect the outliers
+    '''
+    outliers = []
+
+    threads_inputs = [[samples_idx, samples_x, samples_y_aggregation]\
+                      for samples_idx in range(0, len(samples_x))]
+    threads_pool = ThreadPool(min(4, len(threads_inputs)))
+    threads_results = threads_pool.map(_outlierDetection_threaded, threads_inputs)
+    threads_pool.close()
+    threads_pool.join()
+
+    # A None result simply means the corresponding sample was not flagged as an outlier
+    for threads_result in threads_results:
+        if threads_result is not None:
+            outliers.append(threads_result)
+
+    outliers = None if len(outliers) == 0 else outliers
+    return outliers
+
+def outlierDetection(samples_x, samples_y_aggregation):
+    '''
+    Single-threaded version of the leave-one-out outlier detection above
+    '''
+    outliers = []
+    for samples_idx in range(0, len(samples_x)):
+        #sys.stderr.write("[%s] DEBUG: Evaluating %d of %d samples\n"
+        #                 \ % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
+        diagnostic_regressor_gp = gp_create_model.create_model(\
+                                    samples_x[0:samples_idx] + samples_x[samples_idx + 1:],\
+                                    samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:])
+        mu, sigma = gp_prediction.predict(samples_x[samples_idx],
+                                          diagnostic_regressor_gp['model'])
+        # 2.33 is the z-score for 98% confidence level
+        if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma):
+            outliers.append({"samples_idx": samples_idx,
+                             "expected_mu": mu,
+                             "expected_sigma": sigma,
+                             "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)})
+
+    outliers = None if len(outliers) == 0 else outliers
+    return outliers
+
+
\ No newline at end of file
diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py b/src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py
new file mode 100644
index 0000000000..82d3d0353f
--- /dev/null
+++ b/src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py
@@ -0,0 +1,37 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import os +import sys + +import numpy + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + + +def predict(parameters_value, regressor_gp): + ''' + Predict by Gaussian Process Model + ''' + parameters_value = numpy.array(parameters_value).reshape(-1, len(parameters_value)) + mu, sigma = regressor_gp.predict(parameters_value, return_std=True) + + return mu[0], sigma[0] + \ No newline at end of file diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py b/src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py new file mode 100644 index 0000000000..9c8e384a3d --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py @@ -0,0 +1,114 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
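Given `predict` above, the leave-one-out test in OutlierDetection.py reduces to a simple confidence-band check once the diagnostic GP has produced (mu, sigma) for the held-out sample. A sketch with an illustrative helper (`is_outlier` is not a name from the patch):

```python
def is_outlier(y_observed, mu_predicted, sigma_predicted, z=2.33):
    # z = 2.33 is the threshold the patch uses for a 98% confidence level:
    # a sample is flagged when its observed value falls outside the band
    # predicted by a GP trained on all *other* samples
    return abs(y_observed - mu_predicted) > z * sigma_predicted

print(is_outlier(5.0, mu_predicted=1.0, sigma_predicted=0.5))  # True: flagged
print(is_outlier(1.2, mu_predicted=1.0, sigma_predicted=0.5))  # False: within band
```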
+ +import os +import random +import sys + +import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function +import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation +import nni.metis_tuner.lib_data as lib_data +import nni.metis_tuner.Regression_GP.Prediction as gp_prediction + +sys.path.insert(1, os.path.join(sys.path[0], '..')) + +CONSTRAINT_LOWERBOUND = None +CONSTRAINT_UPPERBOUND = None +CONSTRAINT_PARAMS_IDX = [] + + +def selection_r(acquisition_function, + samples_y_aggregation, + x_bounds, + x_types, + regressor_gp, + num_starting_points=100, + minimize_constraints_fun=None): + ''' + Selecte R value + ''' + minimize_starting_points = [lib_data.rand(x_bounds, x_types) \ + for i in range(0, num_starting_points)] + outputs = selection(acquisition_function, samples_y_aggregation, + x_bounds, x_types, regressor_gp, + minimize_starting_points, + minimize_constraints_fun=minimize_constraints_fun) + + return outputs + +def selection(acquisition_function, + samples_y_aggregation, + x_bounds, x_types, + regressor_gp, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + selection + ''' + outputs = None + + sys.stderr.write("[%s] Exercise \"%s\" acquisition function\n" \ + % (os.path.basename(__file__), acquisition_function)) + + if acquisition_function == "ei": + outputs = lib_acquisition_function.next_hyperparameter_expected_improvement(\ + gp_prediction.predict, [regressor_gp], x_bounds, x_types, \ + samples_y_aggregation, minimize_starting_points, \ + minimize_constraints_fun=minimize_constraints_fun) + elif acquisition_function == "lc": + outputs = lib_acquisition_function.next_hyperparameter_lowest_confidence(\ + gp_prediction.predict, [regressor_gp], x_bounds, x_types,\ + minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun) + elif acquisition_function == "lm": + outputs = lib_acquisition_function.next_hyperparameter_lowest_mu(\ + gp_prediction.predict, [regressor_gp], x_bounds, x_types,\ + minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun) + return outputs + +def _rand_with_constraints(x_bounds, x_types): + ''' + Random generate with constraints + ''' + outputs = None + + x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX] + x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX] + x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints, + x_types_withconstraints, + CONSTRAINT_LOWERBOUND, + CONSTRAINT_UPPERBOUND) + if x_val_withconstraints is not None: + outputs = [None] * len(x_bounds) + + for i, _ in enumerate(CONSTRAINT_PARAMS_IDX): + outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i] + + for i, _ in enumerate(outputs): + if outputs[i] is None: + outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1]) + return outputs + + +def _minimize_constraints_fun_summation(x): + ''' + Minimize the constraints fun summation + ''' + summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX]) + return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GP/__init__.py b/src/sdk/pynni/nni/metis_tuner/Regression_GP/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py b/src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py new file mode 100644 index 0000000000..1caf8c814a --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py @@ -0,0 +1,202 @@ +# Copyright (c) Microsoft 
Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import sys +import numpy + +from scipy.stats import norm +from scipy.optimize import minimize + +import nni.metis_tuner.lib_data as lib_data + + +def next_hyperparameter_expected_improvement(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + samples_y_aggregation, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + "Expected Improvement" acquisition function + ''' + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_expected_improvement, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, + fun_prediction_args, + x_bounds, + x_types, + samples_y_aggregation, + minimize_constraints_fun)) + + if (best_acquisition_value is None) or \ + (res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is None) or \ + (minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "ei"} + + return outputs + +def _expected_improvement(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, samples_y_aggregation, + minimize_constraints_fun): + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + expected_improvement = sys.maxsize + if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True): + mu, sigma = fun_prediction(x, *fun_prediction_args) + + loss_optimum = min(samples_y_aggregation) + scaling_factor = -1 + + # In case sigma equals zero + with numpy.errstate(divide="ignore"): + Z = scaling_factor * (mu - loss_optimum) / sigma + expected_improvement = scaling_factor * (mu - loss_optimum) * \ + norm.cdf(Z) + sigma * norm.pdf(Z) + expected_improvement = 0.0 if sigma == 0.0 else expected_improvement + + # We want expected_improvement to be as large as possible + # (i.e., as small as possible for minimize(...)) + expected_improvement = -1 * expected_improvement + return expected_improvement + + +def 
next_hyperparameter_lowest_confidence(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + "Lowest Confidence" acquisition function + ''' + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_lowest_confidence, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, + fun_prediction_args, + x_bounds, + x_types, + minimize_constraints_fun)) + + if (best_acquisition_value) is None or (res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "lc"} + return outputs + +def _lowest_confidence(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, minimize_constraints_fun): + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + ci = sys.maxsize + if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True): + mu, sigma = fun_prediction(x, *fun_prediction_args) + ci = (sigma * 1.96 * 2) / mu + # We want ci to be as large as possible + # (i.e., as small as possible for minimize(...), + # because this would mean lowest confidence + ci = -1 * ci + + return ci + + +def next_hyperparameter_lowest_mu(fun_prediction, + fun_prediction_args, + x_bounds, x_types, + minimize_starting_points, + minimize_constraints_fun=None): + ''' + "Lowest Mu" acquisition function + ''' + best_x = None + best_acquisition_value = None + x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds] + x_bounds_minmax = numpy.array(x_bounds_minmax) + + for starting_point in numpy.array(minimize_starting_points): + res = minimize(fun=_lowest_mu, + x0=starting_point.reshape(1, -1), + bounds=x_bounds_minmax, + method="L-BFGS-B", + args=(fun_prediction, fun_prediction_args, \ + x_bounds, x_types, minimize_constraints_fun)) + + if (best_acquisition_value is None) or (res.fun < best_acquisition_value): + res.x = numpy.ndarray.tolist(res.x) + res.x = lib_data.match_val_type(res.x, x_bounds, x_types) + if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True): + best_acquisition_value = res.fun + best_x = res.x + + outputs = None + if best_x is not None: + mu, sigma = fun_prediction(best_x, *fun_prediction_args) + outputs = {'hyperparameter': best_x, 'expected_mu': mu, + 'expected_sigma': sigma, 'acquisition_func': "lm"} + return outputs + + +def _lowest_mu(x, fun_prediction, fun_prediction_args, + x_bounds, x_types, minimize_constraints_fun): + ''' + Calculate the lowest mu + ''' + # This is only for step-wise optimization + x = lib_data.match_val_type(x, x_bounds, x_types) + + mu = sys.maxsize + if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True): + mu, _ = fun_prediction(x, *fun_prediction_args) + return mu + \ No newline at end of file diff --git a/src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py b/src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py new file mode 100644 index 
0000000000..1e9daaee95 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py @@ -0,0 +1,116 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import math +import random + +from operator import itemgetter + + +def check_feasibility(x_bounds, lowerbound, upperbound): + ''' + This can have false positives. + For examples, parameters can only be 0 or 5, and the summation constraint is between 6 and 7. + ''' + # x_bounds should be sorted, so even for "discrete_int" type, + # the smallest and the largest number should the first and the last element + x_bounds_lowerbound = sum([x_bound[0] for x_bound in x_bounds]) + x_bounds_upperbound = sum([x_bound[-1] for x_bound in x_bounds]) + + # return ((x_bounds_lowerbound <= lowerbound) and (x_bounds_upperbound >= lowerbound)) or \ + # ((x_bounds_lowerbound <= upperbound) and (x_bounds_upperbound >= upperbound)) + return (x_bounds_lowerbound <= lowerbound <= x_bounds_upperbound) or \ + (x_bounds_lowerbound <= upperbound <= x_bounds_upperbound) + +def rand(x_bounds, x_types, lowerbound, upperbound, max_retries=100): + ''' + Key idea is that we try to move towards upperbound, by randomly choose one + value for each parameter. However, for the last parameter, + we need to make sure that its value can help us get above lowerbound + ''' + outputs = None + + if check_feasibility(x_bounds, lowerbound, upperbound) is True: + # Order parameters by their range size. 
We want the smallest range first, + # because the corresponding parameter has less numbers to choose from + x_idx_sorted = [] + for i, _ in enumerate(x_bounds): + if x_types[i] == "discrete_int": + x_idx_sorted.append([i, len(x_bounds[i])]) + elif (x_types[i] == "range_int") or (x_types[i] == "range_continuous"): + x_idx_sorted.append([i, math.floor(x_bounds[i][1] - x_bounds[i][0])]) + x_idx_sorted = sorted(x_idx_sorted, key=itemgetter(1)) + + for _ in range(max_retries): + budget_allocated = 0 + outputs = [None] * len(x_bounds) + + for i, _ in enumerate(x_idx_sorted): + x_idx = x_idx_sorted[i][0] + # The amount of unallocated space that we have + budget_max = upperbound - budget_allocated + # NOT the Last x that we need to assign a random number + if i < (len(x_idx_sorted) - 1): + if x_bounds[x_idx][0] <= budget_max: + if x_types[x_idx] == "discrete_int": + # Note the valid integer + temp = [] + for j in x_bounds[x_idx]: + if j <= budget_max: + temp.append(j) + # Randomly pick a number from the integer array + if temp: + outputs[x_idx] = temp[random.randint(0, len(temp) - 1)] + + elif (x_types[x_idx] == "range_int") or \ + (x_types[x_idx] == "range_continuous"): + outputs[x_idx] = random.randint(x_bounds[x_idx][0], + min(x_bounds[x_idx][-1], budget_max)) + + else: + # The last x that we need to assign a random number + randint_lowerbound = lowerbound - budget_allocated + randint_lowerbound = 0 if randint_lowerbound < 0 else randint_lowerbound + + # This check: + # is our smallest possible value going to overflow the available budget space, + # and is our largest possible value going to underflow the lower bound + if (x_bounds[x_idx][0] <= budget_max) and \ + (x_bounds[x_idx][-1] >= randint_lowerbound): + if x_types[x_idx] == "discrete_int": + temp = [] + for j in x_bounds[x_idx]: + # if (j <= budget_max) and (j >= randint_lowerbound): + if randint_lowerbound <= j <= budget_max: + temp.append(j) + if temp: + outputs[x_idx] = temp[random.randint(0, len(temp) - 1)] + elif (x_types[x_idx] == "range_int") or \ + (x_types[x_idx] == "range_continuous"): + outputs[x_idx] = random.randint(randint_lowerbound, + min(x_bounds[x_idx][1], budget_max)) + if outputs[x_idx] is None: + break + else: + budget_allocated += outputs[x_idx] + if None not in outputs: + break + return outputs + \ No newline at end of file diff --git a/src/sdk/pynni/nni/metis_tuner/lib_data.py b/src/sdk/pynni/nni/metis_tuner/lib_data.py new file mode 100644 index 0000000000..d24aeed678 --- /dev/null +++ b/src/sdk/pynni/nni/metis_tuner/lib_data.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
+
+import math
+import random
+
+
+def match_val_type(vals, vals_bounds, vals_types):
+    '''
+    Update the values in the array to match their corresponding types.
+    '''
+    vals_new = []
+
+    for i, _ in enumerate(vals_types):
+        if vals_types[i] == "discrete_int":
+            # Find the closest integer in the array vals_bounds
+            vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i])))
+        elif vals_types[i] == "range_int":
+            # Round down to the nearest integer
+            vals_new.append(math.floor(vals[i]))
+        elif vals_types[i] == "range_continuous":
+            # Don't do any processing for continuous numbers
+            vals_new.append(vals[i])
+        else:
+            return None
+
+    return vals_new
+
+
+def rand(x_bounds, x_types):
+    '''
+    Randomly generate a value for each variable within its bounds.
+    '''
+    outputs = []
+
+    for i, _ in enumerate(x_bounds):
+        if x_types[i] == "discrete_int":
+            temp = x_bounds[i][random.randint(0, len(x_bounds[i]) - 1)]
+            outputs.append(temp)
+        elif x_types[i] == "range_int":
+            temp = random.randint(x_bounds[i][0], x_bounds[i][1])
+            outputs.append(temp)
+        elif x_types[i] == "range_continuous":
+            temp = random.uniform(x_bounds[i][0], x_bounds[i][1])
+            outputs.append(temp)
+        else:
+            return None
+
+    return outputs
+
\ No newline at end of file
diff --git a/src/sdk/pynni/nni/metis_tuner/metis_tuner.py b/src/sdk/pynni/nni/metis_tuner/metis_tuner.py
new file mode 100644
index 0000000000..a6a0608c25
--- /dev/null
+++ b/src/sdk/pynni/nni/metis_tuner/metis_tuner.py
@@ -0,0 +1,440 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
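+
+'''
+A module docstring summarizing what the code below does: the Metis tuner models
+the search space with a Gaussian Process regression (used for both exploitation
+and exploration candidates) plus a Gaussian Mixture Model, and can re-sample
+potentially noisy observations; see the MetisTuner class below.
+'''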
+
+import copy
+import logging
+import os
+import random
+import statistics
+import sys
+
+from enum import Enum, unique
+from multiprocessing.dummy import Pool as ThreadPool
+
+from nni.tuner import Tuner
+
+import nni.metis_tuner.lib_data as lib_data
+import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
+import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model
+import nni.metis_tuner.Regression_GP.Selection as gp_selection
+import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
+import nni.metis_tuner.Regression_GP.OutlierDetection as gp_outlier_detection
+import nni.metis_tuner.Regression_GMM.CreateModel as gmm_create_model
+import nni.metis_tuner.Regression_GMM.Selection as gmm_selection
+
+logger = logging.getLogger("Metis_Tuner_AutoML")
+
+@unique
+class OptimizeMode(Enum):
+    '''
+    Optimize mode: "minimize" or "maximize".
+    '''
+    Minimize = 'minimize'
+    Maximize = 'maximize'
+
+
+NONE_TYPE = ''
+CONSTRAINT_LOWERBOUND = None
+CONSTRAINT_UPPERBOUND = None
+CONSTRAINT_PARAMS_IDX = []
+
+
+class MetisTuner(Tuner):
+    '''
+    Metis Tuner
+    '''
+
+    def __init__(self, optimize_mode="maximize", no_resampling=True, no_candidates=True,
+                 selection_num_starting_points=10, cold_start_num=10):
+        '''
+        optimize_mode: a string, either "maximize" or "minimize".
+
+        no_resampling: True or False. Should Metis consider re-sampling as part of the search strategy?
+        If you are confident that the training dataset is noise-free, then you do not need re-sampling.
+
+        no_candidates: True or False. Should Metis suggest parameters for the next benchmark?
+        If you do not plan to do more benchmarks, Metis can skip this step.
+
+        selection_num_starting_points: how many times Metis should try to find the global optimum
+        in the search space. The higher the number, the longer it takes to output the solution.
+
+        cold_start_num: Metis needs some trial results for a cold start. While the number of trial
+        results is less than cold_start_num, Metis randomly samples hyper-parameters for the trials.
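+
+        An illustrative snippet of the corresponding classArgs in an experiment
+        config (the accepted keys follow tools/nni_cmd/config_schema.py in this
+        patch; the values shown are examples only):
+
+            tuner:
+              builtinTunerName: MetisTuner
+              classArgs:
+                optimize_mode: maximize
+                no_resampling: true
+                cold_start_num: 10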
+        '''
+        self.samples_x = []
+        self.samples_y = []
+        self.samples_y_aggregation = []
+        self.space = None
+        self.no_resampling = no_resampling
+        self.no_candidates = no_candidates
+        # store as an OptimizeMode member so the comparison in receive_trial_result works
+        self.optimize_mode = OptimizeMode(optimize_mode)
+        self.key_order = []
+        self.cold_start_num = cold_start_num
+        self.selection_num_starting_points = selection_num_starting_points
+        self.minimize_constraints_fun = None
+        self.minimize_starting_points = None
+
+
+    def update_search_space(self, search_space):
+        '''
+        Update self.x_bounds and self.x_types from search_space.json.
+        '''
+        self.x_bounds = [[] for i in range(len(search_space))]
+        self.x_types = [NONE_TYPE for i in range(len(search_space))]
+
+        for key in search_space:
+            self.key_order.append(key)
+
+        key_type = {}
+        if isinstance(search_space, dict):
+            for key in search_space:
+                key_type = search_space[key]['_type']
+                key_range = search_space[key]['_value']
+                try:
+                    idx = self.key_order.index(key)
+                except Exception as ex:
+                    logger.exception(ex)
+                    raise RuntimeError("The search space contains a key that is not defined in key_order.")
+
+                if key_type == 'quniform':
+                    if key_range[2] == 1:
+                        self.x_bounds[idx] = [key_range[0], key_range[1]]
+                        self.x_types[idx] = 'range_int'
+                    else:
+                        bounds = []
+                        for value in range(key_range[0], key_range[1], key_range[2]):
+                            bounds.append(value)
+                        self.x_bounds[idx] = bounds
+                        self.x_types[idx] = 'discrete_int'
+                elif key_type == 'randint':
+                    self.x_bounds[idx] = [0, key_range[0]]
+                    self.x_types[idx] = 'range_int'
+                elif key_type == 'uniform':
+                    self.x_bounds[idx] = [key_range[0], key_range[1]]
+                    self.x_types[idx] = 'range_continuous'
+                elif key_type == 'choice':
+                    self.x_bounds[idx] = key_range
+                    self.x_types[idx] = 'discrete_int'
+                else:
+                    logger.info("Metis Tuner doesn't support this kind of variable.")
+                    raise RuntimeError("Metis Tuner doesn't support this kind of variable.")
+        else:
+            logger.info("The format of search space is not a dict.")
+            raise RuntimeError("The format of search space is not a dict.")
+
+        self.minimize_starting_points = _rand_init(self.x_bounds, self.x_types, \
+                                                   self.selection_num_starting_points)
+
+
+    def _pack_output(self, init_parameter):
+        '''
+        Pack the output: map each key in key_order to its generated value.
+        '''
+        output = {}
+        for i, param in enumerate(init_parameter):
+            output[self.key_order[i]] = param
+        return output
+
+
+    def generate_parameters(self, parameter_id):
+        '''
+        Generate parameters for a trial.
+        If the number of trial results is lower than the cold-start number,
+        Metis first generates some random parameters.
+        Otherwise, Metis chooses the parameters via the Gaussian Process model
+        and the Gaussian Mixture Model.
+        '''
+        # cold start: not enough samples yet, so draw a random configuration
+        if not self.samples_x or len(self.samples_x) < self.cold_start_num:
+            init_parameter = _rand_init(self.x_bounds, self.x_types, 1)[0]
+            results = self._pack_output(init_parameter)
+        else:
+            results = self._selection(self.samples_x, self.samples_y_aggregation, self.samples_y,
+                                      self.x_bounds, self.x_types,
+                                      threshold_samplessize_resampling=(None if self.no_resampling is True else 50),
+                                      no_candidates=self.no_candidates,
+                                      minimize_starting_points=self.minimize_starting_points,
+                                      minimize_constraints_fun=self.minimize_constraints_fun)
+
+        logger.info("Generated parameters:\n%s", str(results))
+        return results
+
+
+    def receive_trial_result(self, parameter_id, parameters, value):
+        '''
+        Tuner receives a result from a trial.
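+
+        parameter_id: id of the parameter set produced by generate_parameters.
+        parameters: the hyper-parameter values the trial ran with.
+        value: the final metric reported by the trial; extract_scalar_reward()
+        below converts it to a scalar reward.
+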
+        A value example is as follows:
+            value: 99.5%
+        '''
+        value = self.extract_scalar_reward(value)
+        if self.optimize_mode == OptimizeMode.Maximize:
+            value = -value
+
+        logger.info("Received trial result.")
+        logger.info("value is: %s", str(value))
+        logger.info("parameter is: %s", str(parameters))
+
+        # parse parameters into sample_x
+        sample_x = [0 for i in range(len(self.key_order))]
+        for key in parameters:
+            idx = self.key_order.index(key)
+            sample_x[idx] = parameters[key]
+
+        # parse value into sample_y
+        temp_y = []
+        if sample_x in self.samples_x:
+            idx = self.samples_x.index(sample_x)
+            temp_y = self.samples_y[idx]
+            temp_y.append(value)
+            self.samples_y[idx] = temp_y
+
+            # calculate y aggregation
+            median = get_median(temp_y)
+            self.samples_y_aggregation[idx] = median
+        else:
+            self.samples_x.append(sample_x)
+            self.samples_y.append([value])
+
+            # calculate y aggregation
+            self.samples_y_aggregation.append([value])
+
+
+    def _selection(self, samples_x, samples_y_aggregation, samples_y,
+                   x_bounds, x_types, max_resampling_per_x=3,
+                   threshold_samplessize_exploitation=12,
+                   threshold_samplessize_resampling=50, no_candidates=False,
+                   minimize_starting_points=None, minimize_constraints_fun=None):
+
+        next_candidate = None
+        candidates = []
+        samples_size_all = sum([len(i) for i in samples_y])
+        samples_size_unique = len(samples_y)
+
+        # ===== STEP 1: Compute the current optimum =====
+        gp_model = gp_create_model.create_model(samples_x, samples_y_aggregation)
+        lm_current = gp_selection.selection("lm", samples_y_aggregation, x_bounds,
+                                            x_types, gp_model['model'],
+                                            minimize_starting_points,
+                                            minimize_constraints_fun=minimize_constraints_fun)
+        if not lm_current:
+            return None
+
+        if no_candidates is False:
+            candidates.append({'hyperparameter': lm_current['hyperparameter'],
+                               'expected_mu': lm_current['expected_mu'],
+                               'expected_sigma': lm_current['expected_sigma'],
+                               'reason': "exploitation_gp"})
+
+            # ===== STEP 2: Get recommended configurations for exploration =====
+            results_exploration = gp_selection.selection("lc", samples_y_aggregation,
+                                                         x_bounds, x_types, gp_model['model'],
+                                                         minimize_starting_points,
+                                                         minimize_constraints_fun=minimize_constraints_fun)
+
+            if results_exploration is not None:
+                if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
+                    candidates.append({'hyperparameter': results_exploration['hyperparameter'],
+                                       'expected_mu': results_exploration['expected_mu'],
+                                       'expected_sigma': results_exploration['expected_sigma'],
+                                       'reason': "exploration"})
+                    logger.info("DEBUG: 1 exploration candidate selected")
+            else:
+                logger.info("DEBUG: No suitable exploration candidates were found")
+
+            # ===== STEP 3: Get recommended configurations for exploitation =====
+            if samples_size_all >= threshold_samplessize_exploitation:
+                logger.info("Getting candidates for exploitation...")
+                try:
+                    gmm = gmm_create_model.create_model(samples_x, samples_y_aggregation)
+                    results_exploitation = gmm_selection.selection(x_bounds,
+                                                                   x_types,
+                                                                   gmm['clusteringmodel_good'],
+                                                                   gmm['clusteringmodel_bad'],
+                                                                   minimize_starting_points,
+                                                                   minimize_constraints_fun=minimize_constraints_fun)
+
+                    if results_exploitation is not None:
+                        if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
+                            candidates.append({'hyperparameter': results_exploitation['hyperparameter'],
+                                               'expected_mu': results_exploitation['expected_mu'],
+                                               'expected_sigma': results_exploitation['expected_sigma'],
+                                               'reason': "exploitation_gmm"})
+                            logger.info("DEBUG: 1 exploitation_gmm candidate selected")
+                    else:
+                        logger.info("DEBUG: No suitable exploitation_gmm candidates were found")
+
+                except ValueError as exception:
+                    # The exception: ValueError: Fitting the mixture model failed
+                    # because some components have ill-defined empirical covariance
+                    # (for instance caused by singleton or collapsed samples).
+                    # Try to decrease the number of components, or increase reg_covar.
+                    logger.info("DEBUG: No suitable exploitation_gmm candidates were found due to exception.")
+                    logger.info(exception)
+
+            # ===== STEP 4: Get a list of outliers =====
+            if (threshold_samplessize_resampling is not None) and \
+                    (samples_size_unique >= threshold_samplessize_resampling):
+                logger.info("Getting candidates for re-sampling...")
+                results_outliers = gp_outlier_detection.outlierDetection_threaded(samples_x, samples_y_aggregation)
+
+                if results_outliers is not None:
+                    temp = len(candidates)
+
+                    for results_outlier in results_outliers:
+                        if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
+                            candidates.append({'hyperparameter': samples_x[results_outlier['samples_idx']],
+                                               'expected_mu': results_outlier['expected_mu'],
+                                               'expected_sigma': results_outlier['expected_sigma'],
+                                               'reason': "resampling"})
+                    logger.info("DEBUG: %d re-sampling candidates selected", len(candidates) - temp)
+                else:
+                    logger.info("DEBUG: No suitable re-sampling candidates were found")
+
+            if candidates:
+                # ===== STEP 5: Compute the information gain of each candidate towards the optimum =====
+                logger.info("Evaluating information gain of %d candidates...", len(candidates))
+                next_improvement = 0
+
+                threads_inputs = [[candidate, samples_x, samples_y, x_bounds, x_types,
+                                   minimize_constraints_fun, minimize_starting_points]
+                                  for candidate in candidates]
+                threads_pool = ThreadPool(4)
+                # Evaluate what would happen if we actually sampled each candidate
+                threads_results = threads_pool.map(_calculate_lowest_mu_threaded, threads_inputs)
+                threads_pool.close()
+                threads_pool.join()
+
+                for threads_result in threads_results:
+                    if threads_result['expected_lowest_mu'] < lm_current['expected_mu']:
+                        # Information gain
+                        temp_improvement = threads_result['expected_lowest_mu'] - lm_current['expected_mu']
+
+                        if next_improvement > temp_improvement:
+                            logger.info("DEBUG: 'next_candidate' changed: lowest mu might reduce from %f (%s) to %f (%s), %s",
+                                        lm_current['expected_mu'], str(lm_current['hyperparameter']),
+                                        threads_result['expected_lowest_mu'],
+                                        str(threads_result['candidate']['hyperparameter']),
+                                        threads_result['candidate']['reason'])
+
+                            next_improvement = temp_improvement
+                            next_candidate = threads_result['candidate']
+            else:
+                # ===== STEP 6: If we have no candidates, randomly pick one =====
+                logger.info("DEBUG: No candidates from exploration, exploitation and re-sampling; "
+                            "a random candidate will be used as next_candidate")
+
+                next_candidate = _rand_with_constraints(x_bounds, x_types) \
+                    if minimize_starting_points is None else minimize_starting_points[0]
+                next_candidate = lib_data.match_val_type(next_candidate, x_bounds, x_types)
+                expected_mu, expected_sigma = gp_prediction.predict(next_candidate, gp_model['model'])
+                next_candidate = {'hyperparameter': next_candidate, 'reason': "random",
+                                  'expected_mu': expected_mu, 'expected_sigma': expected_sigma}
+
+        outputs = self._pack_output(lm_current['hyperparameter'])
+        return outputs
+
+
+def _rand_with_constraints(x_bounds, x_types):
+    outputs = None
+    x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX]
+    x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX]
+
+    x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints,
+                                                          x_types_withconstraints,
+                                                          CONSTRAINT_LOWERBOUND, CONSTRAINT_UPPERBOUND)
+    if x_val_withconstraints is not None:
+        outputs = [None] * len(x_bounds)
+
+        for i, _ in enumerate(CONSTRAINT_PARAMS_IDX):
+            outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i]
+
+        for i, output in enumerate(outputs):
+            if not output:
+                outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1])
+    return outputs
+
+
+def _calculate_lowest_mu_threaded(inputs):
+    [candidate, samples_x, samples_y, x_bounds, x_types, minimize_constraints_fun, minimize_starting_points] = inputs
+
+    sys.stderr.write("[%s] Evaluating information gain of %s (%s)...\n" % \
+                     (os.path.basename(__file__), candidate['hyperparameter'], candidate['reason']))
+    outputs = {"candidate": candidate, "expected_lowest_mu": None}
+
+    for expected_mu in [candidate['expected_mu'] + 1.96 * candidate['expected_sigma'],
+                        candidate['expected_mu'] - 1.96 * candidate['expected_sigma']]:
+        temp_samples_x = copy.deepcopy(samples_x)
+        temp_samples_y = copy.deepcopy(samples_y)
+
+        try:
+            idx = temp_samples_x.index(candidate['hyperparameter'])
+            # This handles the case of re-sampling a potential outlier
+            temp_samples_y[idx].append(expected_mu)
+        except ValueError:
+            temp_samples_x.append(candidate['hyperparameter'])
+            temp_samples_y.append([expected_mu])
+
+        # Aggregate multiple observations of the same sampling points
+        temp_y_aggregation = [statistics.median(temp_sample_y) for temp_sample_y in temp_samples_y]
+        temp_gp = gp_create_model.create_model(temp_samples_x, temp_y_aggregation)
+        temp_results = gp_selection.selection("lm", temp_y_aggregation,
+                                              x_bounds, x_types, temp_gp['model'],
+                                              minimize_starting_points,
+                                              minimize_constraints_fun=minimize_constraints_fun)
+
+        if outputs["expected_lowest_mu"] is None or outputs["expected_lowest_mu"] > temp_results['expected_mu']:
+            outputs["expected_lowest_mu"] = temp_results['expected_mu']
+
+    return outputs
+
+
+def _num_past_samples(x, samples_x, samples_y):
+    try:
+        idx = samples_x.index(x)
+        return len(samples_y[idx])
+    except ValueError:
+        logger.info("x not in samples_x")
+        return 0
+
+
+def _rand_init(x_bounds, x_types, selection_num_starting_points):
+    '''
+    Randomly sample initial seeds within the given bounds.
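+
+    Returns a list of selection_num_starting_points points drawn with
+    lib_data.rand; these seed the optimizer's starting positions.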
+    '''
+    return [lib_data.rand(x_bounds, x_types) for i \
+            in range(0, selection_num_starting_points)]
+
+
+def get_median(temp_list):
+    '''
+    Return the median of a list of numbers.
+    '''
+    num = len(temp_list)
+    temp_list.sort()
+    if num % 2 == 0:
+        median = (temp_list[int(num / 2)] + temp_list[int(num / 2) - 1]) / 2
+    else:
+        median = temp_list[int(num / 2)]
+    return median
diff --git a/src/sdk/pynni/nni/metis_tuner/requirements.txt b/src/sdk/pynni/nni/metis_tuner/requirements.txt
new file mode 100644
index 0000000000..044bdd7586
--- /dev/null
+++ b/src/sdk/pynni/nni/metis_tuner/requirements.txt
@@ -0,0 +1 @@
+sklearn
\ No newline at end of file
diff --git a/src/sdk/pynni/requirements.txt b/src/sdk/pynni/requirements.txt
index 89de05d1c4..3adfb06cf5 100644
--- a/src/sdk/pynni/requirements.txt
+++ b/src/sdk/pynni/requirements.txt
@@ -4,4 +4,7 @@ json_tricks
 # hyperopt tuner
 numpy
 scipy
-hyperopt
\ No newline at end of file
+hyperopt
+
+# metis tuner
+sklearn
diff --git a/tools/nni_cmd/config_schema.py b/tools/nni_cmd/config_schema.py
index 0cc3824865..14c4bd3635 100644
--- a/tools/nni_cmd/config_schema.py
+++ b/tools/nni_cmd/config_schema.py
@@ -68,6 +68,16 @@
         Optional('n_output_node'): int,
       },
     Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
+},{
+    'builtinTunerName': 'MetisTuner',
+    'classArgs': {
+        Optional('optimize_mode'): Or('maximize', 'minimize'),
+        Optional('no_resampling'): bool,
+        Optional('no_candidates'): bool,
+        Optional('selection_num_starting_points'): int,
+        Optional('cold_start_num'): int,
+    },
+    Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
 },{
     'codeDir': os.path.exists,
     'classFileName': str,

From 6b8d3852010924ee185c1b3f749878fb3aa073c4 Mon Sep 17 00:00:00 2001
From: SparkSnail
Date: Tue, 8 Jan 2019 10:05:03 +0800
Subject: [PATCH 15/54] Change WARNING to INFO (#574)

change the warning level to info level when expanding relative paths
add nnictl --version log
update readme.md
---
 README.md                       | 11 ++++++-----
 docs/NNICTLDOC.md               | 14 +++++++++++++-
 tools/nni_cmd/launcher_utils.py |  4 ++--
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 58b034fc78..1d56023093 100644
--- a/README.md
+++ b/README.md
@@ -83,11 +83,12 @@ You can use these commands to get more information about the experiment
       commands                       description
 1. nnictl experiment show        show the information of experiments
 2. nnictl trial ls               list all of trial jobs
-3. nnictl log stderr             show stderr log content
-4. nnictl log stdout             show stdout log content
-5. nnictl stop                   stop an experiment
-6. nnictl trial kill             kill a trial job by id
-7. nnictl --help                 get help information about nnictl
+3. nnictl top                    monitor the status of running experiments
+4. nnictl log stderr             show stderr log content
+5. nnictl log stdout             show stdout log content
+6. nnictl stop                   stop an experiment
+7. nnictl trial kill             kill a trial job by id
+8. nnictl --help                 get help information about nnictl
-----------------------------------------------------------------------
 ```
diff --git a/docs/NNICTLDOC.md b/docs/NNICTLDOC.md
index a2d9e11fb7..112ebe201b 100644
--- a/docs/NNICTLDOC.md
+++ b/docs/NNICTLDOC.md
@@ -17,6 +17,7 @@ nnictl log
 nnictl webui
 nnictl tensorboard
 nnictl top
+nnictl --version
 ```
 ### Manage an experiment
 * __nnictl create__
@@ -362,4 +363,15 @@ nnictl top
   | Name, shorthand | Required|Default | Description |
   | ------ | ------ | ------ |------ |
-  | id| False| |ID of the experiment you want to set|
\ No newline at end of file
+  | id| False| |ID of the experiment you want to set|
+
+### Check nni version
+
+* __nnictl --version__
+  * Description
+
+    Show the version of NNI that is currently installed.
+
+  * Usage
+
+    nnictl --version
\ No newline at end of file
diff --git a/tools/nni_cmd/launcher_utils.py b/tools/nni_cmd/launcher_utils.py
index 151c3bfece..33f1cdbeda 100644
--- a/tools/nni_cmd/launcher_utils.py
+++ b/tools/nni_cmd/launcher_utils.py
@@ -21,7 +21,7 @@
 import os
 import json
 from .config_schema import LOCAL_CONFIG_SCHEMA, REMOTE_CONFIG_SCHEMA, PAI_CONFIG_SCHEMA, KUBEFLOW_CONFIG_SCHEMA, FRAMEWORKCONTROLLER_CONFIG_SCHEMA
-from .common_utils import get_json_content, print_error, print_warning
+from .common_utils import get_json_content, print_error, print_warning, print_normal

 def expand_path(experiment_config, key):
     '''Change '~' to user home directory'''
@@ -32,7 +32,7 @@ def parse_relative_path(root_path, experiment_config, key):
     '''Change relative path to absolute path'''
     if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
         absolute_path = os.path.join(root_path, experiment_config.get(key))
-        print_warning('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
+        print_normal('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
         experiment_config[key] = absolute_path

 def parse_time(experiment_config):

From 0a3cc459d497ed838455c4954472dbdde32939b0 Mon Sep 17 00:00:00 2001
From: Lee
Date: Tue, 8 Jan 2019 13:06:08 +0800
Subject: [PATCH 16/54] Fix some bugs in doc and log (#561)

* fix some bugs in doc and log
* make intermediate results focus on validation-set accuracy rather than
  training-set accuracy
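In Keras, logs['acc'] at epoch end is the training accuracy, while logs['val_acc']
is only populated when the model is fit with validation data. A minimal sketch of
the reporting callback after this change (the class name SendMetrics is illustrative,
and it assumes model.fit(..., validation_data=...) so that 'val_acc' exists in logs):

    import nni
    import keras

    class SendMetrics(keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            # report validation accuracy to NNI as the intermediate result
            logs = logs or {}
            nni.report_intermediate_result(logs['val_acc'])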
--- .../mnist-batch-tune-keras/mnist-keras.py | 2 +- examples/trials/mnist-keras/mnist-keras.py | 2 +- .../FashionMNIST/FashionMNIST_keras.py | 6 ++--- .../FashionMNIST/FashionMNIST_pytorch.py | 2 +- .../network_morphism/cifar10/cifar10_keras.py | 4 ++-- .../trials/network_morphism/requirements.txt | 2 +- examples/trials/network_morphism/utils.py | 22 ++++++++++++++----- 7 files changed, 26 insertions(+), 14 deletions(-) diff --git a/examples/trials/mnist-batch-tune-keras/mnist-keras.py b/examples/trials/mnist-batch-tune-keras/mnist-keras.py index 133a52b25a..9012feb454 100644 --- a/examples/trials/mnist-batch-tune-keras/mnist-keras.py +++ b/examples/trials/mnist-batch-tune-keras/mnist-keras.py @@ -84,7 +84,7 @@ def on_epoch_end(self, epoch, logs={}): Run on end of each epoch ''' LOG.debug(logs) - nni.report_intermediate_result(logs['acc']) + nni.report_intermediate_result(logs["val_acc"]) def train(args, params): ''' diff --git a/examples/trials/mnist-keras/mnist-keras.py b/examples/trials/mnist-keras/mnist-keras.py index 27e26e152b..f26dd8c389 100644 --- a/examples/trials/mnist-keras/mnist-keras.py +++ b/examples/trials/mnist-keras/mnist-keras.py @@ -84,7 +84,7 @@ def on_epoch_end(self, epoch, logs={}): Run on end of each epoch ''' LOG.debug(logs) - nni.report_intermediate_result(logs['acc']) + nni.report_intermediate_result(logs["val_acc"]) def train(args, params): ''' diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py index 01c77c4405..1795b2928a 100644 --- a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py @@ -40,7 +40,7 @@ datefmt="%m/%d %I:%M:%S %p", ) # set the logger format -logger = logging.getLogger("fashion_mnist-network-morphism-keras") +logger = logging.getLogger("FashionMNIST-network-morphism-keras") # restrict gpu usage background @@ -152,7 +152,7 @@ def on_epoch_end(self, epoch, logs=None): if logs is None: logs = dict() logger.debug(logs) - nni.report_intermediate_result(logs["acc"]) + nni.report_intermediate_result(logs["val_acc"]) # Training @@ -184,7 +184,7 @@ def train_eval(): # trial report final acc to tuner _, acc = net.evaluate(x_test, y_test) - logger.debug("Final result is: %d", acc) + logger.debug("Final result is: %.3f", acc) nni.report_final_result(acc) diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py index 37ee2670e7..90a26cc697 100644 --- a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py @@ -42,7 +42,7 @@ ) # pylint: disable=W0603 # set the logger format -logger = logging.getLogger("FashionMNIST-network-morphism") +logger = logging.getLogger("FashionMNIST-network-morphism-pytorch") def get_args(): diff --git a/examples/trials/network_morphism/cifar10/cifar10_keras.py b/examples/trials/network_morphism/cifar10/cifar10_keras.py index 04e10cf8af..453ef4cd3b 100644 --- a/examples/trials/network_morphism/cifar10/cifar10_keras.py +++ b/examples/trials/network_morphism/cifar10/cifar10_keras.py @@ -152,7 +152,7 @@ def on_epoch_end(self, epoch, logs=None): if logs is None: logs = dict() logger.debug(logs) - nni.report_intermediate_result(logs["acc"]) + nni.report_intermediate_result(logs["val_acc"]) # Training @@ -184,7 +184,7 @@ def train_eval(): # trial report final acc to 
tuner
     _, acc = net.evaluate(x_test, y_test)
-    logger.debug("Final result is: %d", acc)
+    logger.debug("Final result is: %.3f", acc)
     nni.report_final_result(acc)

diff --git a/examples/trials/network_morphism/requirements.txt b/examples/trials/network_morphism/requirements.txt
index c7c29b39fd..3576f56502 100644
--- a/examples/trials/network_morphism/requirements.txt
+++ b/examples/trials/network_morphism/requirements.txt
@@ -2,5 +2,5 @@ numpy==1.14.2
 tensorflow==1.12.0
 torchvision==0.2.1
 Keras==2.2.2
-nni==0.3.0
+nni==0.4.1
 torch==0.4.1
diff --git a/examples/trials/network_morphism/utils.py b/examples/trials/network_morphism/utils.py
index 5c98793615..24869227db 100644
--- a/examples/trials/network_morphism/utils.py
+++ b/examples/trials/network_morphism/utils.py
@@ -1,8 +1,20 @@
-"""Some helper functions for PyTorch, including:
-    - get_mean_and_std: calculate the mean and std value of dataset.
-    - msr_init: net parameter initialization.
-    - progress_bar: progress bar mimic xlua.progress.
-"""
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
 import numpy as np
 import torch
 import torch.nn as nn

From 95d194781105297ba87e3e643cbc08cc4765a96d Mon Sep 17 00:00:00 2001
From: fishyds
Date: Tue, 8 Jan 2019 13:53:20 +0800
Subject: [PATCH 17/54] Fix a race condition issue in trial_keeper for reading
 log from pipe (#578)

* Fix a race condition issue in trial_keeper for reading log from pipe
---
 tools/nni_trial_tool/log_utils.py    | 17 ++++++++++++-----
 tools/nni_trial_tool/trial_keeper.py |  2 +-
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/tools/nni_trial_tool/log_utils.py b/tools/nni_trial_tool/log_utils.py
index e4e63731d7..b1e6e66bf5 100644
--- a/tools/nni_trial_tool/log_utils.py
+++ b/tools/nni_trial_tool/log_utils.py
@@ -129,13 +129,15 @@ def __init__(self, logger, log_level=logging.INFO):
         self.pipeReader = os.fdopen(self.fdRead)
         self.orig_stdout = sys.__stdout__
         self._is_read_completed = False
+        self.process_exit = False

     def _populateQueue(stream, queue):
         '''
             Collect lines from 'stream' and put them in 'queue'.
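            The reader used to stop as soon as a read timed out, which could race
            with a trial process that was still writing output; it now keeps
            reading until the process is marked as exited via set_process_exit()
            (see the trial_keeper.py hunk below).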
''' time.sleep(5) - while True: + while True: + cur_process_exit = self.process_exit try: line = self.queue.get(True, 5) try: @@ -144,9 +146,10 @@ def _populateQueue(stream, queue): self.orig_stdout.flush() except Exception as e: pass - except Exception as e: - self._is_read_completed = True - break + except Exception as e: + if cur_process_exit == True: + self._is_read_completed = True + break self.pip_log_reader_thread = threading.Thread(target = _populateQueue, args = (self.pipeReader, self.queue)) @@ -175,4 +178,8 @@ def close(self): def is_read_completed(self): """Return if read is completed """ - return self._is_read_completed \ No newline at end of file + return self._is_read_completed + + def set_process_exit(self): + self.process_exit = True + return self.process_exit \ No newline at end of file diff --git a/tools/nni_trial_tool/trial_keeper.py b/tools/nni_trial_tool/trial_keeper.py index 079d7b58f3..1675f8bf49 100644 --- a/tools/nni_trial_tool/trial_keeper.py +++ b/tools/nni_trial_tool/trial_keeper.py @@ -65,7 +65,7 @@ def main_loop(args): while True: retCode = process.poll() # child worker process exits and all stdout data is read - if retCode is not None and log_pipe_stdout.is_read_completed == True: + if retCode is not None and log_pipe_stdout.set_process_exit() and log_pipe_stdout.is_read_completed == True: nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode)) if args.pai_hdfs_output_dir is not None: # Copy local directory to hdfs for OpenPAI From f437107d404de5fda5350da45c751672d98c4bb2 Mon Sep 17 00:00:00 2001 From: Lijiao <35484733+lvybriage@users.noreply.github.com> Date: Tue, 8 Jan 2019 13:55:01 +0800 Subject: [PATCH 18/54] [WebUI] Fix issue#458 about final result as dict (#563) * [WebUI] Fix issue#458 about final result as dict * Fix comments * fix bug --- src/webui/src/components/Overview.tsx | 4 + src/webui/src/components/TrialsDetail.tsx | 58 ++- .../src/components/overview/Progress.tsx | 16 +- .../src/components/trial-detail/TableList.tsx | 461 ++++++++++++------ src/webui/src/static/const.ts | 45 +- src/webui/src/static/function.ts | 20 +- src/webui/src/static/interface.ts | 23 +- src/webui/src/static/style/search.scss | 14 + 8 files changed, 462 insertions(+), 179 deletions(-) diff --git a/src/webui/src/components/Overview.tsx b/src/webui/src/components/Overview.tsx index 24f49714e9..7a0ed8cadb 100644 --- a/src/webui/src/components/Overview.tsx +++ b/src/webui/src/components/Overview.tsx @@ -209,6 +209,10 @@ class Overview extends React.Component<{}, OverviewState> { profile.failTrial += 1; break; + case 'RUNNING': + profile.runTrial += 1; + break; + case 'USER_CANCELED': case 'SYS_CANCELED': profile.stopTrial += 1; diff --git a/src/webui/src/components/TrialsDetail.tsx b/src/webui/src/components/TrialsDetail.tsx index 7b5d8760dc..0b614104df 100644 --- a/src/webui/src/components/TrialsDetail.tsx +++ b/src/webui/src/components/TrialsDetail.tsx @@ -1,10 +1,10 @@ import * as React from 'react'; import axios from 'axios'; import { MANAGER_IP } from '../static/const'; -import { Row, Col, Tabs, Input, Select } from 'antd'; +import { Row, Col, Tabs, Input, Select, Button } from 'antd'; const Option = Select.Option; -import { TableObj, Parameters, DetailAccurPoint, TooltipForAccuracy } from '../static/interface'; -import { getFinalResult } from '../static/function'; +import { TableObjFianl, Parameters, DetailAccurPoint, TooltipForAccuracy } from '../static/interface'; +import { getFinalResult, getFinal } from '../static/function'; import 
Accuracy from './overview/Accuracy'; import Duration from './trial-detail/Duration'; import Title1 from './overview/Title1'; @@ -16,8 +16,8 @@ import '../static/style/trialsDetail.scss'; interface TrialDetailState { accSource: object; accNodata: string; - tableListSource: Array; - searchResultSource: Array; + tableListSource: Array; + searchResultSource: Array; isHasSearch: boolean; experimentStatus: string; entriesTable: number; @@ -30,6 +30,8 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { public interTableList = 1; public interAllTableList = 2; + public tableList: TableList | null; + constructor(props: {}) { super(props); @@ -40,7 +42,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { searchResultSource: [], experimentStatus: '', entriesTable: 20, - isHasSearch: false + isHasSearch: false, }; } // trial accuracy graph @@ -132,7 +134,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { .then(res => { if (res.status === 200) { const trialJobs = res.data; - const trialTable: Array = []; + const trialTable: Array = []; Object.keys(trialJobs).map(item => { // only succeeded trials have finalMetricData let desc: Parameters = { @@ -167,7 +169,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { if (trialJobs[item].logPath !== undefined) { desc.logPath = trialJobs[item].logPath; } - const acc = getFinalResult(trialJobs[item].finalMetricData); + const acc = getFinal(trialJobs[item].finalMetricData); trialTable.push({ key: trialTable.length, sequenceId: trialJobs[item].sequenceId, @@ -185,7 +187,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { Object.keys(searchResultSource).map(index => { temp.push(searchResultSource[index].id); }); - const searchResultList: Array = []; + const searchResultList: Array = []; for (let i = 0; i < temp.length; i++) { Object.keys(trialTable).map(key => { const item = trialTable[key]; @@ -217,7 +219,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { .then(res => { if (res.status === 200) { const trialJobs = res.data; - const trialTable: Array = []; + const trialTable: Array = []; Object.keys(trialJobs).map(item => { // only succeeded trials have finalMetricData let desc: Parameters = { @@ -252,7 +254,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { if (trialJobs[item].logPath !== undefined) { desc.logPath = trialJobs[item].logPath; } - const acc = getFinalResult(trialJobs[item].finalMetricData); + const acc = getFinal(trialJobs[item].finalMetricData); trialTable.push({ key: trialTable.length, sequenceId: trialJobs[item].sequenceId, @@ -308,7 +310,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } else { window.clearInterval(this.interAllTableList); const { tableListSource } = this.state; - const searchResultList: Array = []; + const searchResultList: Array = []; Object.keys(tableListSource).map(key => { const item = tableListSource[key]; if (item.sequenceId.toString() === targetValue || item.id.includes(targetValue)) { @@ -364,6 +366,10 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } } + test = () => { + alert('TableList component was not properly initialized.'); + } + componentDidMount() { this._isMounted = true; @@ -429,13 +435,26 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { entries - {/* Search: */} - + + + + + + {/* Search: */} + + + { updateList={this.drawTableList} searchResult={searchResultSource} isHasSearch={isHasSearch} + ref={(tabList) => 
this.tableList = tabList}
                        />
                    );
diff --git a/src/webui/src/components/overview/Progress.tsx b/src/webui/src/components/overview/Progress.tsx
index f81af7931c..a0858403da 100644
--- a/src/webui/src/components/overview/Progress.tsx
+++ b/src/webui/src/components/overview/Progress.tsx
@@ -242,45 +242,45 @@ class Progressed extends React.Component {
                     maxString={`MaxTrialNumber: ${trialProfile.MaxTrialNum}`}
                 />
-                    Best Default Metric
+                    best metric
                     {bestAccuracy}
-                    Time Spent
+                    spent
                     {convertTime(trialProfile.execDuration)}
-                    Remaining Time
+                    remaining
                     {remaining}
-                    MaxDuration
-                    {convertTime(trialProfile.maxDuration)}
+                    running
+                    {trialNumber.runTrial}
-                    Succeed Trial
+                    succeed
                     {trialNumber.succTrial}
-                    Stopped Trial
+                    stopped
                     {trialNumber.stopTrial}
-                    Failed Trial
+                    failed
                     {trialNumber.failTrial}
diff --git a/src/webui/src/components/trial-detail/TableList.tsx b/src/webui/src/components/trial-detail/TableList.tsx index 827259ab81..5334af727b 100644 --- a/src/webui/src/components/trial-detail/TableList.tsx +++ b/src/webui/src/components/trial-detail/TableList.tsx @@ -2,11 +2,13 @@ import * as React from 'react'; import axios from 'axios'; import JSONTree from 'react-json-tree'; import ReactEcharts from 'echarts-for-react'; -import { Row, Table, Button, Popconfirm, Modal, message } from 'antd'; -import { MANAGER_IP, trialJobStatus } from '../../static/const'; +import { Row, Table, Button, Popconfirm, Modal, message, Checkbox } from 'antd'; +const CheckboxGroup = Checkbox.Group; +import { MANAGER_IP, trialJobStatus, COLUMN, COLUMN_INDEX } from '../../static/const'; import { convertDuration } from '../../static/function'; -import { TableObj, TrialJob } from '../../static/interface'; +import { TableObjFianl, TrialJob } from '../../static/interface'; import LogPath from '../logPath/LogPath'; +import '../../static/style/search.scss'; require('../../static/style/tableStatus.css'); require('../../static/style/logPath.scss'); require('../../static/style/search.scss'); @@ -22,8 +24,8 @@ echarts.registerTheme('my_theme', { interface TableListProps { entries: number; - tableSource: Array; - searchResult: Array; + tableSource: Array; + searchResult: Array; updateList: Function; isHasSearch: boolean; } @@ -31,6 +33,14 @@ interface TableListProps { interface TableListState { intermediateOption: object; modalVisible: boolean; + isObjFinal: boolean; + isShowColumn: boolean; + columnSelected: Array; // user select columnKeys +} + +interface ColumnIndex { + name: string; + index: number; } class TableList extends React.Component { @@ -41,7 +51,10 @@ class TableList extends React.Component { this.state = { intermediateOption: {}, - modalVisible: false + modalVisible: false, + isObjFinal: false, + isShowColumn: false, + columnSelected: COLUMN, }; } @@ -79,6 +92,14 @@ class TableList extends React.Component { } } + hideShowColumnModal = () => { + if (this._isMounted) { + this.setState({ + isShowColumn: false + }); + } + } + intermediateGraphOption = (intermediateArr: number[], id: string) => { const sequence: number[] = []; const lengthInter = intermediateArr.length; @@ -143,6 +164,67 @@ class TableList extends React.Component { }); } + // click add column btn, just show the modal of addcolumn + addColumn = () => { + // show user select check button + if (this._isMounted) { + this.setState({ + isShowColumn: true + }); + } + } + + // checkbox for coloumn + selectedColumn = (checkedValues: Array) => { + let count = 6; + const want: Array = []; + const finalKeys: Array = []; + const wantResult: Array = []; + Object.keys(checkedValues).map(m => { + switch (checkedValues[m]) { + case 'Trial No': + case 'id': + case 'duration': + case 'status': + case 'Operation': + case 'Default': + case 'Intermediate Result': + break; + default: + finalKeys.push(checkedValues[m]); + } + }); + + Object.keys(finalKeys).map(n => { + want.push({ + name: finalKeys[n], + index: count++ + }); + }); + + Object.keys(checkedValues).map(item => { + const temp = checkedValues[item]; + Object.keys(COLUMN_INDEX).map(key => { + const index = COLUMN_INDEX[key]; + if (index.name === temp) { + want.push(index); + } + }); + }); + + want.sort((a: ColumnIndex, b: ColumnIndex) => { + return a.index - b.index; + }); + + Object.keys(want).map(i => { + wantResult.push(want[i].name); + }); + + if (this._isMounted) { + this.setState(() => ({ 
columnSelected: wantResult })); + } + } + componentDidMount() { this._isMounted = true; } @@ -154,7 +236,27 @@ class TableList extends React.Component { render() { const { entries, tableSource, searchResult, isHasSearch } = this.props; - const { intermediateOption, modalVisible } = this.state; + const { intermediateOption, modalVisible, isShowColumn, columnSelected, + } = this.state; + + let showTitle = COLUMN; + if (tableSource.length >= 1) { + const temp = tableSource[0].acc; + if (temp !== undefined && typeof temp === 'object') { + if (this._isMounted) { + // concat default column and finalkeys + const item = Object.keys(temp); + const want: Array = []; + Object.keys(item).map(key => { + if (item[key] !== 'default') { + want.push(item[key]); + } + }); + showTitle = COLUMN.concat(want); + } + } + } + let bgColor = ''; const trialJob: Array = []; trialJobStatus.map(item => { @@ -163,146 +265,202 @@ class TableList extends React.Component { value: item }); }); - - const columns = [{ - title: 'Trial No.', - dataIndex: 'sequenceId', - key: 'sequenceId', - width: 120, - className: 'tableHead', - sorter: (a: TableObj, b: TableObj) => (a.sequenceId as number) - (b.sequenceId as number) - }, { - title: 'Id', - dataIndex: 'id', - key: 'id', - width: 60, - className: 'tableHead idtitle', - // the sort of string - sorter: (a: TableObj, b: TableObj): number => a.id.localeCompare(b.id), - render: (text: string, record: TableObj) => { - return ( -
{record.id}
- ); - } - }, { - title: 'Duration', - dataIndex: 'duration', - key: 'duration', - width: 140, - // the sort of number - sorter: (a: TableObj, b: TableObj) => (a.duration as number) - (b.duration as number), - render: (text: string, record: TableObj) => { - let duration; - if (record.duration !== undefined && record.duration > 0) { - duration = convertDuration(record.duration); - } else { - duration = 0; - } - return ( -
{duration}
- ); - }, - }, { - title: 'Status', - dataIndex: 'status', - key: 'status', - width: 150, - className: 'tableStatus', - render: (text: string, record: TableObj) => { - bgColor = record.status; - return ( - {record.status} - ); - }, - filters: trialJob, - onFilter: (value: string, record: TableObj) => record.status.indexOf(value) === 0, - sorter: (a: TableObj, b: TableObj): number => a.status.localeCompare(b.status) - }, { - title: 'Default Metric', - dataIndex: 'acc', - key: 'acc', - width: 200, - sorter: (a: TableObj, b: TableObj) => (a.acc as number) - (b.acc as number), - render: (text: string, record: TableObj) => { - const accuracy = record.acc; - let wei = 0; - if (accuracy) { - if (accuracy.toString().indexOf('.') !== -1) { - wei = accuracy.toString().length - accuracy.toString().indexOf('.') - 1; - } - } - return ( -
- { - record.acc - ? - wei > 6 + const showColumn: Array = []; + Object.keys(columnSelected).map(key => { + const item = columnSelected[key]; + switch (item) { + case 'Trial No': + showColumn.push({ + title: 'Trial No.', + dataIndex: 'sequenceId', + key: 'sequenceId', + width: 120, + className: 'tableHead', + sorter: + (a: TableObjFianl, b: TableObjFianl) => + (a.sequenceId as number) - (b.sequenceId as number) + }); + break; + case 'id': + showColumn.push({ + title: 'Id', + dataIndex: 'id', + key: 'id', + width: 60, + className: 'tableHead idtitle', + // the sort of string + sorter: (a: TableObjFianl, b: TableObjFianl): number => a.id.localeCompare(b.id), + render: (text: string, record: TableObjFianl) => { + return ( +
{record.id}
+ ); + } + }); + break; + case 'duration': + showColumn.push({ + title: 'Duration', + dataIndex: 'duration', + key: 'duration', + width: 140, + // the sort of number + sorter: (a: TableObjFianl, b: TableObjFianl) => (a.duration as number) - (b.duration as number), + render: (text: string, record: TableObjFianl) => { + let duration; + if (record.duration !== undefined && record.duration > 0) { + duration = convertDuration(record.duration); + } else { + duration = 0; + } + return ( +
{duration}
+ ); + }, + }); + break; + case 'status': + showColumn.push({ + title: 'Status', + dataIndex: 'status', + key: 'status', + width: 150, + className: 'tableStatus', + render: (text: string, record: TableObjFianl) => { + bgColor = record.status; + return ( + {record.status} + ); + }, + filters: trialJob, + onFilter: (value: string, record: TableObjFianl) => record.status.indexOf(value) === 0, + sorter: (a: TableObjFianl, b: TableObjFianl): number => a.status.localeCompare(b.status) + }); + break; + case 'Default': + showColumn.push({ + title: 'Default Metric', + dataIndex: 'acc', + key: 'acc', + width: 200, + sorter: (a: TableObjFianl, b: TableObjFianl) => { + if (a.acc !== undefined && b.acc !== undefined) { + return JSON.parse(a.acc.default) - JSON.parse(b.acc.default); + } else { + return NaN; + } + }, + render: (text: string, record: TableObjFianl) => { + let accuracy; + if (record.acc !== undefined) { + accuracy = record.acc.default; + } + let wei = 0; + if (accuracy) { + if (accuracy.toString().indexOf('.') !== -1) { + wei = accuracy.toString().length - accuracy.toString().indexOf('.') - 1; + } + } + return ( +
+ { + record.acc && record.acc.default + ? + wei > 6 + ? + JSON.parse(record.acc.default).toFixed(6) + : + record.acc.default + : + '--' + } +
+ ); + } + }); + break; + case 'Operation': + showColumn.push({ + title: 'Operation', + dataIndex: 'operation', + key: 'operation', + width: 90, + render: (text: string, record: TableObjFianl) => { + let trialStatus = record.status; + let flagKill = false; + if (trialStatus === 'RUNNING') { + flagKill = true; + } else { + flagKill = false; + } + return ( + flagKill ? - record.acc.toFixed(6) + ( + + + + ) : - record.acc - : - '--' + ( + + ) + ); + }, + }); + break; + + case 'Intermediate Result': + showColumn.push({ + title: 'Intermediate Result', + dataIndex: 'intermediate', + key: 'intermediate', + width: '16%', + render: (text: string, record: TableObjFianl) => { + return ( + + ); + }, + }); + break; + default: + showColumn.push({ + title: item, + dataIndex: item, + key: item, + width: 150, + render: (text: string, record: TableObjFianl) => { + return ( +
+ { + record.acc + ? + record.acc[item] + : + '--' + } +
+ ); } - - ); + }); } - }, { - title: 'Operation', - dataIndex: 'operation', - key: 'operation', - width: 90, - render: (text: string, record: TableObj) => { - let trialStatus = record.status; - let flagKill = false; - if (trialStatus === 'RUNNING') { - flagKill = true; - } else { - flagKill = false; - } - return ( - flagKill - ? - ( - - - - ) - : - ( - - ) - ); - }, - }, { - title: 'Intermediate Result', - dataIndex: 'intermediate', - key: 'intermediate', - width: '16%', - render: (text: string, record: TableObj) => { - return ( - - ); - }, - } - ]; + }); - const openRow = (record: TableObj) => { + const openRow = (record: TableObjFianl) => { let isHasParameters = true; if (record.description.parameters.error) { isHasParameters = false; @@ -341,12 +499,13 @@ class TableList extends React.Component {
+ {/* Intermediate Result Modal */} { /> + {/* Add Column Modal */} + + + ); } diff --git a/src/webui/src/static/const.ts b/src/webui/src/static/const.ts index 0606685960..294679e7d7 100644 --- a/src/webui/src/static/const.ts +++ b/src/webui/src/static/const.ts @@ -1,6 +1,6 @@ -export const MANAGER_IP = `/api/v1/nni`; -export const DOWNLOAD_IP = `/logs`; -export const trialJobStatus = [ +const MANAGER_IP = `/api/v1/nni`; +const DOWNLOAD_IP = `/logs`; +const trialJobStatus = [ 'UNKNOWN', 'WAITING', 'RUNNING', @@ -10,12 +10,47 @@ export const trialJobStatus = [ 'SYS_CANCELED', 'EARLY_STOPPED' ]; -export const CONTROLTYPE = [ +const CONTROLTYPE = [ 'SEARCH_SPACE', 'TRIAL_CONCURRENCY', 'MAX_EXEC_DURATION' ]; -export const MONACO = { +const MONACO = { readOnly: true, automaticLayout: true }; +const COLUMN_INDEX = [ + { + name: 'Trial No', + index: 1 + }, + { + name: 'id', + index: 2 + }, + { + name: 'duration', + index: 3 + }, + { + name: 'status', + index: 4 + }, + { + name: 'Default', + index: 5 + }, + { + name: 'Operation', + index: 10000 + }, + { + name: 'Intermediate Result', + index: 10001 + } +]; +const COLUMN = ['Trial No', 'id', 'duration', 'status', 'Default', 'Operation', 'Intermediate Result']; +export { + MANAGER_IP, DOWNLOAD_IP, trialJobStatus, + CONTROLTYPE, MONACO, COLUMN, COLUMN_INDEX +}; diff --git a/src/webui/src/static/function.ts b/src/webui/src/static/function.ts index 197d96fb12..fc872e8bdf 100644 --- a/src/webui/src/static/function.ts +++ b/src/webui/src/static/function.ts @@ -1,4 +1,4 @@ -import { FinalResult } from './interface'; +import { FinalResult, FinalType } from './interface'; const convertTime = (num: number) => { if (num % 3600 === 0) { @@ -28,6 +28,7 @@ const convertDuration = (num: number) => { }; // get final result value +// draw Accuracy point graph const getFinalResult = (final: FinalResult) => { let acc; let showDefault = 0; @@ -46,6 +47,21 @@ const getFinalResult = (final: FinalResult) => { } }; +// get final result value // acc obj +const getFinal = (final: FinalResult) => { + let showDefault: FinalType; + if (final) { + showDefault = JSON.parse(final[0].data); + if (typeof showDefault === 'number') { + showDefault = { default: showDefault }; + } + return showDefault; + } else { + return undefined; + } +}; + export { - convertTime, convertDuration, getFinalResult + convertTime, convertDuration, getFinalResult, + getFinal }; diff --git a/src/webui/src/static/interface.ts b/src/webui/src/static/interface.ts index 573b588dbf..1d13263f86 100644 --- a/src/webui/src/static/interface.ts +++ b/src/webui/src/static/interface.ts @@ -1,16 +1,34 @@ +// draw accuracy graph data interface interface TableObj { key: number; sequenceId: number; id: string; duration: number; status: string; - acc?: number; + acc?: number; // draw accuracy graph description: Parameters; color?: string; } + +interface TableObjFianl { + key: number; + sequenceId: number; + id: string; + duration: number; + status: string; + acc?: FinalType; + description: Parameters; + color?: string; +} + +interface FinalType { + default: string; +} + interface ErrorParameter { error?: string; } + interface Parameters { parameters: ErrorParameter; logPath?: string; @@ -93,5 +111,6 @@ export { TableObj, Parameters, Experiment, AccurPoint, TrialNumber, TrialJob, DetailAccurPoint, TooltipForAccuracy, - ParaObj, VisualMapValue, Dimobj, FinalResult + ParaObj, VisualMapValue, Dimobj, FinalResult, + TableObjFianl, FinalType }; diff --git a/src/webui/src/static/style/search.scss 
b/src/webui/src/static/style/search.scss index 5412fe1b97..d72521bc67 100644 --- a/src/webui/src/static/style/search.scss +++ b/src/webui/src/static/style/search.scss @@ -1,3 +1,4 @@ +/* some buttons about trial-detail table */ .allList{ width: 96%; margin: 0 auto; @@ -17,4 +18,17 @@ } } +.titleColumn{ + .ant-checkbox-group-item{ + display: block; + } +} + +.applyfooter{ + /* apply button style */ + .apply{ + text-align: right; + } +} + From e6eb6eaba2dbc6bcd3dda4f26c7d03c553475c45 Mon Sep 17 00:00:00 2001 From: SparkSnail Date: Tue, 8 Jan 2019 14:28:53 +0800 Subject: [PATCH 19/54] support frameworkcontroller log (#572) support frameworkcontroller log --- .../mnist/config_frameworkcontroller.yml | 41 +++++++++++++++++++ src/sdk/pynni/nni/platform/local.py | 4 +- 2 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 examples/trials/mnist/config_frameworkcontroller.yml diff --git a/examples/trials/mnist/config_frameworkcontroller.yml b/examples/trials/mnist/config_frameworkcontroller.yml new file mode 100644 index 0000000000..f634630754 --- /dev/null +++ b/examples/trials/mnist/config_frameworkcontroller.yml @@ -0,0 +1,41 @@ +authorName: default +experimentName: example_mnist +trialConcurrency: 1 +maxExecDuration: 1h +maxTrialNum: 10 +#choice: local, remote, pai, kubeflow +trainingServicePlatform: frameworkcontroller +searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +assessor: + builtinAssessorName: Medianstop + classArgs: + optimize_mode: maximize + gpuNum: 0 +trial: + codeDir: . + taskRoles: + - name: worker + taskNum: 1 + command: python3 mnist.py + gpuNum: 1 + cpuNum: 1 + memoryMB: 8192 + image: msranni/nni:latest + frameworkAttemptCompletionPolicy: + minFailedTaskCount: 1 + minSucceededTaskCount: 1 +frameworkcontrollerConfig: + storage: nfs + nfs: + # Your NFS server IP, like 10.10.10.10 + server: {your_nfs_server_ip} + # Your NFS server export path, like /var/nfs/nni + path: {your_nfs_server_export_path} \ No newline at end of file diff --git a/src/sdk/pynni/nni/platform/local.py b/src/sdk/pynni/nni/platform/local.py index afc7a878b0..783b0d02a0 100644 --- a/src/sdk/pynni/nni/platform/local.py +++ b/src/sdk/pynni/nni/platform/local.py @@ -36,7 +36,7 @@ os.makedirs(_outputdir) _nni_platform = os.environ['NNI_PLATFORM'] -if _nni_platform not in ['pai', 'kubeflow']: +if _nni_platform not in ['pai', 'kubeflow', 'frameworkcontroller']: _log_file_path = os.path.join(_outputdir, 'trial.log') init_logger(_log_file_path) @@ -77,7 +77,7 @@ def get_next_parameter(): return params def send_metric(string): - if _nni_platform in ['pai', 'kubeflow']: + if _nni_platform in ['pai', 'kubeflow', 'frameworkcontroller']: data = (string).encode('utf8') assert len(data) < 1000000, 'Metric too long' print('NNISDK_ME%s' % (data)) From 358efb26b6d7576bc8179a37c190e5f916773b5e Mon Sep 17 00:00:00 2001 From: Yan Ni Date: Tue, 8 Jan 2019 15:29:48 +0800 Subject: [PATCH 20/54] Dev weight sharing (#568) (#576) * Dev weight sharing (#568) * add pycharm project files to .gitignore list * update pylintrc to conform vscode settings * fix RemoteMachineMode for wrong trainingServicePlatform * simple weight sharing * update gitignore file * change tuner codedir to relative path * add python cache files to gitignore list * move extract scalar reward logic from dispatcher to tuner * update tuner code corresponding to last commit * update doc 
for receive_trial_result api change * add numpy to package whitelist of pylint * distinguish param value from return reward for tuner.extract_scalar_reward * update pylintrc * add comments to dispatcher.handle_report_metric_data * update install for mac support * fix root mode bug on Makefile * Quick fix bug: nnictl port value error (#245) * fix port bug * Dev exp stop more (#221) * Exp stop refactor (#161) * Update RemoteMachineMode.md (#63) * Remove unused classes for SQuAD QA example. * Remove more unused functions for SQuAD QA example. * Fix default dataset config. * Add Makefile README (#64) * update document (#92) * Edit readme.md * updated a word * Update GetStarted.md * Update GetStarted.md * refact readme, getstarted and write your trial md. * Update README.md * Update WriteYourTrial.md * Update WriteYourTrial.md * Update WriteYourTrial.md * Update WriteYourTrial.md * Fix nnictl bugs and add new feature (#75) * fix nnictl bug * fix nnictl create bug * add experiment status logic * add more information for nnictl * fix Evolution Tuner bug * refactor code * fix code in updater.py * fix nnictl --help * fix classArgs bug * update check response.status_code logic * remove Buffer warning (#100) * update readme in ga_squad * update readme * fix typo * Update README.md * Update README.md * Update README.md * Add support for debugging mode * fix setup.py (#115) * Add DAG model configuration format for SQuAD example. * Explain config format for SQuAD QA model. * Add more detailed introduction about the evolution algorithm. * Fix install.sh add add trial log path (#109) * fix nnictl bug * fix nnictl create bug * add experiment status logic * add more information for nnictl * fix Evolution Tuner bug * refactor code * fix code in updater.py * fix nnictl --help * fix classArgs bug * update check response.status_code logic * show trial log path * update document * fix install.sh * set default vallue for maxTrialNum and maxExecDuration * fix nnictl * Dev smac (#116) * support package install (#91) * fix nnictl bug * support package install * update * update package install logic * Fix package install issue (#95) * fix nnictl bug * fix pakcage install * support SMAC as a tuner on nni (#81) * update doc * update doc * update doc * update hyperopt installation * update doc * update doc * update description in setup.py * update setup.py * modify encoding * encoding * add encoding * remove pymc3 * update doc * update builtin tuner spec * support smac in sdk, fix logging issue * support smac tuner * add optimize_mode * update config in nnictl * add __init__.py * update smac * update import path * update setup.py: remove entry_point * update rest server validation * fix bug in nnictl launcher * support classArgs: optimize_mode * quick fix bug * test travis * add dependency * add dependency * add dependency * add dependency * create smac python package * fix trivial points * optimize import of tuners, modify nnictl accordingly * fix bug: incorrect algorithm_name * trivial refactor * for debug * support virtual * update doc of SMAC * update smac requirements * update requirements * change debug mode * update doc * update doc * refactor based on comments * fix comments * modify example config path to relative path and increase maxTrialNum (#94) * modify example config path to relative path and increase maxTrialNum * add document * support conda (#90) (#110) * support install from venv and travis CI * support install from venv and travis CI * support install from venv and travis CI * support conda * support 
conda * modify example config path to relative path and increase maxTrialNum * undo messy commit * undo messy commit * Support pip install as root (#77) * Typo on #58 (#122) * PAI Training Service implementation (#128) * PAI Training service implementation **1. Implement PAITrainingService **2. Add trial-keeper python module, and modify setup.py to install the module **3. Add PAItrainingService rest server to collect metrics from PAI container. * fix datastore for multiple final result (#129) * Update NNI v0.2 release notes (#132) Update NNI v0.2 release notes * Update setup.py Makefile and documents (#130) * update makefile and setup.py * update makefile and setup.py * update document * update document * Update Makefile no travis * update doc * update doc * fix convert from ss to pcs (#133) * Fix bugs about webui (#131) * Fix webui bugs * Fix tslint * webui logpath and document (#135) * Add webui document and logpath as a href * fix tslint * fix comments by Chengmin * Pai training service bug fix and enhancement (#136) * Add NNI installation scripts * Update pai script, update NNI_out_dir * Update NNI dir in nni sdk local.py * Create .nni folder in nni sdk local.py * Add check before creating .nni folder * Fix typo for PAI_INSTALL_NNI_SHELL_FORMAT * Improve annotation (#138) * Improve annotation * Minor bugfix * Selectively install through pip (#139) Selectively install through pip * update setup.py * fix paiTrainingService bugs (#137) * fix nnictl bug * add hdfs host validation * fix bugs * fix dockerfile * fix install.sh * update install.sh * fix dockerfile * Set timeout for HDFSUtility exists function * remove unused TODO * fix sdk * add optional for outputDir and dataDir * refactor dockerfile.base * Remove unused import in hdfsclientUtility * Add documentation for NNI PAI mode experiment (#141) * Add documentation for NNI PAI mode * Fix typo based on PR comments * Exit with subprocess return code of trial keeper * Remove additional exit code * Fix typo based on PR comments * update doc for smac tuner (#140) * Revert "Selectively install through pip (#139)" due to potential pip install issue (#142) * Revert "Selectively install through pip (#139)" This reverts commit 1d174836d3146a0363e9c9c88094bf9cff865faa. 
* Add exit code of subprocess for trial_keeper * Update README, add link to PAImode doc * Merge branch V0.2 to Master (#143) * fix bug (#147) * Refactor nnictl and add config_pai.yml (#144) * fix nnictl bug * add hdfs host validation * fix bugs * fix dockerfile * fix install.sh * update install.sh * fix dockerfile * Set timeout for HDFSUtility exists function * remove unused TODO * fix sdk * add optional for outputDir and dataDir * refactor dockerfile.base * Remove unused import in hdfsclientUtility * add config_pai.yml * refactor nnictl create logic and add colorful print * fix nnictl stop logic * add annotation for config_pai.yml * add document for start experiment * fix config.yml * fix document * Fix trial keeper wrongly exit issue (#152) * Fix trial keeper bug, use actual exitcode to exit rather than 1 * Fix bug of table sort (#145) * Update doc for PAIMode and v0.2 release notes (#153) * Update v0.2 documentation regards to release note and PAI training service * Update document to describe NNI docker image * fix antd (#159) * refactor experiment stopping logic * support change concurrency * remove trialJobs.ts * trivial changes * fix bugs * fix bug * support updating maxTrialNum * Modify IT scripts for supporting multiple experiments * Update ci (#175) * modify CI cuz of refracting exp stop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * update CI for expstop * file saving * fix issues from code merge * remove $(INSTALL_PREFIX)/nni/nni_manager before install * fix indent * fix merge issue * socket close * update port * fix merge error * modify ci logic in nnimanager * fix ci * fix bug * change suspended to done * update ci (#229) * update ci * update ci * update ci (#232) * update ci * update ci * update azure-pipelines * update azure-pipelines * update ci (#233) * update ci * update ci * update azure-pipelines * update azure-pipelines * update azure-pipelines * run.py (#238) * Nnupdate ci (#239) * run.py * test ci * Nnupdate ci (#240) * run.py * test ci * test ci * Udci (#241) * run.py * test ci * test ci * test ci * update ci (#242) * run.py * test ci * test ci * test ci * update ci * revert install.sh (#244) * run.py * test ci * test ci * test ci * update ci * revert install.sh * add comments * remove assert * trivial change * trivial change * update Makefile (#246) * update Makefile * update Makefile * quick fix for ci (#248) * add update trialNum and fix bugs (#261) * Add builtin tuner to CI (#247) * update Makefile * update Makefile * add builtin-tuner test * add builtin-tuner test * refractor ci * update azure.yml * add built-in tuner test * fix bugs * Doc refactor (#258) * doc refactor * image name refactor * Refactor nnictl to support listing stopped experiments. (#256) Refactor nnictl to support listing stopped experiments.
* Show experiment parameters more beautifully (#262) * fix error on example of RemoteMachineMode (#269) * add pycharm project files to .gitignore list * update pylintrc to conform vscode settings * fix RemoteMachineMode for wrong trainingServicePlatform * Update docker file to use latest nni release (#263) * fix bug about execDuration and endTime (#270) * fix bug about execDuration and endTime * modify time interval to 30 seconds * refactor based on Gems's suggestion * for triggering ci * Refactor dockerfile (#264) * refactor Dockerfile * Support nnictl tensorboard (#268) support tensorboard * Sdk update (#272) * Rename get_parameters to get_next_parameter * annotations add get_next_parameter * updates * updates * updates * updates * updates * add experiment log path to experiment profile (#276) * refactor extract reward from dict by tuner * update Makefile for mac support, wait for aka.ms support * refix Makefile for colorful echo * unversion config.yml with machine information * sync graph.py between tuners & trial of ga_squad * sync graph.py between tuners & trial of ga_squad * copy weight shared ga_squad under weight_sharing folder * mv ga_squad code back to master * simple tuner & trial ready * Fix nnictl multiThread option * weight sharing with async dispatcher simple example ready * update for ga_squad * fix bug * modify multihead attention name * add min_layer_num to Graph * fix bug * update share id calc * fix bug * add save logging * fix ga_squad tuner bug * sync bug fix for ga_squad tuner * fix same hash_id bug * add lock to simple tuner in weight sharing * Add readme to simple weight sharing * update * update * add paper link * update * reformat with autopep8 * add documentation for weight sharing * test for weight sharing * delete irrelevant files * move details of weight sharing in to code comments * Dev weight sharing update doc (#577) * add example section * Dev weight sharing update (#579) * update weight sharing tutorial * Dev weight sharing (#581) * fix divide by zero risk * update tuner thread exception handling * fix bug for async test
---
 docs/AdvancedNAS.md                           |  87 ++++
 docs/img/weight_sharing.png                   | Bin 0 -> 71354 bytes
 examples/trials/ga_squad/trial.py             |   6 +-
 .../weight_sharing/ga_squad/attention.py      | 171 +++++++
 .../weight_sharing/ga_squad/config_remote.yml |  31 ++
 .../trials/weight_sharing/ga_squad/data.py    | 269 ++++++++++
 .../weight_sharing/ga_squad/download.sh       |   6 +
 .../weight_sharing/ga_squad/evaluate.py       | 174 +++++++
 .../trials/weight_sharing/ga_squad/graph.py   | 336 +++++++++++++
 .../weight_sharing/ga_squad/graph_to_tf.py    | 342 +++++++++++++
 .../trials/weight_sharing/ga_squad/rnn.py     | 118 +++++
 .../weight_sharing/ga_squad/train_model.py    | 263 ++++++++++
 .../trials/weight_sharing/ga_squad/trial.py   | 461 ++++++++++++++++++
 .../trials/weight_sharing/ga_squad/util.py    |  76 +++
 .../ga_customer_tuner/customer_tuner.py       |   2 +-
 .../ga_customer_tuner/README.md               |  15 +
 .../ga_customer_tuner/__init__.py             |   0
 .../ga_customer_tuner/customer_tuner.py       | 224 +++++++++
 .../weight_sharing/ga_customer_tuner/graph.py | 336 +++++++++++++
 src/sdk/pynni/nni/common.py                   |   3 +-
 src/sdk/pynni/nni/msg_dispatcher.py           |   1 +
 src/sdk/pynni/nni/msg_dispatcher_base.py      |  25 +-
 src/sdk/pynni/nni/tuner.py                    |   1 +
 test/async_sharing_test/config.yml            |  25 +
 test/async_sharing_test/main.py               |  57 +++
 test/async_sharing_test/simple_tuner.py       |  66 +++
 26 files changed, 3086 insertions(+), 9 deletions(-)
 create mode 100644 docs/AdvancedNAS.md
 create mode 100644 docs/img/weight_sharing.png
 create mode 100644 examples/trials/weight_sharing/ga_squad/attention.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/config_remote.yml
 create mode 100644 examples/trials/weight_sharing/ga_squad/data.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/download.sh
 create mode 100644 examples/trials/weight_sharing/ga_squad/evaluate.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/graph.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/graph_to_tf.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/rnn.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/train_model.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/trial.py
 create mode 100644 examples/trials/weight_sharing/ga_squad/util.py
 create mode 100644 examples/tuners/weight_sharing/ga_customer_tuner/README.md
 create mode 100644 examples/tuners/weight_sharing/ga_customer_tuner/__init__.py
 create mode 100644 examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py
 create mode 100644 examples/tuners/weight_sharing/ga_customer_tuner/graph.py
 create mode 100644 test/async_sharing_test/config.yml
 create mode 100644 test/async_sharing_test/main.py
 create mode 100644 test/async_sharing_test/simple_tuner.py

diff --git a/docs/AdvancedNAS.md b/docs/AdvancedNAS.md
new file mode 100644
index 0000000000..65ecd34100
--- /dev/null
+++ b/docs/AdvancedNAS.md
@@ -0,0 +1,87 @@
+# Tutorial for Advanced Neural Architecture Search
+Currently many of the NAS algorithms leverage the technique of **weight sharing** among trials to accelerate the training process. For example, [ENAS][1] delivers 1000x efficiency through '_parameter sharing between child models_', compared with the earlier [NASNet][2] algorithm. Other NAS algorithms, such as [DARTS][3], [Network Morphism][4], and [Evolution][5], also leverage, or have the potential to leverage, weight sharing.
+
+This is a tutorial on how to enable weight sharing in NNI.
+
+## Weight Sharing among trials
+Currently we recommend sharing weights through NFS (Network File System), which supports sharing files across machines and is lightweight and (relatively) efficient. We also welcome contributions from the community on more efficient techniques.
+
+### Weight Sharing through NFS file
+With NFS set up (see below), trial code can share model weights by loading & saving files. Here we recommend that users feed the tuner the storage path:
+```yaml
+tuner:
+  codeDir: path/to/customer_tuner
+  classFileName: customer_tuner.py
+  className: CustomerTuner
+  classArgs:
+    ...
+    save_dir_root: /nfs/storage/path/
+```
+and let the tuner decide where to save & load weights, feeding the paths to trials through `nni.get_next_parameter()`:
+
+![weight_sharing_design](./img/weight_sharing.png)
+
+For example, in TensorFlow:
+```python
+# save the model
+saver = tf.train.Saver()
+saver.save(sess, os.path.join(params['save_path'], 'model.ckpt'))
+# load the model (called before the variable initializers are run)
+tf.train.init_from_checkpoint(params['restore_path'], {'/': '/'})
+```
+where `'save_path'` and `'restore_path'` in the hyper-parameters can be managed by the tuner.
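+
+To make the flow concrete, here is a minimal sketch of the trial side, assuming the tuner passes `save_path` and `restore_path` as above (the single variable `w` is only a stand-in for a real model):
+```python
+import os
+import tensorflow as tf
+import nni
+
+params = nni.get_next_parameter()
+# stand-in for a real model: one variable whose weights are shared between trials
+w = tf.get_variable('w', shape=[10], initializer=tf.zeros_initializer())
+if params.get('restore_path'):
+    # re-point the variable initializers at the parent trial's checkpoint
+    tf.train.init_from_checkpoint(params['restore_path'], {'/': '/'})
+saver = tf.train.Saver()
+with tf.Session() as sess:
+    sess.run(tf.global_variables_initializer())
+    # ... train and evaluate here ...
+    nni.report_final_result(0.0)  # replace 0.0 with the real metric
+    saver.save(sess, os.path.join(params['save_path'], 'model.ckpt'))
+```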
+
+### NFS Setup
+In NFS, files are physically stored on a server machine, and trials on client machines can read/write those files in the same way that they access local files.
+
+#### Install NFS on server machine
+First, install the NFS server:
+```bash
+sudo apt-get install nfs-kernel-server
+```
+Suppose `/tmp/nni/shared` is used as the physical storage, then run:
+```bash
+sudo mkdir -p /tmp/nni/shared
+echo "/tmp/nni/shared *(rw,sync,no_subtree_check,no_root_squash)" | sudo tee -a /etc/exports
+sudo service nfs-kernel-server restart
+```
+You can check whether the above directory is successfully exported by NFS using `sudo showmount -e localhost`.
+
+#### Install NFS on client machine
+First, install the NFS client:
+```bash
+sudo apt-get install nfs-common
+```
+Then create the mount point and mount the shared directory:
+```bash
+sudo mkdir -p /mnt/nfs/nni/
+sudo mount -t nfs 10.10.10.10:/tmp/nni/shared /mnt/nfs/nni
+```
+where `10.10.10.10` should be replaced by the real IP of the NFS server machine.
+
+## Asynchronous Dispatcher Mode for trial dependency control
+Weight sharing often involves trials running on different machines, and most of the time **read-after-write** consistency must be assured: the child model should not load the parent model before the parent trial finishes training. To deal with this, users can enable **asynchronous dispatcher mode** with `multiThread: true` in NNI's `config.yml`. In this mode the dispatcher assigns a tuner thread each time a `NEW_TRIAL` request comes in, and the tuner thread can decide when to submit a new trial by blocking and unblocking itself. For example:
+```python
+    def generate_parameters(self, parameter_id):
+        self.thread_lock.acquire()
+        indiv = ...  # generate the configuration for a new trial here
+        self.events[parameter_id] = threading.Event()
+        self.thread_lock.release()
+        if indiv.parent_id is not None:
+            # block until the parent trial has reported its result
+            self.events[indiv.parent_id].wait()
+        return indiv.config  # submit the child trial only after its parent finishes
+
+    def receive_trial_result(self, parameter_id, parameters, reward):
+        self.thread_lock.acquire()
+        # process the trial result here
+        self.thread_lock.release()
+        self.events[parameter_id].set()  # unblock any child waiting on this trial
+```
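+
+For reference, the flag sits at the top level of the experiment's `config.yml`. A minimal sketch follows; fields other than `multiThread` are the usual experiment settings and are shown here only for context:
+```yaml
+authorName: default
+experimentName: weight_sharing_example
+trialConcurrency: 2
+multiThread: true    # enable the asynchronous dispatcher
+tuner:
+  codeDir: path/to/customer_tuner
+  classFileName: customer_tuner.py
+  className: CustomerTuner
+```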
+
+[1]: https://arxiv.org/abs/1802.03268
+[2]: https://arxiv.org/abs/1707.07012
+[3]: https://arxiv.org/abs/1806.09055
+[4]: https://arxiv.org/abs/1806.10282
+[5]: https://arxiv.org/abs/1703.01041
\ No newline at end of file
diff --git a/docs/img/weight_sharing.png b/docs/img/weight_sharing.png
new file mode 100644
index 0000000000000000000000000000000000000000..dbf087d020d1f27858ddcddbf46dece6087f289a
GIT binary patch
literal 71354
[binary data for the weight_sharing.png design diagram omitted]
diff --git a/examples/trials/weight_sharing/ga_squad/data.py b/examples/trials/weight_sharing/ga_squad/data.py
new file mode 100644
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/data.py
+import csv
+import json
+from random import shuffle
+
+import numpy as np
+
+
+class WhitespaceTokenizer:
+    '''
+    Tokenize text on whitespace, recording each token's character span.
+    '''
+    def tokenize(self, text):
+        start = -1
+        tokens = []
+        for i, character in enumerate(text):
+            if character == ' ' or character == '\t':
+                if start >= 0:
+                    word = text[start:i]
+                    tokens.append({
+                        'word': word,
+                        'original_text': word,
+                        'char_begin': start,
+                        'char_end': i})
+                    start = -1
+            else:
+                if start < 0:
+                    start = i
+        if start >= 0:
+            tokens.append({
+                'word': text[start:len(text)],
+                'original_text': text[start:len(text)],
+                'char_begin': start,
+                'char_end': len(text)
+            })
+        return tokens
+
+
+def load_from_file(path, fmt=None, is_training=True):
+    '''
+    load data from file
+    '''
+    if fmt is None:
+        fmt = 'squad'
+    assert fmt in ['squad', 'csv'], 'input format must be squad or csv'
+    qp_pairs = []
+    if fmt == 'squad':
+        with open(path) as data_file:
+            data = json.load(data_file)['data']
+        for doc in data:
+            for paragraph in doc['paragraphs']:
+                passage = paragraph['context']
+                for qa_pair in paragraph['qas']:
+                    question = qa_pair['question']
+                    qa_id = qa_pair['id']
+                    if not is_training:
+                        qp_pairs.append(
+                            {'passage': passage, 'question': question, 'id': qa_id})
+                    else:
+                        for answer in qa_pair['answers']:
+                            answer_begin = int(answer['answer_start'])
+                            answer_end = answer_begin + len(answer['text'])
+                            qp_pairs.append({'passage': passage,
+                                             'question': question,
+                                             'id': qa_id,
+                                             'answer_begin': answer_begin,
+                                             'answer_end': answer_end})
+    else:
+        with open(path, newline='') as csvfile:
+            reader = csv.reader(csvfile, delimiter='\t')
+            line_num = 0
+            for row in reader:
+                qp_pairs.append(
+                    {'passage': row[1], 'question': row[0], 'id': line_num})
+                line_num += 1
+    return qp_pairs
+
+
+def tokenize(qp_pair, tokenizer=None, is_training=False):
+    '''
+    Tokenize the question and passage of a question-passage pair.
+    '''
+    question_tokens = tokenizer.tokenize(qp_pair['question'])
+    passage_tokens = tokenizer.tokenize(qp_pair['passage'])
+    if is_training:
+        question_tokens = question_tokens[:300]
+        passage_tokens = passage_tokens[:300]
+    passage_tokens.insert(
+        0, {'word': '<s>', 'original_text': '<s>', 'char_begin': 0, 'char_end': 0})
+    passage_tokens.append(
+        {'word': '</s>', 'original_text': '</s>', 'char_begin': 0, 'char_end': 0})
+    qp_pair['question_tokens'] = question_tokens
+    qp_pair['passage_tokens'] = passage_tokens
+
+
+def collect_vocab(qp_pairs):
+    '''
+    Build the vocab from corpus.
+    '''
+    vocab = set()
+    for qp_pair in qp_pairs:
+        for word in qp_pair['question_tokens']:
+            vocab.add(word['word'])
+        for word in qp_pair['passage_tokens']:
+            vocab.add(word['word'])
+    return vocab
+
+
+def shuffle_step(entries, step):
+    '''
+    Shuffle entries in chunks of the given step size.
+    '''
+    answer = []
+    for i in range(0, len(entries), step):
+        sub = entries[i:i+step]
+        shuffle(sub)
+        answer += sub
+    return answer
+
+
+def get_batches(qp_pairs, batch_size, need_sort=True):
+    '''
+    Batch the data and shuffle the batches.
+    '''
+    if need_sort:
+        qp_pairs = sorted(qp_pairs, key=lambda qp: (
+            len(qp['passage_tokens']), qp['id']), reverse=True)
+    batches = [{'qp_pairs': qp_pairs[i:(i + batch_size)]}
+               for i in range(0, len(qp_pairs), batch_size)]
+    shuffle(batches)
+    return batches
+
+
+def get_char_input(data, char_dict, max_char_length):
+    '''
+    Get char input.
+    '''
+    batch_size = len(data)
+    sequence_length = max(len(d) for d in data)
+    char_id = np.zeros((max_char_length, sequence_length,
+                        batch_size), dtype=np.int32)
+    char_lengths = np.zeros((sequence_length, batch_size), dtype=np.float32)
+    for batch_idx in range(0, min(len(data), batch_size)):
+        batch_data = data[batch_idx]
+        for sample_idx in range(0, min(len(batch_data), sequence_length)):
+            word = batch_data[sample_idx]['word']
+            char_lengths[sample_idx, batch_idx] = min(
+                len(word), max_char_length)
+            for i in range(0, min(len(word), max_char_length)):
+                char_id[i, sample_idx, batch_idx] = get_id(char_dict, word[i])
+    return char_id, char_lengths
+
+
+def get_word_input(data, word_dict, embed, embed_dim):
+    '''
+    Get word input.
+    '''
+    batch_size = len(data)
+    max_sequence_length = max(len(d) for d in data)
+    sequence_length = max_sequence_length
+    word_input = np.zeros((max_sequence_length, batch_size,
+                           embed_dim), dtype=np.float32)
+    ids = np.zeros((sequence_length, batch_size), dtype=np.int32)
+    masks = np.zeros((sequence_length, batch_size), dtype=np.float32)
+    lengths = np.zeros([batch_size], dtype=np.int32)
+
+    for batch_idx in range(0, min(len(data), batch_size)):
+        batch_data = data[batch_idx]
+
+        lengths[batch_idx] = len(batch_data)
+
+        for sample_idx in range(0, min(len(batch_data), sequence_length)):
+            word = batch_data[sample_idx]['word'].lower()
+            if word in word_dict.keys():
+                word_input[sample_idx, batch_idx] = embed[word_dict[word]]
+                ids[sample_idx, batch_idx] = word_dict[word]
+                masks[sample_idx, batch_idx] = 1
+
+    word_input = np.reshape(word_input, (-1, embed_dim))
+    return word_input, ids, masks, lengths
+
+
+def get_word_index(tokens, char_index):
+    '''
+    Given a character index, return the index of the word that contains it.
+    '''
+    for (i, token) in enumerate(tokens):
+        if token['char_end'] == 0:
+            continue
+        if token['char_begin'] <= char_index and char_index <= token['char_end']:
+            return i
+    return 0
+
+
+def get_answer_begin_end(data):
+    '''
+    Get the begin and end word indices of each answer.
+    '''
+    begin = []
+    end = []
+    for qa_pair in data:
+        tokens = qa_pair['passage_tokens']
+        char_begin = qa_pair['answer_begin']
+        char_end = qa_pair['answer_end']
+        word_begin = get_word_index(tokens, char_begin)
+        word_end = get_word_index(tokens, char_end)
+        begin.append(word_begin)
+        end.append(word_end)
+    return np.asarray(begin), np.asarray(end)
+
+
+def get_id(word_dict, word):
+    '''
+    Given a word, return its word id.
+    '''
+    if word in word_dict.keys():
+        return word_dict[word]
+    return word_dict['']
+
+
+def get_buckets(min_length, max_length, bucket_count):
+    '''
+    Get bucket boundaries by length.
+    '''
+    if bucket_count <= 0:
+        return [max_length]
+    unit_length = int((max_length - min_length) // (bucket_count))
+    buckets = [min_length + unit_length *
+               (i + 1) for i in range(0, bucket_count)]
+    buckets[-1] = max_length
+    return buckets
+
+
+def find_bucket(length, buckets):
+    '''
+    Find the bucket for a given length.
+    '''
+    for bucket in buckets:
+        if length <= bucket:
+            return bucket
+    return buckets[-1]
diff --git a/examples/trials/weight_sharing/ga_squad/download.sh b/examples/trials/weight_sharing/ga_squad/download.sh
new file mode 100644
index 0000000000..308fbaedbf
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/download.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json
+wget http://nlp.stanford.edu/data/glove.840B.300d.zip
+unzip glove.840B.300d.zip
\ No newline at end of file
diff --git a/examples/trials/weight_sharing/ga_squad/evaluate.py b/examples/trials/weight_sharing/ga_squad/evaluate.py
new file mode 100644
index 0000000000..6db1abbc99
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/evaluate.py
@@ -0,0 +1,174 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+'''
+Evaluation scripts for QA model.
+'''
+
+from __future__ import print_function
+from collections import Counter
+import string
+import re
+import argparse
+import json
+import sys
+
+
+def normalize_answer(str_input):
+    """Lower text and remove punctuation, articles and extra whitespace."""
+    def remove_articles(text):
+        '''
+        Remove the articles "a|an|the".
+        '''
+        return re.sub(r'\b(a|an|the)\b', ' ', text)
+
+    def white_space_fix(text):
+        '''
+        Remove unnecessary whitespace.
+        '''
+        return ' '.join(text.split())
+
+    def remove_punc(text):
+        '''
+        Remove punctuation.
+        '''
+        exclude = set(string.punctuation)
+        return ''.join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        '''
+        Lowercase the string.
+        '''
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(str_input))))
+
+
+def f1_score(prediction, ground_truth):
+    '''
+    Calculate the f1 score.
+    '''
+    prediction_tokens = normalize_answer(prediction).split()
+    ground_truth_tokens = normalize_answer(ground_truth).split()
+    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
+    num_same = sum(common.values())
+    if num_same == 0:
+        return 0
+    if not prediction_tokens:
+        raise ValueError("empty prediction tokens")
+    precision = 1.0 * num_same / len(prediction_tokens)
+
+    if not ground_truth_tokens:
+        raise ValueError("empty groundtruth tokens")
+    recall = 1.0 * num_same / len(ground_truth_tokens)
+    f1_result = (2 * precision * recall) / (precision + recall + 1e-10)
+    return f1_result
+
+
+def exact_match_score(prediction, ground_truth):
+    '''
+    Calculate the exact match score between prediction and ground truth.
+    '''
+    return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+
+def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+    '''
+    Take the maximum of the metric over all ground truths.
+    '''
+    scores_for_ground_truths = []
+    for ground_truth in ground_truths:
+        score = metric_fn(prediction, ground_truth)
+        scores_for_ground_truths.append(score)
+    return max(scores_for_ground_truths)
+
+
+def _evaluate(dataset, predictions):
+    '''
+    Evaluate function.
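+    Computes exact-match and F1 (both as percentages) over a SQuAD-style
+    dataset, counting questions that received no prediction as unanswered.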
+    '''
+    f1_result = exact_match = total = 0
+    count = 0
+    for article in dataset:
+        for paragraph in article['paragraphs']:
+            for qa_pair in paragraph['qas']:
+                total += 1
+                if qa_pair['id'] not in predictions:
+                    count += 1
+                    continue
+                ground_truths = list(
+                    map(lambda x: x['text'], qa_pair['answers']))
+                prediction = predictions[qa_pair['id']]
+                exact_match += metric_max_over_ground_truths(
+                    exact_match_score, prediction, ground_truths)
+                f1_result += metric_max_over_ground_truths(
+                    f1_score, prediction, ground_truths)
+    print('total', total, 'exact_match',
+          exact_match, 'unanswered_questions', count)
+    exact_match = 100.0 * exact_match / total
+    f1_result = 100.0 * f1_result / total
+    return {'exact_match': exact_match, 'f1': f1_result}
+
+
+def evaluate(data_file, pred_file):
+    '''
+    Evaluate.
+    '''
+    expected_version = '1.1'
+    with open(data_file) as dataset_file:
+        dataset_json = json.load(dataset_file)
+        if dataset_json['version'] != expected_version:
+            print('Evaluation expects v-' + expected_version +
+                  ', but got dataset with v-' + dataset_json['version'],
+                  file=sys.stderr)
+        dataset = dataset_json['data']
+    with open(pred_file) as prediction_file:
+        predictions = json.load(prediction_file)
+    # print(json.dumps(evaluate(dataset, predictions)))
+    result = _evaluate(dataset, predictions)
+    # print('em:', result['exact_match'], 'f1:', result['f1'])
+    return result['exact_match']
+
+
+def evaluate_with_predictions(data_file, predictions):
+    '''
+    Evaluate with predictions.
+    '''
+    expected_version = '1.1'
+    with open(data_file) as dataset_file:
+        dataset_json = json.load(dataset_file)
+        if dataset_json['version'] != expected_version:
+            print('Evaluation expects v-' + expected_version +
+                  ', but got dataset with v-' + dataset_json['version'],
+                  file=sys.stderr)
+        dataset = dataset_json['data']
+    result = _evaluate(dataset, predictions)
+    return result['exact_match']
+
+
+if __name__ == '__main__':
+    EXPECT_VERSION = '1.1'
+    parser = argparse.ArgumentParser(
+        description='Evaluation for SQuAD ' + EXPECT_VERSION)
+    parser.add_argument('dataset_file', help='Dataset file')
+    parser.add_argument('prediction_file', help='Prediction File')
+    args = parser.parse_args()
+    print(evaluate(args.dataset_file, args.prediction_file))
diff --git a/examples/trials/weight_sharing/ga_squad/graph.py b/examples/trials/weight_sharing/ga_squad/graph.py
new file mode 100644
index 0000000000..8e675a06ff
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/graph.py
@@ -0,0 +1,336 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+'''
+Graph is a custom-defined class; this module contains the related classes and functions for graphs.
+'''
+
+
+import copy
+import hashlib
+import logging
+import json
+import random
+from collections import deque
+from enum import Enum, unique
+from typing import Iterable
+
+import numpy as np
+
+_logger = logging.getLogger('ga_squad_graph')
+
+@unique
+class LayerType(Enum):
+    '''
+    Layer type
+    '''
+    attention = 0
+    self_attention = 1
+    rnn = 2
+    input = 3
+    output = 4
+
+class Layer(object):
+    '''
+    Layer class, which contains the information of a graph layer.
+    '''
+    def __init__(self, graph_type, inputs=None, output=None, size=None, hash_id=None):
+        self.input = inputs if inputs is not None else []
+        self.output = output if output is not None else []
+        self.graph_type = graph_type
+        self.is_delete = False
+        self.size = size
+        self.hash_id = hash_id
+        if graph_type == LayerType.attention.value:
+            self.input_size = 2
+            self.output_size = 1
+        elif graph_type == LayerType.rnn.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.self_attention.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.input.value:
+            self.input_size = 0
+            self.output_size = 1
+            if self.hash_id is None:
+                hasher = hashlib.md5()
+                hasher.update(np.random.bytes(100))
+                self.hash_id = hasher.hexdigest()
+        elif graph_type == LayerType.output.value:
+            self.input_size = 1
+            self.output_size = 0
+        else:
+            raise ValueError('Unsupported LayerType: {}'.format(graph_type))
+
+    def update_hash(self, layers: Iterable):
+        """
+        Compute this layer's `hash_id`, which is determined by its own properties and the `hash_id`s of its input layers.
+        """
+        if self.graph_type == LayerType.input.value:
+            return
+        hasher = hashlib.md5()
+        hasher.update(LayerType(self.graph_type).name.encode('ascii'))
+        hasher.update(str(self.size).encode('ascii'))
+        for i in self.input:
+            if layers[i].hash_id is None:
+                raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i]))
+            hasher.update(layers[i].hash_id.encode('ascii'))
+        self.hash_id = hasher.hexdigest()
+
+    def set_size(self, graph_id, size):
+        '''
+        Set size.
+        '''
+        if self.graph_type == LayerType.attention.value:
+            if self.input[0] == graph_id:
+                self.size = size
+        if self.graph_type == LayerType.rnn.value:
+            self.size = size
+        if self.graph_type == LayerType.self_attention.value:
+            self.size = size
+        if self.graph_type == LayerType.output.value:
+            if self.size != size:
+                return False
+        return True
+
+    def clear_size(self):
+        '''
+        Clear size.
+        '''
+        if self.graph_type in (LayerType.attention.value,
+                               LayerType.rnn.value,
+                               LayerType.self_attention.value):
+            self.size = None
+
+    def __str__(self):
+        return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(self.graph_type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size)
+
+def graph_dumps(graph):
+    '''
+    Dump the graph.
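+    Serialized as JSON via each object's __dict__, so graph_loads below can
+    rebuild the Graph and Layer objects from the result.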
+    '''
+    return json.dumps(graph, default=lambda obj: obj.__dict__)
+
+def graph_loads(graph_json):
+    '''
+    Load a graph from JSON.
+    '''
+    layers = []
+    for layer in graph_json['layers']:
+        layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id'])
+        layer_info.is_delete = layer['is_delete']
+        _logger.debug('append layer {}'.format(layer_info))
+        layers.append(layer_info)
+    graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], [])
+    graph.layers = layers
+    _logger.debug('graph {} loaded'.format(graph))
+    return graph
+
+class Graph(object):
+    '''
+    Custom graph class.
+    '''
+    def __init__(self, max_layer_num, min_layer_num, inputs, output, hide):
+        self.layers = []
+        self.max_layer_num = max_layer_num
+        self.min_layer_num = min_layer_num
+        assert min_layer_num < max_layer_num
+
+        for layer in inputs:
+            self.layers.append(layer)
+        for layer in output:
+            self.layers.append(layer)
+        if hide is not None:
+            for layer in hide:
+                self.layers.append(layer)
+        assert self.is_legal()
+
+    def is_topology(self, layers=None):
+        '''
+        Validate the topology; returns a topological order (with '|' separating
+        levels) or False on failure.
+        '''
+        if layers is None:
+            layers = self.layers
+        layers_nodle = []
+        result = []
+        for i, layer in enumerate(layers):
+            if layer.is_delete is False:
+                layers_nodle.append(i)
+        while True:
+            flag_break = True
+            layers_toremove = []
+            for layer1 in layers_nodle:
+                flag_arrive = True
+                for layer2 in layers[layer1].input:
+                    if layer2 in layers_nodle:
+                        flag_arrive = False
+                if flag_arrive is True:
+                    for layer2 in layers[layer1].output:
+                        # Size mismatch
+                        if layers[layer2].set_size(layer1, layers[layer1].size) is False:
+                            return False
+                    layers_toremove.append(layer1)
+                    result.append(layer1)
+                    flag_break = False
+            for layer in layers_toremove:
+                layers_nodle.remove(layer)
+            result.append('|')
+            if flag_break:
+                break
+        # There is a loop in the graph, or some layers cannot be reached
+        if layers_nodle:
+            return False
+        return result
+
+    def layer_num(self, layers=None):
+        '''
+        Return the number of layers.
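+        Input and output layers, as well as deleted layers, are excluded
+        from the count.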
+        '''
+        if layers is None:
+            layers = self.layers
+        layer_num = 0
+        for layer in layers:
+            if layer.is_delete is False and layer.graph_type != LayerType.input.value\
+                and layer.graph_type != LayerType.output.value:
+                layer_num += 1
+        return layer_num
+
+    def is_legal(self, layers=None):
+        '''
+        Check whether the layers form a legal graph.
+        '''
+        if layers is None:
+            layers = self.layers
+
+        for layer in layers:
+            if layer.is_delete is False:
+                if len(layer.input) != layer.input_size:
+                    return False
+                if len(layer.output) < layer.output_size:
+                    return False
+
+        # layer_num <= max_layer_num
+        if self.layer_num(layers) > self.max_layer_num:
+            return False
+
+        # There is a loop in the graph, or some layers cannot be reached
+        if self.is_topology(layers) is False:
+            return False
+
+        return True
+
+    def update_hash(self):
+        """
+        Update the hash id of each layer in topological order;
+        hash ids are used for weight sharing.
+        """
+        _logger.debug('update hash')
+        layer_in_cnt = [len(layer.input) for layer in self.layers]
+        topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value])
+        while topo_queue:
+            layer_i = topo_queue.pop()
+            self.layers[layer_i].update_hash(self.layers)
+            for layer_j in self.layers[layer_i].output:
+                layer_in_cnt[layer_j] -= 1
+                if layer_in_cnt[layer_j] == 0:
+                    topo_queue.appendleft(layer_j)
+
+    def mutation(self, only_add=False):
+        '''
+        Mutate a graph.
+        '''
+        types = []
+        if self.layer_num() < self.max_layer_num:
+            types.append(0)
+            types.append(1)
+        if self.layer_num() > self.min_layer_num and only_add is False:
+            types.append(2)
+            types.append(3)
+        # 0 : add a layer, delete an edge
+        # 1 : add a layer, change an edge
+        # 2 : delete a layer, delete an edge
+        # 3 : delete a layer, change an edge
+        graph_type = random.choice(types)
+        layer_type = random.choice([LayerType.attention.value,\
+            LayerType.self_attention.value, LayerType.rnn.value])
+        layers = copy.deepcopy(self.layers)
+        cnt_try = 0
+        while True:
+            layers_in = []
+            layers_out = []
+            layers_del = []
+            for i, layer in enumerate(layers):
+                if layer.is_delete is False:
+                    if layer.graph_type != LayerType.output.value:
+                        layers_in.append(i)
+                    if layer.graph_type != LayerType.input.value:
+                        layers_out.append(i)
+                    if layer.graph_type != LayerType.output.value\
+                        and layer.graph_type != LayerType.input.value:
+                        layers_del.append(i)
+            if graph_type <= 1:
+                new_id = len(layers)
+                out = random.choice(layers_out)
+                inputs = []
+                output = [out]
+                pos = random.randint(0, len(layers[out].input) - 1)
+                last_in = layers[out].input[pos]
+                layers[out].input[pos] = new_id
+                if graph_type == 0:
+                    layers[last_in].output.remove(out)
+                if graph_type == 1:
+                    layers[last_in].output.remove(out)
+                    layers[last_in].output.append(new_id)
+                    inputs = [last_in]
+                lay = Layer(graph_type=layer_type, inputs=inputs, output=output)
+                while len(inputs) < lay.input_size:
+                    layer1 = random.choice(layers_in)
+                    inputs.append(layer1)
+                    layers[layer1].output.append(new_id)
+                lay.input = inputs
+                layers.append(lay)
+            else:
+                layer1 = random.choice(layers_del)
+                for layer2 in layers[layer1].output:
+                    layers[layer2].input.remove(layer1)
+                    if graph_type == 2:
+                        random_in = random.choice(layers_in)
+                    else:
+                        random_in = random.choice(layers[layer1].input)
+                    layers[layer2].input.append(random_in)
+                    layers[random_in].output.append(layer2)
+                for layer2 in layers[layer1].input:
+                    layers[layer2].output.remove(layer1)
+                layers[layer1].is_delete = True
+
+            if self.is_legal(layers):
+                self.layers = layers
+                break
+            else:
+                layers = copy.deepcopy(self.layers)
+                cnt_try += 1
+        self.update_hash()
+
+    def __str__(self):
+        info = ""
+        for l_id, layer in enumerate(self.layers):
+            if layer.is_delete is False:
+                info += 'id:%d ' % l_id + str(layer) + '\n'
+        return info
diff --git a/examples/trials/weight_sharing/ga_squad/graph_to_tf.py b/examples/trials/weight_sharing/ga_squad/graph_to_tf.py
new file mode 100644
index 0000000000..2712d531ca
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/graph_to_tf.py
@@ -0,0 +1,342 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import tensorflow as tf
+from rnn import XGRUCell
+from util import dropout
+from graph import LayerType
+
+
+def normalize(inputs,
+              epsilon=1e-8,
+              scope="ln"):
+    '''Applies layer normalization.
+
+    Args:
+      inputs: A tensor with 2 or more dimensions, where the first dimension has
+        `batch_size`.
+      epsilon: A small float used to prevent division by zero.
+      scope: Optional scope for `variable_scope`.
+      reuse: Boolean, whether to reuse the weights of a previous layer
+        by the same name.
+
+    Returns:
+      A tensor with the same shape and data dtype as `inputs`.
+    '''
+    with tf.variable_scope(scope):
+        inputs_shape = inputs.get_shape()
+        params_shape = inputs_shape[-1:]
+
+        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
+        beta = tf.Variable(tf.zeros(params_shape))
+        gamma = tf.Variable(tf.ones(params_shape))
+        normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
+        outputs = gamma * normalized + beta
+
+    return outputs
+
+
+def multihead_attention(queries,
+                        keys,
+                        scope="multihead_attention",
+                        num_units=None,
+                        num_heads=4,
+                        dropout_rate=0,
+                        is_training=True,
+                        causality=False):
+    '''Applies multihead attention.
+
+    Args:
+      queries: A 3d tensor with shape of [N, T_q, C_q].
+      keys: A 3d tensor with shape of [N, T_k, C_k].
+      num_units: A scalar. Attention size.
+      dropout_rate: A floating point number.
+      is_training: Boolean. Whether dropout is active.
+      causality: Boolean. If true, units that reference the future are masked.
+      num_heads: An int. Number of heads.
+      scope: Optional scope for `variable_scope`.
+      reuse: Boolean, whether to reuse the weights of a previous layer
+        by the same name.
+
+    Returns:
+      A 3d tensor with shape of (N, T_q, C)
+    '''
+    # look5 is a module-level hook that keeps the pre-softmax attention
+    # scores around for debugging/inspection.
+    global look5
+    with tf.variable_scope(scope):
+        # Set the fall back option for num_units
+        if num_units is None:
+            num_units = queries.get_shape().as_list()[-1]
+
+        Q_ = []
+        K_ = []
+        V_ = []
+        for head_i in range(num_heads):
+            Q = tf.layers.dense(queries, num_units / num_heads,
+                                activation=tf.nn.relu, name='Query' + str(head_i))  # (N, T_q, C)
+            K = tf.layers.dense(keys, num_units / num_heads,
+                                activation=tf.nn.relu, name='Key' + str(head_i))  # (N, T_k, C)
+            V = tf.layers.dense(keys, num_units / num_heads,
+                                activation=tf.nn.relu, name='Value' + str(head_i))  # (N, T_k, C)
+            Q_.append(Q)
+            K_.append(K)
+            V_.append(V)
+
+        # Split and concat
+        Q_ = tf.concat(Q_, axis=0)  # (h*N, T_q, C/h)
+        K_ = tf.concat(K_, axis=0)  # (h*N, T_k, C/h)
+        V_ = tf.concat(V_, axis=0)  # (h*N, T_k, C/h)
+
+        # Multiplication
+        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)
+
+        # Scale
+        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
+
+        # Key Masking
+        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1)))  # (N, T_k)
+        key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
+        key_masks = tf.tile(tf.expand_dims(key_masks, 1),
+                            [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)
+
+        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
+        outputs = tf.where(tf.equal(key_masks, 0), paddings,
+                           outputs)  # (h*N, T_q, T_k)
+
+        # Causality = Future blinding
+        if causality:
+            diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
+            tril = tf.contrib.linalg.LinearOperatorTriL(
+                diag_vals).to_dense()  # (T_q, T_k)
+            masks = tf.tile(tf.expand_dims(tril, 0),
+                            [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)
+
+            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
+            outputs = tf.where(tf.equal(masks, 0), paddings,
+                               outputs)  # (h*N, T_q, T_k)
+
+        # Activation
+        look5 = outputs
+        outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)
+
+        # Query Masking
+        query_masks = tf.sign(
+            tf.abs(tf.reduce_sum(queries, axis=-1)))  # (N, T_q)
+        query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
+        query_masks = tf.tile(tf.expand_dims(
+            query_masks, -1), [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
+        outputs *= query_masks  # broadcasting. (N, T_q, C)
+
+        # Dropouts
+        outputs = dropout(outputs, dropout_rate, is_training)
+
+        # Weighted sum
+        outputs = tf.matmul(outputs, V_)  # ( h*N, T_q, C/h)
+
+        # Restore shape
+        outputs = tf.concat(tf.split(outputs, num_heads,
+                                     axis=0), axis=2)  # (N, T_q, C)
+
+        # Residual connection
+        if queries.get_shape().as_list()[-1] == num_units:
+            outputs += queries
+
+        # Normalize
+        outputs = normalize(outputs, scope=scope)  # (N, T_q, C)
+
+    return outputs
+
+
+def positional_encoding(inputs,
+                        num_units=None,
+                        zero_pad=True,
+                        scale=True,
+                        scope="positional_encoding",
+                        reuse=None):
+    '''
+    Return positional embedding.
+    '''
+    Shape = tf.shape(inputs)
+    N = Shape[0]
+    T = Shape[1]
+    num_units = Shape[2]
+    with tf.variable_scope(scope, reuse=reuse):
+        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])
+
+        # First part of the PE function: sin and cos argument
+        # Second part, apply the sine to even columns and the cosine to odd ones.
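+        # Together these build the sinusoidal encoding: positions are scaled
+        # by 10000^(-2j/num_units) for channel j, and the masks h1/h2 below
+        # select the even (sine) and odd (cosine) channels respectively.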
+ X = tf.expand_dims(tf.cast(tf.range(T), tf.float32), axis=1) + Y = tf.expand_dims( + tf.cast(10000 ** -(2 * tf.range(num_units) / num_units), tf.float32), axis=0) + h1 = tf.cast((tf.range(num_units) + 1) % 2, tf.float32) + h2 = tf.cast((tf.range(num_units) % 2), tf.float32) + position_enc = tf.multiply(X, Y) + position_enc = tf.sin(position_enc) * tf.multiply(tf.ones_like(X), h1) + \ + tf.cos(position_enc) * tf.multiply(tf.ones_like(X), h2) + + # Convert to a tensor + lookup_table = position_enc + + if zero_pad: + lookup_table = tf.concat((tf.zeros(shape=[1, num_units]), + lookup_table[1:, :]), 0) + outputs = tf.nn.embedding_lookup(lookup_table, position_ind) + + if scale: + outputs = outputs * tf.sqrt(tf.cast(num_units, tf.float32)) + + return outputs + + +def feedforward(inputs, + num_units, + scope="multihead_attention"): + '''Point-wise feed forward net. + + Args: + inputs: A 3d tensor with shape of [N, T, C]. + num_units: A list of two integers. + scope: Optional scope for `variable_scope`. + reuse: Boolean, whether to reuse the weights of a previous layer + by the same name. + + Returns: + A 3d tensor with the same shape and dtype as inputs + ''' + with tf.variable_scope(scope): + # Inner layer + params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, + "activation": tf.nn.relu, "use_bias": True} + outputs = tf.layers.conv1d(**params) + + # Readout layer + params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, + "activation": None, "use_bias": True} + outputs = tf.layers.conv1d(**params) + + # Residual connection + outputs += inputs + + # Normalize + outputs = normalize(outputs) + + return outputs + + +def rnn(input_states, sequence_lengths, dropout_rate, is_training, num_units): + layer_cnt = 1 + states = [] + xs = tf.transpose(input_states, perm=[1, 0, 2]) + for i in range(0, layer_cnt): + xs = dropout(xs, dropout_rate, is_training) + with tf.variable_scope('layer_' + str(i)): + cell_fw = XGRUCell(num_units) + cell_bw = XGRUCell(num_units) + outputs, _ = tf.nn.bidirectional_dynamic_rnn( + cell_fw=cell_fw, + cell_bw=cell_bw, + dtype=tf.float32, + sequence_length=sequence_lengths, + inputs=xs, + time_major=True) + + y_lr, y_rl = outputs + xs = tf.concat([y_lr, y_rl], 2) + states.append(xs) + + return tf.transpose(dropout(tf.concat(states, axis=2), + dropout_rate, + is_training), perm=[1, 0, 2]) + + +def graph_to_network(input1, + input2, + input1_lengths, + input2_lengths, + p_graph, + dropout_rate, + is_training, + num_heads=1, + rnn_units=256): + topology = p_graph.is_topology() + layers = dict() + layers_sequence_lengths = dict() + num_units = input1.get_shape().as_list()[-1] + layers[0] = input1*tf.sqrt(tf.cast(num_units, tf.float32)) + \ + positional_encoding(input1, scale=False, zero_pad=False) + layers[1] = input2*tf.sqrt(tf.cast(num_units, tf.float32)) + layers[0] = dropout(layers[0], dropout_rate, is_training) + layers[1] = dropout(layers[1], dropout_rate, is_training) + layers_sequence_lengths[0] = input1_lengths + layers_sequence_lengths[1] = input2_lengths + for _, topo_i in enumerate(topology): + if topo_i == '|': + continue + + # Note: here we use the `hash_id` of layer as scope name, + # so that we can automatically load sharable weights from previous trained models + with tf.variable_scope(p_graph.layers[topo_i].hash_id, reuse=tf.AUTO_REUSE): + if p_graph.layers[topo_i].graph_type == LayerType.input.value: + continue + elif p_graph.layers[topo_i].graph_type == LayerType.attention.value: + with tf.variable_scope('attention'): + layer = 
multihead_attention(layers[p_graph.layers[topo_i].input[0]], + layers[p_graph.layers[topo_i].input[1]], + scope="multihead_attention", + dropout_rate=dropout_rate, + is_training=is_training, + num_heads=num_heads, + num_units=rnn_units * 2) + layer = feedforward(layer, scope="feedforward", + num_units=[rnn_units * 2 * 4, rnn_units * 2]) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == LayerType.self_attention.value: + with tf.variable_scope('self-attention'): + layer = multihead_attention(layers[p_graph.layers[topo_i].input[0]], + layers[p_graph.layers[topo_i].input[0]], + scope="multihead_attention", + dropout_rate=dropout_rate, + is_training=is_training, + num_heads=num_heads, + num_units=rnn_units * 2) + layer = feedforward(layer, scope="feedforward", + num_units=[rnn_units * 2 * 4, rnn_units * 2]) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == LayerType.rnn.value: + with tf.variable_scope('rnn'): + layer = rnn(layers[p_graph.layers[topo_i].input[0]], + layers_sequence_lengths[p_graph.layers[topo_i].input[0]], + dropout_rate, + is_training, + rnn_units) + layers[topo_i] = layer + layers_sequence_lengths[topo_i] = layers_sequence_lengths[ + p_graph.layers[topo_i].input[0]] + elif p_graph.layers[topo_i].graph_type == LayerType.output.value: + layers[topo_i] = layers[p_graph.layers[topo_i].input[0]] + if layers[topo_i].get_shape().as_list()[-1] != rnn_units * 1 * 2: + with tf.variable_scope('add_dense'): + layers[topo_i] = tf.layers.dense( + layers[topo_i], units=rnn_units*2) + return layers[2], layers[3] diff --git a/examples/trials/weight_sharing/ga_squad/rnn.py b/examples/trials/weight_sharing/ga_squad/rnn.py new file mode 100644 index 0000000000..82f7d070bf --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/rnn.py @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import tensorflow as tf +from tensorflow.python.ops.rnn_cell_impl import RNNCell + + +class GRU: + ''' + GRU class. + ''' + def __init__(self, name, input_dim, hidden_dim): + self.name = '/'.join([name, 'gru']) + self.input_dim = input_dim + self.hidden_dim = hidden_dim + self.w_matrix = None + self.U = None + self.bias = None + + def define_params(self): + ''' + Define parameters. 
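+        W, U and the bias each pack the reset, update and candidate blocks
+        side by side, hence the 3 * hidden_dim width.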
+ ''' + input_dim = self.input_dim + hidden_dim = self.hidden_dim + prefix = self.name + self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'W'])) + self.U = tf.Variable(tf.random_normal([hidden_dim, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'U'])) + self.bias = tf.Variable(tf.random_normal([1, 3 * hidden_dim], stddev=0.1), + name='/'.join([prefix, 'b'])) + return self + + def build(self, x, h, mask=None): + ''' + Build the GRU cell. + ''' + xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1) + hu = tf.split(tf.matmul(h, self.U), 3, 1) + r = tf.sigmoid(xw[0] + hu[0]) + z = tf.sigmoid(xw[1] + hu[1]) + h1 = tf.tanh(xw[2] + r * hu[2]) + next_h = h1 * (1 - z) + h * z + if mask is not None: + next_h = next_h * mask + h * (1 - mask) + return next_h + + def build_sequence(self, xs, masks, init, is_left_to_right): + ''' + Build GRU sequence. + ''' + states = [] + last = init + if is_left_to_right: + for i, xs_i in enumerate(xs): + h = self.build(xs_i, last, masks[i]) + states.append(h) + last = h + else: + for i in range(len(xs) - 1, -1, -1): + h = self.build(xs[i], last, masks[i]) + states.insert(0, h) + last = h + return states + + +class XGRUCell(RNNCell): + + def __init__(self, hidden_dim, reuse=None): + super(XGRUCell, self).__init__(self, _reuse=reuse) + self._num_units = hidden_dim + self._activation = tf.tanh + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + def call(self, inputs, state): + + input_dim = inputs.get_shape()[-1] + assert input_dim is not None, "input dimension must be defined" + W = tf.get_variable( + name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32) + U = tf.get_variable( + name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32) + b = tf.get_variable( + name='b', shape=[1, 3 * self._num_units], dtype=tf.float32) + + xw = tf.split(tf.matmul(inputs, W) + b, 3, 1) + hu = tf.split(tf.matmul(state, U), 3, 1) + r = tf.sigmoid(xw[0] + hu[0]) + z = tf.sigmoid(xw[1] + hu[1]) + h1 = self._activation(xw[2] + r * hu[2]) + next_h = h1 * (1 - z) + state * z + return next_h, next_h diff --git a/examples/trials/weight_sharing/ga_squad/train_model.py b/examples/trials/weight_sharing/ga_squad/train_model.py new file mode 100644 index 0000000000..b8240bc960 --- /dev/null +++ b/examples/trials/weight_sharing/ga_squad/train_model.py @@ -0,0 +1,263 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, +# to any person obtaining a copy of this software and associated +# documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +''' +Train the network combined by RNN and attention. +''' + +import tensorflow as tf + +from attention import DotAttention +from rnn import XGRUCell +from util import dropout +from graph_to_tf import graph_to_network + + +class GAGConfig: + """The class for model hyper-parameter configuration.""" + def __init__(self): + self.batch_size = 128 + + self.dropout = 0.1 + + self.char_vcb_size = 1500 + self.max_char_length = 20 + self.char_embed_dim = 100 + + self.max_query_length = 40 + self.max_passage_length = 800 + + self.att_is_vanilla = True + self.att_need_padding = False + self.att_is_id = False + + self.ptr_dim = 70 + self.learning_rate = 0.1 + self.labelsmoothing = 0.1 + self.num_heads = 1 + self.rnn_units = 256 + + +class GAG: + """The class for the computation graph based QA model.""" + def __init__(self, cfg, embed, p_graph): + self.cfg = cfg + self.embed = embed + self.graph = p_graph + + self.query_word = None + self.query_mask = None + self.query_lengths = None + self.passage_word = None + self.passage_mask = None + self.passage_lengths = None + self.answer_begin = None + self.answer_end = None + self.query_char_ids = None + self.query_char_lengths = None + self.passage_char_ids = None + self.passage_char_lengths = None + self.passage_states = None + self.query_states = None + self.query_init = None + self.begin_prob = None + self.end_prob = None + self.loss = None + self.train_op = None + + + def build_net(self, is_training): + """Build the whole neural network for the QA model.""" + cfg = self.cfg + word_embed = tf.get_variable( + name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False) + char_embed = tf.get_variable(name='char_embed', + shape=[cfg.char_vcb_size, + cfg.char_embed_dim], + dtype=tf.float32) + + # [query_length, batch_size] + self.query_word = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='query_word') + self.query_mask = tf.placeholder(dtype=tf.float32, + shape=[None, None], + name='query_mask') + # [batch_size] + self.query_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], name='query_lengths') + + # [passage_length, batch_size] + self.passage_word = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='passage_word') + self.passage_mask = tf.placeholder( + dtype=tf.float32, shape=[None, None], name='passage_mask') + # [batch_size] + self.passage_lengths = tf.placeholder( + dtype=tf.int32, shape=[None], name='passage_lengths') + + if is_training: + self.answer_begin = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_begin') + self.answer_end = tf.placeholder( + dtype=tf.int32, shape=[None], name='answer_end') + + self.query_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='query_char_ids') + # sequence_length, batch_size + self.query_char_lengths = tf.placeholder( + dtype=tf.int32, shape=[None, None], name='query_char_lengths') + + self.passage_char_ids = tf.placeholder(dtype=tf.int32, + shape=[ + self.cfg.max_char_length, None, None], + name='passage_char_ids') + # sequence_length, batch_size + self.passage_char_lengths = tf.placeholder(dtype=tf.int32, + shape=[None, None], + name='passage_char_lengths') + + query_char_states = self.build_char_states(char_embed=char_embed, + 
is_training=is_training, + reuse=False, + char_ids=self.query_char_ids, + char_lengths=self.query_char_lengths) + + passage_char_states = self.build_char_states(char_embed=char_embed, + is_training=is_training, + reuse=True, + char_ids=self.passage_char_ids, + char_lengths=self.passage_char_lengths) + + with tf.variable_scope("encoding") as scope: + query_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.query_word), query_char_states], axis=2) + scope.reuse_variables() + passage_states = tf.concat([tf.nn.embedding_lookup( + word_embed, self.passage_word), passage_char_states], axis=2) + passage_states = tf.transpose(passage_states, perm=[1, 0, 2]) + query_states = tf.transpose(query_states, perm=[1, 0, 2]) + self.passage_states = passage_states + self.query_states = query_states + + output, output2 = graph_to_network(passage_states, query_states, + self.passage_lengths, self.query_lengths, + self.graph, self.cfg.dropout, + is_training, num_heads=cfg.num_heads, + rnn_units=cfg.rnn_units) + + passage_att_mask = self.passage_mask + batch_size_x = tf.shape(self.query_lengths) + answer_h = tf.zeros( + tf.concat([batch_size_x, tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0)) + + answer_context = tf.reduce_mean(output2, axis=1) + + query_init_w = tf.get_variable( + 'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim]) + self.query_init = query_init_w + answer_context = tf.matmul(answer_context, query_init_w) + + output = tf.transpose(output, perm=[1, 0, 2]) + + with tf.variable_scope('answer_ptr_layer'): + ptr_att = DotAttention('ptr', + hidden_dim=cfg.ptr_dim, + is_vanilla=self.cfg.att_is_vanilla, + is_identity_transform=self.cfg.att_is_id, + need_padding=self.cfg.att_need_padding) + answer_pre_compute = ptr_att.get_pre_compute(output) + ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim) + begin_prob, begin_logits = ptr_att.get_prob(output, answer_context, passage_att_mask, + answer_pre_compute, True) + att_state = ptr_att.get_att(output, begin_prob) + (_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h) + answer_context = answer_h + end_prob, end_logits = ptr_att.get_prob(output, answer_context, + passage_att_mask, answer_pre_compute, + True) + + self.begin_prob = tf.transpose(begin_prob, perm=[1, 0]) + self.end_prob = tf.transpose(end_prob, perm=[1, 0]) + begin_logits = tf.transpose(begin_logits, perm=[1, 0]) + end_logits = tf.transpose(end_logits, perm=[1, 0]) + + if is_training: + def label_smoothing(inputs, masks, epsilon=0.1): + """Modify target for label smoothing.""" + epsilon = cfg.labelsmoothing + num_of_channel = tf.shape(inputs)[-1] # number of channels + inputs = tf.cast(inputs, tf.float32) + return (((1 - epsilon) * inputs) + (epsilon / + tf.cast(num_of_channel, tf.float32))) * masks + cost1 = tf.reduce_mean( + tf.losses.softmax_cross_entropy(label_smoothing( + tf.one_hot(self.answer_begin, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits)) + cost2 = tf.reduce_mean( + tf.losses.softmax_cross_entropy( + label_smoothing(tf.one_hot(self.answer_end, + depth=tf.shape(self.passage_word)[0]), + tf.transpose(self.passage_mask, perm=[1, 0])), end_logits)) + + reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) + l2_loss = tf.reduce_sum(reg_ws) + loss = cost1 + cost2 + l2_loss + self.loss = loss + + optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate) + self.train_op = optimizer.minimize(self.loss) + + return tf.stack([self.begin_prob, self.end_prob]) + + def 
build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths):
        """Build char embedding network for the QA model."""
+        max_char_length = self.cfg.max_char_length
+
+        inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
+                         self.cfg.dropout, is_training)
+        inputs = tf.reshape(
+            inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim])
+        char_lengths = tf.reshape(char_lengths, shape=[-1])
+        with tf.variable_scope('char_encoding', reuse=reuse):
+            cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
+            cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
+            _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
+                cell_fw=cell_fw,
+                cell_bw=cell_bw,
+                sequence_length=char_lengths,
+                inputs=inputs,
+                time_major=True,
+                dtype=tf.float32
+            )
+
+        left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
+
+        right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
+
+        states = tf.concat([left_right, right_left], axis=1)
+        out_shape = tf.shape(char_ids)[1:3]
+        out_shape = tf.concat([out_shape, tf.constant(
+            value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
+        return tf.reshape(states, shape=out_shape)
diff --git a/examples/trials/weight_sharing/ga_squad/trial.py b/examples/trials/weight_sharing/ga_squad/trial.py
new file mode 100644
index 0000000000..bafe1e707a
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/trial.py
@@ -0,0 +1,461 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import argparse
+import heapq
+import json
+import os
+import pickle
+
+import logging
+logger = logging.getLogger('ga_squad')
+
+import numpy as np
+from tensorflow.train import init_from_checkpoint
+
+import graph
+
+from util import Timer
+
+import nni
+import data
+import evaluate
+from train_model import *
+
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+
+def get_config():
+    '''
+    Get config from the argument parser.
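+    Parses the command-line flags (data and embedding paths, batch size,
+    learning rate, and other hyper-parameters) that configure the trial.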
+    '''
+    parser = argparse.ArgumentParser(
+        description='This program uses a genetic algorithm to search for architectures on SQuAD.')
+    parser.add_argument('--input_file', type=str,
+                        default='./train-v1.1.json', help='input file')
+    parser.add_argument('--dev_file', type=str,
+                        default='./dev-v1.1.json', help='dev file')
+    parser.add_argument('--embedding_file', type=str,
+                        default='./glove.840B.300d.txt', help='embedding file')
+    parser.add_argument('--root_path', default='./data/',
+                        type=str, help='Root path of models')
+    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
+    parser.add_argument('--save_path', type=str,
+                        default='./save', help='save path dir')
+    parser.add_argument('--learning_rate', type=float, default=0.0001,
+                        help='set half of the original learning rate to reload data and train.')
+    parser.add_argument('--max_epoch', type=int, default=30)
+    parser.add_argument('--dropout_rate', type=float,
+                        default=0.1, help='dropout_rate')
+    parser.add_argument('--labelsmoothing', type=float,
+                        default=0.1, help='labelsmoothing')
+    parser.add_argument('--num_heads', type=int, default=1, help='num_heads')
+    parser.add_argument('--rnn_units', type=int, default=256, help='rnn_units')
+
+    args = parser.parse_args()
+    return args
+
+
+def get_id(word_dict, word):
+    '''
+    Return word id.
+    '''
+    if word in word_dict.keys():
+        return word_dict[word]
+    return word_dict['']
+
+
+def load_embedding(path):
+    '''
+    Return the embedding dict loaded from the given file path.
+    '''
+    EMBEDDING_DIM = 300
+    embedding_dict = {}
+    with open(path, 'r', encoding='utf-8') as file:
+        pairs = [line.strip('\r\n').split() for line in file.readlines()]
+        for pair in pairs:
+            if len(pair) == EMBEDDING_DIM + 1:
+                embedding_dict[pair[0]] = [float(x) for x in pair[1:]]
+    logger.debug('embedding_dict size: %d', len(embedding_dict))
+    return embedding_dict
+
+
+class MaxQueue:
+    '''
+    Queue for max values.
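+
+    A fixed-capacity structure backed by a min-heap that keeps the `capacity`
+    largest items pushed so far. A small usage sketch (hypothetical values):
+
+        queue = MaxQueue(2)
+        for prob in [0.3, 0.1, 0.5]:
+            queue.push(prob)
+        # queue.entries now holds the two largest values: [0.3, 0.5]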
+    '''
+
+    def __init__(self, capacity):
+        assert capacity > 0, 'queue size must be larger than 0'
+        self._capacity = capacity
+        self._entries = []
+
+    @property
+    def entries(self):
+        return self._entries
+
+    @property
+    def capacity(self):
+        return self._capacity
+
+    @property
+    def size(self):
+        return len(self._entries)
+
+    def clear(self):
+        self._entries = []
+
+    def push(self, item):
+        if self.size < self.capacity:
+            heapq.heappush(self.entries, item)
+        else:
+            heapq.heappushpop(self.entries, item)
+
+
+def find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length):
+    left = 0
+    right = 0
+    max_prob = left_prob[0] * right_prob[0]
+    for i in range(0, passage_length):
+        left_p = left_prob[i]
+        for j in range(i, min(i + max_answer_length, passage_length)):
+            total_prob = left_p * right_prob[j]
+            if max_prob < total_prob:
+                left, right, max_prob = i, j, total_prob
+    return [(max_prob, left, right)]
+
+
+def write_prediction(path, position1_result, position2_result):
+    import codecs
+
+    with codecs.open(path, 'w', encoding='utf8') as file:
+        batch_num = len(position1_result)
+        for i in range(batch_num):
+            position1_batch = position1_result[i]
+            position2_batch = position2_result[i]
+
+            for j in range(position1_batch.shape[0]):
+                file.write(str(position1_batch[j]) +
+                           '\t' + str(position2_batch[j]) + '\n')
+
+
+def find_kbest_answer_span(k, left_prob, right_prob, passage_length, max_answer_length):
+    if k == 1:
+        return find_best_answer_span(left_prob, right_prob, passage_length, max_answer_length)
+
+    queue = MaxQueue(k)
+    for i in range(0, passage_length):
+        left_p = left_prob[i]
+        for j in range(i, min(i + max_answer_length, passage_length)):
+            total_prob = left_p * right_prob[j]
+            queue.push((total_prob, i, j))
+    return list(sorted(queue.entries, key=lambda x: -x[0]))
+
+
+def run_epoch(batches, answer_net, is_training):
+    if not is_training:
+        position1_result = []
+        position2_result = []
+        contexts = []
+        ids = []
+
+    loss_sum = 0
+    timer = Timer()
+    count = 0
+    for batch in batches:
+        used = timer.get_elapsed(False)
+        count += 1
+        qps = batch['qp_pairs']
+        question_tokens = [qp['question_tokens'] for qp in qps]
+        passage_tokens = [qp['passage_tokens'] for qp in qps]
+        context = [(qp['passage'], qp['passage_tokens']) for qp in qps]
+        sample_id = [qp['id'] for qp in qps]
+
+        _, query, query_mask, query_lengths = data.get_word_input(
+            data=question_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim)
+        _, passage, passage_mask, passage_lengths = data.get_word_input(
+            data=passage_tokens, word_dict=word_vcb, embed=embed, embed_dim=cfg.word_embed_dim)
+
+        query_char, query_char_lengths = data.get_char_input(
+            data=question_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length)
+
+        passage_char, passage_char_lengths = data.get_char_input(
+            data=passage_tokens, char_dict=char_vcb, max_char_length=cfg.max_char_length)
+
+        if is_training:
+            answer_begin, answer_end = data.get_answer_begin_end(qps)
+
+        if is_training:
+            feed_dict = {answer_net.query_word: query,
+                         answer_net.query_mask: query_mask,
+                         answer_net.query_lengths: query_lengths,
+                         answer_net.passage_word: passage,
+                         answer_net.passage_mask: passage_mask,
+                         answer_net.passage_lengths: passage_lengths,
+                         answer_net.query_char_ids: query_char,
+                         answer_net.query_char_lengths: query_char_lengths,
+                         answer_net.passage_char_ids: passage_char,
+                         answer_net.passage_char_lengths: passage_char_lengths,
+                         answer_net.answer_begin: answer_begin,
+                         answer_net.answer_end: answer_end}
+            loss, _, = sess.run(
+                [answer_net.loss, answer_net.train_op], feed_dict=feed_dict)
+            if count % 100 == 0:
+                logger.debug('%d %g expected:%g, loss:%g' %
+                             (count, used, used / count * len(batches), loss))
+            loss_sum += loss
+        else:
+            feed_dict = {answer_net.query_word: query,
+                         answer_net.query_mask: query_mask,
+                         answer_net.query_lengths: query_lengths,
+                         answer_net.passage_word: passage,
+                         answer_net.passage_mask: passage_mask,
+                         answer_net.passage_lengths: passage_lengths,
+                         answer_net.query_char_ids: query_char,
+                         answer_net.query_char_lengths: query_char_lengths,
+                         answer_net.passage_char_ids: passage_char,
+                         answer_net.passage_char_lengths: passage_char_lengths}
+            position1, position2 = sess.run(
+                [answer_net.begin_prob, answer_net.end_prob], feed_dict=feed_dict)
+            position1_result += position1.tolist()
+            position2_result += position2.tolist()
+            contexts += context
+            ids = np.concatenate((ids, sample_id))
+            if count % 100 == 0:
+                logger.debug('%d %g expected:%g' %
+                             (count, used, used / count * len(batches)))
+    loss = loss_sum / len(batches)
+    if is_training:
+        return loss
+    return loss, position1_result, position2_result, ids, contexts
+
+
+def generate_predict_json(position1_result, position2_result, ids, passage_tokens):
+    '''
+    Generate a JSON of answers (id -> answer text) from the predictions.
+    '''
+    predict_len = len(position1_result)
+    logger.debug('total prediction num is %s', str(predict_len))
+
+    answers = {}
+    for i in range(predict_len):
+        sample_id = ids[i]
+        passage, tokens = passage_tokens[i]
+        kbest = find_best_answer_span(
+            position1_result[i], position2_result[i], len(tokens), 23)
+        _, start, end = kbest[0]
+        answer = passage[tokens[start]['char_begin']:tokens[end]['char_end']]
+        answers[sample_id] = answer
+    logger.debug('generate predict done.')
+    return answers
+
+
+def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
+    '''
+    Generate data.
+    '''
+    global root_path
+    qp_pairs = data.load_from_file(path=path, is_training=is_training)
+
+    tokenized_sent = 0
+    # qp_pairs = qp_pairs[:1000]
+    for qp_pair in qp_pairs:
+        tokenized_sent += 1
+        data.tokenize(qp_pair, tokenizer, is_training)
+        for word in qp_pair['question_tokens']:
+            word_vcb.add(word['word'])
+            for char in word['word']:
+                char_vcb.add(char)
+        for word in qp_pair['passage_tokens']:
+            word_vcb.add(word['word'])
+            for char in word['word']:
+                char_vcb.add(char)
+
+    max_query_length = max(len(x['question_tokens']) for x in qp_pairs)
+    max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs)
+    #min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)
+    cfg.max_query_length = max_query_length
+    cfg.max_passage_length = max_passage_length
+
+    return qp_pairs
+
+
+def train_with_graph(p_graph, qp_pairs, dev_qp_pairs):
+    '''
+    Train a network from a specific graph.
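+    Builds weight-sharing train/dev models, optionally restores shared
+    variables from a previous trial's checkpoint, and reports intermediate
+    and best accuracy to NNI, with patience-based early stopping.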
+    '''
+    global sess
+    with tf.Graph().as_default():
+        train_model = GAG(cfg, embed, p_graph)
+        train_model.build_net(is_training=True)
+        tf.get_variable_scope().reuse_variables()
+        dev_model = GAG(cfg, embed, p_graph)
+        dev_model.build_net(is_training=False)
+        with tf.Session() as sess:
+            if restore_path is not None:
+                restore_mapping = dict(zip(restore_shared, restore_shared))
+                logger.debug('init shared variables from {}, restore_scopes: {}'.format(restore_path, restore_shared))
+                init_from_checkpoint(restore_path, restore_mapping)
+                logger.debug('init variables')
+                logger.debug(sess.run(tf.report_uninitialized_variables()))
+            init = tf.global_variables_initializer()
+            sess.run(init)
+            # writer = tf.summary.FileWriter('%s/graph/'%execution_path, sess.graph)
+            logger.debug('assign to graph')
+
+            saver = tf.train.Saver()
+            train_loss = None
+            bestacc = 0
+            patience = 5
+            patience_increase = 2
+            improvement_threshold = 0.995
+
+            for epoch in range(max_epoch):
+                logger.debug('begin to train')
+                train_batches = data.get_batches(qp_pairs, cfg.batch_size)
+                train_loss = run_epoch(train_batches, train_model, True)
+                logger.debug('epoch ' + str(epoch) +
+                             ' loss: ' + str(train_loss))
+                dev_batches = list(data.get_batches(
+                    dev_qp_pairs, cfg.batch_size))
+                _, position1, position2, ids, contexts = run_epoch(
+                    dev_batches, dev_model, False)
+
+                answers = generate_predict_json(
+                    position1, position2, ids, contexts)
+                if save_path is not None:
+                    logger.info('save prediction file to {}'.format(save_path))
+                    with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file:
+                        json.dump(answers, file)
+                else:
+                    answers = json.dumps(answers)
+                    answers = json.loads(answers)
+                iter = epoch + 1
+
+                acc = evaluate.evaluate_with_predictions(
+                    args.dev_file, answers)
+
+                logger.debug('Send intermediate acc: %s', str(acc))
+                nni.report_intermediate_result(acc)
+
+                logger.debug('Send intermediate result done.')
+
+                if acc > bestacc:
+                    if acc * improvement_threshold > bestacc:
+                        patience = max(patience, iter * patience_increase)
+                    bestacc = acc
+
+                    if save_path is not None:
+                        logger.info('save model & prediction to {}'.format(save_path))
+                        saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch))
+                        with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file:
+                            pickle.dump(
+                                (position1, position2, ids, contexts), file)
+                logger.debug('epoch %d acc %g bestacc %g' %
+                             (epoch, acc, bestacc))
+                if patience <= iter:
+                    break
+            logger.debug('save done.')
+    return train_loss, bestacc
+
+
+embed = None
+char_vcb = None
+tokenizer = None
+word_vcb = None
+
+
+def load_data():
+    global embed, char_vcb, tokenizer, word_vcb
+    logger.debug('tokenize data')
+    tokenizer = data.WhitespaceTokenizer()
+
+    char_set = set()
+    word_set = set()
+    logger.debug('generate train data')
+    qp_pairs = generate_data(input_file, tokenizer,
+                             char_set, word_set, is_training=True)
+    logger.debug('generate dev data')
+    dev_qp_pairs = generate_data(
+        dev_file, tokenizer, char_set, word_set, is_training=False)
+    logger.debug('generate data done.')
+
+    char_vcb = {char: sample_id for sample_id, char in enumerate(char_set)}
+    word_vcb = {word: sample_id for sample_id, word in enumerate(word_set)}
+
+    timer.start()
+    logger.debug('read embedding table')
+
+    cfg.word_embed_dim = 300
+    embed = np.zeros((len(word_vcb), cfg.word_embed_dim), dtype=np.float32)
+
+    embedding = load_embedding(args.embedding_file)
+    for word, sample_id in word_vcb.items():
+        if word in embedding:
+            embed[sample_id] = embedding[word]
+
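+    # Words missing from the pretrained table keep the zero row set above.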
+    # add UNK into dict
+    unk = np.zeros((1, cfg.word_embed_dim), dtype=np.float32)
+    embed = np.concatenate((unk, embed), axis=0)
+    word_vcb = {key: value + 1 for key, value in word_vcb.items()}
+
+    return qp_pairs, dev_qp_pairs
+
+
+if __name__ == '__main__':
+    try:
+        args = get_config()
+
+        root_path = os.path.expanduser(args.root_path)
+        input_file = os.path.expanduser(args.input_file)
+        dev_file = os.path.expanduser(args.dev_file)
+        max_epoch = args.max_epoch
+
+        cfg = GAGConfig()
+        cfg.batch_size = args.batch_size
+        cfg.learning_rate = float(args.learning_rate)
+        cfg.dropout = args.dropout_rate
+        cfg.rnn_units = args.rnn_units
+        cfg.labelsmoothing = args.labelsmoothing
+        cfg.num_heads = args.num_heads
+        timer = Timer()
+
+        qp_pairs, dev_qp_pairs = load_data()
+        logger.debug('Init finish.')
+
+        original_params = nni.get_next_parameter()
+        '''
+        with open('data.json') as f:
+            original_params = json.load(f)
+        '''
+        p_graph = graph.graph_loads(original_params['graph'])
+        save_path = original_params['save_dir']
+        os.makedirs(save_path)
+        restore_path = original_params['restore_dir']
+        # parenthesized so the embedding scopes are restored together with the
+        # shared layer scopes; without the parentheses the conditional bound to
+        # the wrong operand and the embedding scopes were silently dropped
+        restore_shared = ([hash_id + '/' for hash_id in original_params['shared_id']]
+                          + ['word_embed', 'char_embed', 'char_encoding/']) \
+            if original_params['shared_id'] is not None else []
+        train_loss, best_acc = train_with_graph(p_graph, qp_pairs, dev_qp_pairs)
+
+        logger.debug('Send best acc: %s', str(best_acc))
+        nni.report_final_result(best_acc)
+        logger.debug('Send final result done')
+    except:
+        logger.exception('Catch exception in trial.py.')
+        raise
diff --git a/examples/trials/weight_sharing/ga_squad/util.py b/examples/trials/weight_sharing/ga_squad/util.py
new file mode 100644
index 0000000000..ac9f363003
--- /dev/null
+++ b/examples/trials/weight_sharing/ga_squad/util.py
@@ -0,0 +1,76 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+'''
+Util Module
+'''
+
+import time
+
+import tensorflow as tf
+
+
+def shape(tensor):
+    '''
+    Get the shape of a tensor, returned as a tuple of ints.
+    '''
+    temp_s = tensor.get_shape()
+    return tuple([temp_s[i].value for i in range(0, len(temp_s))])
+
+
+def get_variable(name, temp_s):
+    '''
+    Create a zero-initialized variable with the given name and shape.
+    '''
+    return tf.Variable(tf.zeros(temp_s), name=name)
+
+
+def dropout(tensor, drop_prob, is_training):
+    '''
+    Apply dropout during training; pass the tensor through unchanged otherwise.
+    '''
+    if not is_training:
+        return tensor
+    return tf.nn.dropout(tensor, 1.0 - drop_prob)
+
+
+class Timer:
+    '''
+    Timer measures the elapsed wall-clock time between calls.
+    '''
+    def __init__(self):
+        self.__start = time.time()
+
+    def start(self):
+        '''
+        Start (or restart) the timer.
+        '''
+        self.__start = time.time()
+
+    def get_elapsed(self, restart=True):
+        '''
+        Return the time span since the last start; optionally restart.
+        '''
+        end = time.time()
+        span = end - self.__start
+        if restart:
+            self.__start = end
+        return span
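A quick sketch of how the helpers above are meant to be used (assuming `util.py` is importable from the trial's code directory):

```python
from util import Timer

timer = Timer()                          # starts timing at construction
# ... run a training epoch here ...
print(timer.get_elapsed())               # seconds since start; restarts by default
print(timer.get_elapsed(restart=False))  # read again without restarting
```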
diff --git a/examples/tuners/ga_customer_tuner/customer_tuner.py b/examples/tuners/ga_customer_tuner/customer_tuner.py
index 2cfae001e5..699df5eb0e 100644
--- a/examples/tuners/ga_customer_tuner/customer_tuner.py
+++ b/examples/tuners/ga_customer_tuner/customer_tuner.py
@@ -96,7 +96,7 @@ def generate_parameters(self, parameter_id):
                 temp = json.loads(graph_dumps(indiv.config))
             else:
                 random.shuffle(self.population)
-                if self.population[0].result > self.population[1].result:
+                if self.population[0].result < self.population[1].result:
                     self.population[0] = self.population[1]
                 indiv = copy.deepcopy(self.population[0])
                 self.population.pop(1)
diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/README.md b/examples/tuners/weight_sharing/ga_customer_tuner/README.md
new file mode 100644
index 0000000000..bc7a6f1f84
--- /dev/null
+++ b/examples/tuners/weight_sharing/ga_customer_tuner/README.md
@@ -0,0 +1,15 @@
+# How to use ga_customer_tuner?
+This tuner is a customized tuner which is only suitable for trials whose code path is "~/nni/examples/trials/ga_squad";
+run `cd ~/nni/examples/trials/ga_squad` and check readme.md for more information about the ga_squad trial.
+
+# config
+If you want to use ga_customer_tuner in your experiment, you can set your config file in the following format:
+
+```
+tuner:
+  codeDir: ~/nni/examples/tuners/ga_customer_tuner
+  classFileName: customer_tuner.py
+  className: CustomerTuner
+  classArgs:
+    optimize_mode: maximize
+```
\ No newline at end of file
diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/__init__.py b/examples/tuners/weight_sharing/ga_customer_tuner/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py b/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py
new file mode 100644
index 0000000000..86520b5220
--- /dev/null
+++ b/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py
@@ -0,0 +1,224 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+import copy
+import json
+import logging
+import random
+import os
+
+from threading import Event, Lock, current_thread
+
+from nni.tuner import Tuner
+
+from graph import Graph, Layer, LayerType, Enum, graph_dumps, graph_loads, unique
+
+logger = logging.getLogger('ga_customer_tuner')
+
+
+@unique
+class OptimizeMode(Enum):
+    Minimize = 'minimize'
+    Maximize = 'maximize'
+
+
+
+
+class Individual(object):
+    """
+    Basic unit of the evolution algorithm
+    """
+    def __init__(self, graph_cfg: Graph = None, info=None, result=None, indiv_id=None):
+        self.config = graph_cfg
+        self.result = result
+        self.info = info
+        self.indiv_id = indiv_id
+        self.parent_id = None
+        self.shared_ids = {layer.hash_id for layer in self.config.layers if layer.is_delete is False}
+
+    def __str__(self):
+        return "info: " + str(self.info) + ", config :" + str(self.config) + ", result: " + str(self.result)
+
+    def mutation(self, indiv_id: int, graph_cfg: Graph = None, info=None):
+        self.result = None
+        if graph_cfg is not None:
+            self.config = graph_cfg
+        self.config.mutation()
+        self.info = info
+        self.parent_id = self.indiv_id
+        self.indiv_id = indiv_id
+        self.shared_ids.intersection_update({layer.hash_id for layer in self.config.layers if layer.is_delete is False})
+
+
+class CustomerTuner(Tuner):
+    """
+    NAS Tuner using an evolution algorithm, with weight sharing enabled
+    """
+    def __init__(self, optimize_mode, save_dir_root, population_size=32, graph_max_layer=6, graph_min_layer=3):
+        self.optimize_mode = OptimizeMode(optimize_mode)
+        self.indiv_counter = 0
+        self.events = []
+        self.thread_lock = Lock()
+        self.save_dir_root = save_dir_root
+        self.population = self.init_population(population_size, graph_max_layer, graph_min_layer)
+        assert len(self.population) == population_size
+        logger.debug('init population done.')
+        return
+
+    def generate_new_id(self):
+        """
+        generate a new id and event hook for a new Individual
+        """
+        self.events.append(Event())
+        indiv_id = self.indiv_counter
+        self.indiv_counter += 1
+        return indiv_id
+
+    def save_dir(self, indiv_id):
+        if indiv_id is None:
+            return None
+        else:
+            return os.path.join(self.save_dir_root, str(indiv_id))
+
+    def init_population(self, population_size, graph_max_layer, graph_min_layer):
+        """
+        initialize the population for the evolution tuner
+        """
+        population = []
+        graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer,
+                      inputs=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')],
+                      output=[Layer(LayerType.output.value, inputs=[4], size='x'), Layer(LayerType.output.value, inputs=[5], size='y')],
+                      hide=[Layer(LayerType.attention.value, inputs=[0, 1], output=[2]),
+                            Layer(LayerType.attention.value, inputs=[1, 0], output=[3])])
+        for _ in range(population_size):
+            graph_tmp = copy.deepcopy(graph)
+            graph_tmp.mutation()
+            population.append(Individual(indiv_id=self.generate_new_id(), graph_cfg=graph_tmp, result=None))
+        return population
+
+    def generate_parameters(self, parameter_id):
+        """Returns a set of trial graph config, as a serializable object.
+        An example configuration:
+        ```json
+        {
+            "shared_id": [
+                "4a11b2ef9cb7211590dfe81039b27670",
+                "370af04de24985e5ea5b3d72b12644c9",
+                "11f646e9f650f5f3fedc12b6349ec60f",
+                "0604e5350b9c734dd2d770ee877cfb26",
+                "6dbeb8b022083396acb721267335f228",
+                "ba55380d6c84f5caeb87155d1c5fa654"
+            ],
+            "graph": {
+                "layers": [
+                    ...
+                    {
+                        "hash_id": "ba55380d6c84f5caeb87155d1c5fa654",
+                        "is_delete": false,
+                        "size": "x",
+                        "graph_type": 0,
+                        "output": [
+                            6
+                        ],
+                        "output_size": 1,
+                        "input": [
+                            7,
+                            1
+                        ],
+                        "input_size": 2
+                    },
+                    ...
+                ]
+            },
+            "restore_dir": "/mnt/nfs/nni/ga_squad/87",
+            "save_dir": "/mnt/nfs/nni/ga_squad/95"
+        }
+        ```
+        `restore_dir` means the path from which to load the previously trained model weights; if null, init from scratch.
+        `save_dir` means the path where the model trained by the current trial is saved.
+        `graph` is the configuration of the model network.
+        Note: each layer configuration has a `hash_id` property,
+        which tells the tuner & trial code whether to share trained weights or not.
+        `shared_id` lists the hash_ids of layers that should be shared with a previously trained model.
+        """
+        logger.debug('acquiring lock for param {}'.format(parameter_id))
+        self.thread_lock.acquire()
+        logger.debug('lock for current thread acquired')
+        if not self.population:
+            logger.debug("the population is empty.")
+            raise Exception('The population is empty')
+        pos = -1
+        for i in range(len(self.population)):
+            if self.population[i].result is None:
+                pos = i
+                break
+        if pos != -1:
+            indiv = copy.deepcopy(self.population[pos])
+            self.population.pop(pos)
+            graph_param = json.loads(graph_dumps(indiv.config))
+        else:
+            random.shuffle(self.population)
+            # keep the better of the first two individuals, then mutate it
+            if self.population[0].result < self.population[1].result:
+                self.population[0] = self.population[1]
+            indiv = copy.deepcopy(self.population[0])
+            self.population.pop(1)
+            indiv.mutation(indiv_id=self.generate_new_id())
+            graph_param = json.loads(graph_dumps(indiv.config))
+        param_json = {
+            'graph': graph_param,
+            'restore_dir': self.save_dir(indiv.parent_id),
+            'save_dir': self.save_dir(indiv.indiv_id),
+            'shared_id': list(indiv.shared_ids) if indiv.parent_id is not None else None,
+        }
+        logger.debug('generate_parameter return value is:')
+        logger.debug(param_json)
+        logger.debug('releasing lock')
+        self.thread_lock.release()
+        if indiv.parent_id is not None:
+            logger.debug("new trial {} pending on parent experiment {}".format(indiv.indiv_id, indiv.parent_id))
+            self.events[indiv.parent_id].wait()
+            logger.debug("trial {} ready".format(indiv.indiv_id))
+        return param_json
+
+    def receive_trial_result(self, parameter_id, parameters, value):
+        '''
+        Record an observation of the objective function.
+        parameter_id : int
+        parameters : dict of parameters
+        value : final metrics of the trial, including the reward
+        '''
+        logger.debug('acquiring lock for param {}'.format(parameter_id))
+        self.thread_lock.acquire()
+        logger.debug('lock for current thread acquired')
+        reward = self.extract_scalar_reward(value)
+        if self.optimize_mode is OptimizeMode.Minimize:
+            reward = -reward
+
+        logger.debug('receive trial result is:\n')
+        logger.debug(str(parameters))
+        logger.debug(str(reward))
+
+        indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]),
+                           graph_cfg=graph_loads(parameters['graph']), result=reward)
+        self.population.append(indiv)
+        logger.debug('releasing lock')
+        self.thread_lock.release()
+        self.events[indiv.indiv_id].set()
+
+    def update_search_space(self, data):
+        pass
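The trial consumes exactly the fields documented in `generate_parameters` above. A minimal sketch of the trial side (the real logic lives in `examples/trials/weight_sharing/ga_squad/trial.py`; `build_and_train` below is a hypothetical placeholder):

```python
import nni

params = nni.get_next_parameter()
graph_cfg = params['graph']          # serialized network configuration
restore_dir = params['restore_dir']  # parent's save_dir, or null for a root trial
shared_ids = params['shared_id']     # hash_ids whose variable scopes can be restored

# build_and_train (hypothetical): restore the scopes named by shared_ids from
# restore_dir, train the network, then checkpoint it under params['save_dir'].
accuracy = build_and_train(graph_cfg, restore_dir, shared_ids, params['save_dir'])
nni.report_final_result(accuracy)
```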
diff --git a/examples/tuners/weight_sharing/ga_customer_tuner/graph.py b/examples/tuners/weight_sharing/ga_customer_tuner/graph.py
new file mode 100644
index 0000000000..8e675a06ff
--- /dev/null
+++ b/examples/tuners/weight_sharing/ga_customer_tuner/graph.py
@@ -0,0 +1,336 @@
+# Copyright (c) Microsoft Corporation
+# All rights reserved.
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge,
+# to any person obtaining a copy of this software and associated
+# documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and
+# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+'''
+Graph is a custom-defined class; this module contains the classes and functions related to graphs.
+'''
+
+
+import copy
+import hashlib
+import logging
+import json
+import random
+from collections import deque
+from enum import Enum, unique
+from typing import Iterable
+
+import numpy as np
+
+_logger = logging.getLogger('ga_squad_graph')
+
+@unique
+class LayerType(Enum):
+    '''
+    Layer type
+    '''
+    attention = 0
+    self_attention = 1
+    rnn = 2
+    input = 3
+    output = 4
+
+class Layer(object):
+    '''
+    Layer class, which holds the information of one node in the graph.
+    '''
+    def __init__(self, graph_type, inputs=None, output=None, size=None, hash_id=None):
+        self.input = inputs if inputs is not None else []
+        self.output = output if output is not None else []
+        self.graph_type = graph_type
+        self.is_delete = False
+        self.size = size
+        self.hash_id = hash_id
+        if graph_type == LayerType.attention.value:
+            self.input_size = 2
+            self.output_size = 1
+        elif graph_type == LayerType.rnn.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.self_attention.value:
+            self.input_size = 1
+            self.output_size = 1
+        elif graph_type == LayerType.input.value:
+            self.input_size = 0
+            self.output_size = 1
+            if self.hash_id is None:
+                hasher = hashlib.md5()
+                hasher.update(np.random.bytes(100))
+                self.hash_id = hasher.hexdigest()
+        elif graph_type == LayerType.output.value:
+            self.input_size = 1
+            self.output_size = 0
+        else:
+            raise ValueError('Unsupported LayerType: {}'.format(graph_type))
+
+    def update_hash(self, layers: Iterable):
+        """
+        Calculate the `hash_id` of this Layer, which is determined by its own
+        properties and the `hash_id`s of its input layers.
+        """
+        if self.graph_type == LayerType.input.value:
+            return
+        hasher = hashlib.md5()
+        hasher.update(LayerType(self.graph_type).name.encode('ascii'))
+        hasher.update(str(self.size).encode('ascii'))
+        for i in self.input:
+            if layers[i].hash_id is None:
+                raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i]))
+            hasher.update(layers[i].hash_id.encode('ascii'))
+        self.hash_id = hasher.hexdigest()
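+    # Since each hash mixes the layer's type and size with the hash_ids of its
+    # inputs, two layers receive equal hash_ids exactly when their entire
+    # upstream subgraphs match, which is the condition under which trained
+    # weights can be shared safely between trials.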
+    def set_size(self, graph_id, size):
+        '''
+        Set the size contributed by layer `graph_id`; return False on a size conflict.
+        '''
+        if self.graph_type == LayerType.attention.value:
+            if self.input[0] == graph_id:
+                self.size = size
+        if self.graph_type == LayerType.rnn.value:
+            self.size = size
+        if self.graph_type == LayerType.self_attention.value:
+            self.size = size
+        if self.graph_type == LayerType.output.value:
+            if self.size != size:
+                return False
+        return True
+
+    def clear_size(self):
+        '''
+        Clear size
+        '''
+        # compare against each layer type explicitly; the former `x == a or b or c`
+        # form was always true because the bare enum values are truthy
+        if self.graph_type in (LayerType.attention.value,
+                               LayerType.rnn.value,
+                               LayerType.self_attention.value):
+            self.size = None
+
+    def __str__(self):
+        return 'input:' + str(self.input) + ' output:' + str(self.output) + ' type:' + str(self.graph_type) + ' is_delete:' + str(self.is_delete) + ' size:' + str(self.size)
+
+def graph_dumps(graph):
+    '''
+    Dump the graph to a JSON string.
+    '''
+    return json.dumps(graph, default=lambda obj: obj.__dict__)
+
+def graph_loads(graph_json):
+    '''
+    Load a graph from its JSON form.
+    '''
+    layers = []
+    for layer in graph_json['layers']:
+        layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id'])
+        layer_info.is_delete = layer['is_delete']
+        _logger.debug('append layer {}'.format(layer_info))
+        layers.append(layer_info)
+    graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], [])
+    graph.layers = layers
+    _logger.debug('graph {} loaded'.format(graph))
+    return graph
+
+class Graph(object):
+    '''
+    Custom Graph class.
+    '''
+    def __init__(self, max_layer_num, min_layer_num, inputs, output, hide):
+        self.layers = []
+        self.max_layer_num = max_layer_num
+        self.min_layer_num = min_layer_num
+        assert min_layer_num < max_layer_num
+
+        for layer in inputs:
+            self.layers.append(layer)
+        for layer in output:
+            self.layers.append(layer)
+        if hide is not None:
+            for layer in hide:
+                self.layers.append(layer)
+        assert self.is_legal()
+
+    def is_topology(self, layers=None):
+        '''
+        Validate the topology; return False for an illegal graph,
+        otherwise return the layer ids in a valid processing order.
+        '''
+        if layers is None:
+            layers = self.layers
+        layers_nodle = []
+        result = []
+        for i, layer in enumerate(layers):
+            if layer.is_delete is False:
+                layers_nodle.append(i)
+        while True:
+            flag_break = True
+            layers_toremove = []
+            for layer1 in layers_nodle:
+                flag_arrive = True
+                for layer2 in layers[layer1].input:
+                    if layer2 in layers_nodle:
+                        flag_arrive = False
+                if flag_arrive is True:
+                    for layer2 in layers[layer1].output:
+                        # size conflict between connected layers
+                        if layers[layer2].set_size(layer1, layers[layer1].size) is False:
+                            return False
+                    layers_toremove.append(layer1)
+                    result.append(layer1)
+                    flag_break = False
+            for layer in layers_toremove:
+                layers_nodle.remove(layer)
+            result.append('|')
+            if flag_break:
+                break
+        # there is a loop in the graph, or some layers cannot be reached
+        if layers_nodle:
+            return False
+        return result
+
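+    # is_topology is effectively Kahn's algorithm: it repeatedly peels off
+    # layers whose inputs have all been scheduled, propagating sizes as it
+    # goes. The '|' markers separate peeling rounds, and any leftover layers
+    # indicate a cycle or an unreachable layer, in which case False is returned.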
+    def layer_num(self, layers=None):
+        '''
+        Return the number of layers (inputs and outputs excluded).
+        '''
+        if layers is None:
+            layers = self.layers
+        layer_num = 0
+        for layer in layers:
+            if layer.is_delete is False and layer.graph_type != LayerType.input.value\
+                and layer.graph_type != LayerType.output.value:
+                layer_num += 1
+        return layer_num
+
+    def is_legal(self, layers=None):
+        '''
+        Judge whether the layers form a legal graph.
+        '''
+        if layers is None:
+            layers = self.layers
+
+        for layer in layers:
+            if layer.is_delete is False:
+                if len(layer.input) != layer.input_size:
+                    return False
+                if len(layer.output) < layer.output_size:
+                    return False
+
+        # layer_num <= max_layer_num
+        if self.layer_num(layers) > self.max_layer_num:
+            return False
+
+        # there is a loop in the graph, or some layers cannot be reached
+        if self.is_topology(layers) is False:
+            return False
+
+        return True
+
+    def update_hash(self):
+        """
+        Update the hash id of each layer in topological order;
+        the hash ids are used for weight sharing.
+        """
+        _logger.debug('update hash')
+        layer_in_cnt = [len(layer.input) for layer in self.layers]
+        topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value])
+        while topo_queue:
+            layer_i = topo_queue.pop()
+            self.layers[layer_i].update_hash(self.layers)
+            for layer_j in self.layers[layer_i].output:
+                layer_in_cnt[layer_j] -= 1
+                if layer_in_cnt[layer_j] == 0:
+                    topo_queue.appendleft(layer_j)
+
+    def mutation(self, only_add=False):
+        '''
+        Mutate the graph.
+        '''
+        types = []
+        if self.layer_num() < self.max_layer_num:
+            types.append(0)
+            types.append(1)
+        if self.layer_num() > self.min_layer_num and only_add is False:
+            types.append(2)
+            types.append(3)
+        # 0 : add a layer , delete an edge
+        # 1 : add a layer , change an edge
+        # 2 : delete a layer, delete an edge
+        # 3 : delete a layer, change an edge
+        graph_type = random.choice(types)
+        layer_type = random.choice([LayerType.attention.value,\
+            LayerType.self_attention.value, LayerType.rnn.value])
+        layers = copy.deepcopy(self.layers)
+        cnt_try = 0
+        while True:
+            layers_in = []
+            layers_out = []
+            layers_del = []
+            for i, layer in enumerate(layers):
+                if layer.is_delete is False:
+                    if layer.graph_type != LayerType.output.value:
+                        layers_in.append(i)
+                    if layer.graph_type != LayerType.input.value:
+                        layers_out.append(i)
+                    if layer.graph_type != LayerType.output.value\
+                        and layer.graph_type != LayerType.input.value:
+                        layers_del.append(i)
+            if graph_type <= 1:
+                new_id = len(layers)
+                out = random.choice(layers_out)
+                inputs = []
+                output = [out]
+                pos = random.randint(0, len(layers[out].input) - 1)
+                last_in = layers[out].input[pos]
+                layers[out].input[pos] = new_id
+                if graph_type == 0:
+                    layers[last_in].output.remove(out)
+                if graph_type == 1:
+                    layers[last_in].output.remove(out)
+                    layers[last_in].output.append(new_id)
+                    inputs = [last_in]
+                lay = Layer(graph_type=layer_type, inputs=inputs, output=output)
+                while len(inputs) < lay.input_size:
+                    layer1 = random.choice(layers_in)
+                    inputs.append(layer1)
+                    layers[layer1].output.append(new_id)
+                lay.input = inputs
+                layers.append(lay)
+            else:
+                layer1 = random.choice(layers_del)
+                for layer2 in layers[layer1].output:
+                    layers[layer2].input.remove(layer1)
+                    if graph_type == 2:
+                        random_in = random.choice(layers_in)
+                    else:
+                        random_in = random.choice(layers[layer1].input)
+                    layers[layer2].input.append(random_in)
+                    layers[random_in].output.append(layer2)
+                for layer2 in layers[layer1].input:
+                    layers[layer2].output.remove(layer1)
+                layers[layer1].is_delete = True
+
+            if self.is_legal(layers):
+                self.layers = layers
+                break
+            else:
+                layers = copy.deepcopy(self.layers)
+                cnt_try += 1
+        self.update_hash()
+
+    def __str__(self):
+        info = ""
+        for l_id, layer in enumerate(self.layers):
+            if layer.is_delete is False:
+                info += 'id:%d ' % l_id + str(layer) + '\n'
+        return info
diff --git a/src/sdk/pynni/nni/common.py b/src/sdk/pynni/nni/common.py
index d71241a7f5..946571bd8c 100644
--- a/src/sdk/pynni/nni/common.py
+++ b/src/sdk/pynni/nni/common.py
@@ -66,8 +66,7 @@ def init_logger(logger_file_path):
     elif env_args.log_dir is not None:
         logger_file_path = os.path.join(env_args.log_dir, logger_file_path)
     logger_file = open(logger_file_path, 'w')
-
-    fmt = '[%(asctime)s] %(levelname)s (%(name)s) %(message)s'
+    fmt = '[%(asctime)s] %(levelname)s (%(name)s/%(threadName)s) %(message)s'
     formatter = logging.Formatter(fmt, _time_format)
 
     handler = logging.StreamHandler(logger_file)
diff --git a/src/sdk/pynni/nni/msg_dispatcher.py b/src/sdk/pynni/nni/msg_dispatcher.py
index 4275e58e7e..325befc7d1 100644
--- a/src/sdk/pynni/nni/msg_dispatcher.py
+++ b/src/sdk/pynni/nni/msg_dispatcher.py
@@ -97,6 +97,7 @@ def handle_initialize(self, data):
     def handle_request_trial_jobs(self, data):
         # data: number or trial jobs
         ids = [_create_parameter_id() for _ in range(data)]
+        _logger.debug("requesting for generating params of {}".format(ids))
         params_list = self.tuner.generate_multiple_parameters(ids)
 
         for i, _ in enumerate(params_list):
diff --git a/src/sdk/pynni/nni/msg_dispatcher_base.py b/src/sdk/pynni/nni/msg_dispatcher_base.py
index d366ac2b50..541f670b4d 100644
--- a/src/sdk/pynni/nni/msg_dispatcher_base.py
+++ b/src/sdk/pynni/nni/msg_dispatcher_base.py
@@ -19,10 +19,14 @@
 # ==================================================================================================
 
 #import json_tricks
-import os
 import logging
-import json_tricks
+import os
+from queue import Queue
+import sys
+
 from multiprocessing.dummy import Pool as ThreadPool
+
+import json_tricks
 from .common import init_logger, multi_thread_enabled
 from .recoverable import Recoverable
 from .protocol import CommandType, receive
@@ -34,6 +38,7 @@ class MsgDispatcherBase(Recoverable):
     def __init__(self):
         if multi_thread_enabled():
             self.pool = ThreadPool()
+            self.thread_results = []
 
     def run(self):
         """Run the tuner.
@@ -49,7 +54,11 @@ def run(self):
             if command is None or command is CommandType.Terminate:
                 break
             if multi_thread_enabled():
-                self.pool.map_async(self.handle_request, [(command, data)])
+                result = self.pool.map_async(self.handle_request_thread, [(command, data)])
+                self.thread_results.append(result)
+                if any([thread_result.ready() and not thread_result.successful() for thread_result in self.thread_results]):
+                    _logger.debug('Caught thread exception')
+                    break
             else:
                 self.handle_request((command, data))
 
@@ -59,6 +68,16 @@ def run(self):
 
         _logger.info('Terminated by NNI manager')
 
+    def handle_request_thread(self, request):
+        if multi_thread_enabled():
+            try:
+                self.handle_request(request)
+            except Exception as e:
+                _logger.exception(str(e))
+                raise
+        else:
+            pass
+
     def handle_request(self, request):
         command, data = request
 
diff --git a/src/sdk/pynni/nni/tuner.py b/src/sdk/pynni/nni/tuner.py
index 7d65395425..4dcf705bcf 100644
--- a/src/sdk/pynni/nni/tuner.py
+++ b/src/sdk/pynni/nni/tuner.py
@@ -48,6 +48,7 @@ def generate_multiple_parameters(self, parameter_id_list):
         result = []
         for parameter_id in parameter_id_list:
             try:
+                _logger.debug("generating param for {}".format(parameter_id))
                 res = self.generate_parameters(parameter_id)
             except nni.NoMoreTrialError:
                 return result
diff --git a/test/async_sharing_test/config.yml b/test/async_sharing_test/config.yml
new file mode 100644
index 0000000000..8cefad3c1a
--- /dev/null
+++ b/test/async_sharing_test/config.yml
@@ -0,0 +1,25 @@
+authorName: default
+experimentName: example_weight_sharing
+trialConcurrency: 3
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai
+trainingServicePlatform: remote
+#choice: true, false
+useAnnotation: false
+multiThread: true
+tuner:
+  codeDir: .
+  classFileName: simple_tuner.py
+  className: SimpleTuner
+trial:
+  command: python3 main.py
+  codeDir: .
+  gpuNum: 0
+machineList:
+  - ip: 10.10.10.10
+    username: bob
+    passwd: bob123
+  - ip: 10.10.10.11
+    username: bob
+    passwd: bob123
diff --git a/test/async_sharing_test/main.py b/test/async_sharing_test/main.py
new file mode 100644
index 0000000000..d5a6315812
--- /dev/null
+++ b/test/async_sharing_test/main.py
@@ -0,0 +1,57 @@
+"""
+Test code for weight sharing;
+needs NFS set up and mounted as `/mnt/nfs/nni`
+"""
+
+import hashlib
+import os
+import random
+import time
+
+import nni
+
+
+def generate_rand_file(fl_name):
+    """
+    generate a random file and write it to `fl_name`
+    """
+    fl_size = random.randint(1024, 102400)
+    fl_dir = os.path.split(fl_name)[0]
+    if not os.path.exists(fl_dir):
+        os.makedirs(fl_dir)
+    with open(fl_name, 'wb') as fout:
+        fout.write(os.urandom(fl_size))
+
+
+def check_sum(fl_name, tid=None):
+    """
+    compute the checksum of the generated file `fl_name`
+    """
+    hasher = hashlib.md5()
+    with open(fl_name, 'rb') as fin:
+        for chunk in iter(lambda: fin.read(4096), b""):
+            hasher.update(chunk)
+    ret = hasher.hexdigest()
+    if tid is not None:
+        ret = ret + str(tid)
+    return ret
+
+
+if __name__ == '__main__':
+    nfs_path = '/mnt/nfs/nni/test'
+    params = nni.get_next_parameter()
+    print(params)
+    if params['id'] == 0:
+        model_file = os.path.join(nfs_path, str(params['id']), 'model.dat')
+        generate_rand_file(model_file)
+        time.sleep(10)
+        nni.report_final_result({
+            'checksum': check_sum(model_file, tid=params['id']),
+            'path': model_file
+        })
+    else:
+        model_file = params['prev_path']
+        time.sleep(10)
+        nni.report_final_result({
+            'checksum': check_sum(model_file, tid=params['prev_id'])
+        })
diff --git a/test/async_sharing_test/simple_tuner.py b/test/async_sharing_test/simple_tuner.py
new file mode 100644
index 0000000000..de40ea9117
--- /dev/null
+++ b/test/async_sharing_test/simple_tuner.py
@@ -0,0 +1,66 @@
+"""
+SimpleTuner for Weight Sharing
+"""
+
+import logging
+
+from threading import Event, Lock
+from nni.tuner import Tuner
+
+_logger = logging.getLogger('WeightSharingTuner')
+
+
+class SimpleTuner(Tuner):
+    """
+    simple tuner, test for weight sharing
+    """
+
+    def __init__(self):
+        super(SimpleTuner, self).__init__()
+        self.trial_meta = {}
+        self.f_id = None  # father
+        self.sig_event = Event()
+        self.thread_lock = Lock()
+
+    def generate_parameters(self, parameter_id):
+        if self.f_id is None:
+            self.thread_lock.acquire()
+            self.f_id = parameter_id
+            self.trial_meta[parameter_id] = {
+                'prev_id': 0,
+                'id': parameter_id,
+                'checksum': None,
+                'path': '',
+            }
+            _logger.info('generate parameter for father trial %s' %
+                         parameter_id)
+            self.thread_lock.release()
+            return {
+                'prev_id': 0,
+                'id': parameter_id,
+            }
+        else:
+            self.sig_event.wait()
+            self.thread_lock.acquire()
+            self.trial_meta[parameter_id] = {
+                'id': parameter_id,
+                'prev_id': self.f_id,
+                'prev_path': self.trial_meta[self.f_id]['path']
+            }
+            self.thread_lock.release()
+            return self.trial_meta[parameter_id]
+
+    def receive_trial_result(self, parameter_id, parameters, reward):
+        self.thread_lock.acquire()
+        if parameter_id == self.f_id:
+            self.trial_meta[parameter_id]['checksum'] = reward['checksum']
+            self.trial_meta[parameter_id]['path'] = reward['path']
+            self.sig_event.set()
+        else:
+            if reward['checksum'] != self.trial_meta[self.f_id]['checksum']:
+                raise ValueError("Inconsistency in weight sharing: {} != {}".format(
+                    reward['checksum'], self.trial_meta[self.f_id]['checksum']))
+        self.thread_lock.release()
+
+    def update_search_space(self, search_space):
+        pass
From d13964dc4444d24b73e3ec1f6f8250e385d72cec Mon Sep 17 00:00:00 2001
From: SparkSnail
Date: Tue, 8 Jan 2019 16:06:32 +0800
Subject: [PATCH 21/54] Add frameworkcontroller document (#530)

Add frameworkcontroller document.
Fix other document small issues.
---
 docs/FrameworkControllerMode.md | 100 ++++++++++++++++++++
 docs/KubeflowMode.md            | 156 ++++++++++++++++++--------
 2 files changed, 219 insertions(+), 37 deletions(-)
 create mode 100644 docs/FrameworkControllerMode.md

diff --git a/docs/FrameworkControllerMode.md b/docs/FrameworkControllerMode.md
new file mode 100644
index 0000000000..c54c33756b
--- /dev/null
+++ b/docs/FrameworkControllerMode.md
@@ -0,0 +1,100 @@
+**Run an Experiment on FrameworkController**
+===
+NNI supports running experiments using [FrameworkController](https://github.com/Microsoft/frameworkcontroller), called frameworkcontroller mode. FrameworkController is built to orchestrate all kinds of applications on Kubernetes, so you don't need to install Kubeflow for a specific deep learning framework such as tf-operator or pytorch-operator. You can now use FrameworkController as the training service to run NNI experiments.
+
+## Prerequisite for on-premises Kubernetes Service
+1. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this [guideline](https://kubernetes.io/docs/setup/) to set up Kubernetes.
+2. Prepare a **kubeconfig** file, which will be used by NNI to interact with your kubernetes API server. By default, NNI manager uses $(HOME)/.kube/config as the kubeconfig file's path. You can also specify another kubeconfig file by setting the **KUBECONFIG** environment variable. Refer to this [guideline]( https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig) to learn more about kubeconfig.
+3. If your NNI trial job needs GPU resources, you should follow this [guideline](https://github.com/NVIDIA/k8s-device-plugin) to configure the **Nvidia device plugin for Kubernetes**.
+4. Prepare an **NFS server** and export a general purpose mount (we recommend mapping your NFS server path with the `root_squash` option, otherwise permission issues may arise when NNI copies files to NFS; refer to this [page](https://linux.die.net/man/5/exports) to learn what the root_squash option is), or **Azure File Storage**.
+5. Install the **NFS client** on the machine where you install NNI and run nnictl to create the experiment. Run this command to install the NFSv4 client:
+    ```
+    apt-get install nfs-common
+    ```
+
+6. Install **NNI**, following the install guide [here](GetStarted.md).
+
+## Prerequisite for Azure Kubernetes Service
+1. NNI supports FrameworkController based on Azure Kubernetes Service; follow the [guideline](https://azure.microsoft.com/en-us/services/kubernetes-service/) to set up Azure Kubernetes Service.
+2. Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and __kubectl__. Use `az login` to set the azure account, and connect the kubectl client to AKS; refer to this [guideline](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough#connect-to-the-cluster).
+3. Follow the [guideline](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account?tabs=portal) to create an azure file storage account. If you use Azure Kubernetes Service, NNI needs the Azure Storage Service to store code files and output files.
+4. To access the Azure storage service, NNI needs the access key of the storage account, and NNI uses the [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) Service to protect your private key.
+Set up the Azure Key Vault Service and add a secret to Key Vault to store the access key of the Azure storage account. Follow this [guideline](https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli) to store the access key.
+
+
+## Set up FrameworkController
+Follow the [guideline](https://github.com/Microsoft/frameworkcontroller/tree/master/example/run) to set up FrameworkController in the kubernetes cluster; NNI supports FrameworkController in statefulset mode.
+
+## Design
+Please refer to the design of the [kubeflow training service](./KubeflowMode.md); the frameworkcontroller training service pipeline is similar.
+
+## Example
+
+The frameworkcontroller config file format is:
+```
+authorName: default
+experimentName: example_mnist
+trialConcurrency: 1
+maxExecDuration: 10h
+maxTrialNum: 100
+#choice: local, remote, pai, kubeflow, frameworkcontroller
+trainingServicePlatform: frameworkcontroller
+searchSpacePath: ~/nni/examples/trials/mnist/search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+  gpuNum: 0
+trial:
+  codeDir: ~/nni/examples/trials/mnist
+  taskRoles:
+    - name: worker
+      taskNum: 1
+      command: python3 mnist.py
+      gpuNum: 1
+      cpuNum: 1
+      memoryMB: 8192
+      image: msranni/nni:latest
+      frameworkAttemptCompletionPolicy:
+        minFailedTaskCount: 1
+        minSucceededTaskCount: 1
+frameworkcontrollerConfig:
+  storage: nfs
+  nfs:
+    server: {your_nfs_server}
+    path: {your_nfs_server_exported_path}
+```
+If you use Azure Kubernetes Service, you should set `frameworkcontrollerConfig` in your config YAML file as follows:
+```
+frameworkcontrollerConfig:
+  storage: azureStorage
+  keyVault:
+    vaultName: {your_vault_name}
+    name: {your_secret_name}
+  azureStorage:
+    accountName: {your_storage_account_name}
+    azureShare: {your_azure_share_name}
+```
+Note: You should explicitly set `trainingServicePlatform: frameworkcontroller` in the NNI config YAML file if you want to start the experiment in frameworkcontroller mode.
+
+The trial's config format for NNI frameworkcontroller mode is a simplified version of FrameworkController's official config; you can refer to the [tensorflow example of frameworkcontroller](https://github.com/Microsoft/frameworkcontroller/blob/master/example/framework/scenario/tensorflow/cpu/tensorflowdistributedtrainingwithcpu.yaml) for a deeper understanding.
+Trial configuration in frameworkcontroller mode has the following configuration keys:
+* taskRoles: you can set multiple task roles in the config file; each task role is a basic unit to process in the kubernetes cluster.
+    * name: the name of the task role, like "worker", "ps", "master".
+    * taskNum: the replica number of the task role.
+    * command: the user's command to be run in the container (a sketch of such a trial script follows this list).
+    * gpuNum: the number of GPU devices used in the container.
+    * cpuNum: the number of CPU devices used in the container.
+    * memoryMB: the memory limitation to be specified for the container.
+    * image: the docker image used to create the pod and run the program.
+    * frameworkAttemptCompletionPolicy: the policy for running the framework; please refer to the [user-manual](https://github.com/Microsoft/frameworkcontroller/blob/master/doc/user-manual.md#frameworkattemptcompletionpolicy) for details. You can use this policy to control the pods; for example, if the worker stops but the ps does not, the completion policy can help stop the ps.
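+
+As referenced in the `command` key above, each task role simply launches an ordinary NNI trial script. A hedged sketch of such a script (a simplified placeholder, not the actual `mnist.py` shipped with the examples):
+
+```python
+import nni
+
+
+def run_trial(params):
+    # build and train the model with the received hyper-parameters here
+    accuracy = 0.0  # placeholder for a real evaluation metric
+    nni.report_final_result(accuracy)
+
+
+if __name__ == '__main__':
+    run_trial(nni.get_next_parameter())
+```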
+
+## How to run the example
+After you prepare a config file, you can run your experiment with nnictl. The way to start an experiment on frameworkcontroller is similar to kubeflow; please refer to the [document](./KubeflowMode.md) for more information.
\ No newline at end of file
diff --git a/docs/KubeflowMode.md b/docs/KubeflowMode.md
index 2c4721b971..2beb69b59a 100644
--- a/docs/KubeflowMode.md
+++ b/docs/KubeflowMode.md
@@ -1,6 +1,6 @@
 **Run an Experiment on Kubeflow**
 ===
-Now NNI supports running experiment on [Kubeflow](https://github.com/kubeflow/kubeflow), called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a kubernetes cluster, either on-prem or [Azure Kubernetes Service(AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/), a Ubuntu machine on which [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) is setup to connect to your kubernetes cluster. If you are not familiar with kubernetes, [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/) is a goot start. In kubeflow mode, your trial program will run as kubeflow job in kubernetes cluster.
+Now NNI supports running experiments on [Kubeflow](https://github.com/kubeflow/kubeflow), called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a kubernetes cluster, either on-prem or [Azure Kubernetes Service(AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/), and an Ubuntu machine on which [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) is set up to connect to your kubernetes cluster. If you are not familiar with kubernetes, [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/) is a good start. In kubeflow mode, your trial program will run as a kubeflow job in the kubernetes cluster.
 
 ## Prerequisite for on-premises Kubernetes Service
 1. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this [guideline](https://kubernetes.io/docs/setup/) to set up Kubernetes
@@ -28,64 +28,143 @@ Kubeflow training service instantiates a kubernetes rest client to interact with
 
 For each trial, we will upload all the files in your local codeDir path (configured in nni_config.yaml) together with NNI generated files like parameter.cfg into a storage volumn. Right now we support two kinds of storage volumns: [nfs](https://en.wikipedia.org/wiki/Network_File_System) and [azure file storage](https://azure.microsoft.com/en-us/services/storage/files/), you should configure the storage volumn in nni config yaml file. After files are prepared, Kubeflow training service will call K8S rest API to create kubeflow jobs ([tf-operator](https://github.com/kubeflow/tf-operator) job or [pytorch-operator](https://github.com/kubeflow/pytorch-operator) job) in K8S, and mount your storage volumn into the job's pod. Output files of kubeflow job, like stdout, stderr, trial.log or model files, will also be copied back to the storage volumn. NNI will show the storage volumn's URL for each trial in WebUI, to allow user browse the log files and job's output files.
 
+## Supported operator
+NNI supports only Kubeflow's tf-operator and pytorch-operator; other operators are not tested.
+Users can set the operator type in the config file.
+The setting for tf-operator:
+```
+kubeflowConfig:
+  operator: tf-operator
+```
+The setting for pytorch-operator:
+```
+kubeflowConfig:
+  operator: pytorch-operator
+```
+Users who want to use tf-operator can set `ps` and `worker` in the trial config; users who want to use pytorch-operator can set `master` and `worker` in the trial config.
+
+## Supported storage type
+NNI supports NFS and Azure Storage for storing code and output files; users can set the storage type in the config file and fill in the corresponding settings.
+The setting for NFS storage is as follows:
+```
+kubeflowConfig:
+  storage: nfs
+  nfs:
+    # Your NFS server IP, like 10.10.10.10
+    server: {your_nfs_server_ip}
+    # Your NFS server export path, like /var/nfs/nni
+    path: {your_nfs_server_export_path}
+```
+If you use Azure storage, you should set `kubeflowConfig` in your config YAML file as follows:
+```
+kubeflowConfig:
+  storage: azureStorage
+  keyVault:
+    vaultName: {your_vault_name}
+    name: {your_secret_name}
+  azureStorage:
+    accountName: {your_storage_account_name}
+    azureShare: {your_azure_share_name}
+```
+
+
 ## Run an experiment
-Use `examples/trials/mnist` as an example. The nni config yaml file's content is like:
+Use `examples/trials/mnist` as an example. This is a tensorflow job that uses Kubeflow's tf-operator. The NNI config YAML file's content is like:
 ```
-authorName: your_name
+authorName: default
 experimentName: example_mnist
-# how many trials could be concurrently running
-trialConcurrency: 4
-# maximum experiment running duration
-maxExecDuration: 3h
-# empty means never stop
-maxTrialNum: 100
-# choice: local, remote, pai, kubeflow
+trialConcurrency: 2
+maxExecDuration: 1h
+maxTrialNum: 20
+#choice: local, remote, pai, kubeflow
 trainingServicePlatform: kubeflow
-# choice: true, false
+searchSpacePath: search_space.json
+#choice: true, false
 useAnnotation: false
 tuner:
+  #choice: TPE, Random, Anneal, Evolution
   builtinTunerName: TPE
   classArgs:
     #choice: maximize, minimize
     optimize_mode: maximize
+assessor:
+  builtinAssessorName: Medianstop
+  classArgs:
+    optimize_mode: maximize
+  gpuNum: 0
 trial:
-  codeDir: ~/nni/examples/trials/mnist
-  ps:
-    replicas: 1
-    command: python mnist-keras.py
-    gpuNum: 0
+  codeDir: .
+  worker:
+    replicas: 2
+    command: python3 dist_mnist.py
+    gpuNum: 1
     cpuNum: 1
     memoryMB: 8196
-    image: {your_docker_image_for_tensorflow_ps}
-  worker:
-    replicas: 1
-    command: python mnist-keras.py
-    gpuNum: 2
+    image: msranni/nni:latest
+  ps:
+    replicas: 1
+    command: python3 dist_mnist.py
+    gpuNum: 0
     cpuNum: 1
     memoryMB: 8196
-    image: {your_docker_image_for_tensorflow_worker}
+    image: msranni/nni:latest
 kubeflowConfig:
   operator: tf-operator
+  apiVersion: v1alpha2
   storage: nfs
   nfs:
-    server: {your_nfs_server}
-    path: {your_nfs_server_exported_path}
+    # Your NFS server IP, like 10.10.10.10
+    server: {your_nfs_server_ip}
+    # Your NFS server export path, like /var/nfs/nni
+    path: {your_nfs_server_export_path}
 ```
-If you use Azure Kubernetes Service, you should set `kubeflowConfig` in your config yaml file as follows:
+
+Note: You should explicitly set `trainingServicePlatform: kubeflow` in the NNI config YAML file if you want to start the experiment in kubeflow mode.
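+
+Note that `ps` and `worker` run the same `dist_mnist.py` command. tf-operator injects a `TF_CONFIG` environment variable into each pod, so a script can discover its own role; a hedged sketch (assuming the script follows the usual TensorFlow distributed pattern):
+
+```python
+import json
+import os
+
+# TF_CONFIG is injected into each replica's pod by tf-operator
+tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
+task = tf_config.get('task', {})
+if task.get('type') == 'ps':
+    print('running as parameter server, index %s' % task.get('index'))
+else:
+    print('running as worker, index %s' % task.get('index'))
+```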
+
+If you want to run PyTorch jobs, you can set your config file as follows:
+```
+authorName: default
+experimentName: example_mnist_distributed_pytorch
+trialConcurrency: 1
+maxExecDuration: 1h
+maxTrialNum: 10
+#choice: local, remote, pai, kubeflow
+trainingServicePlatform: kubeflow
+searchSpacePath: search_space.json
+#choice: true, false
+useAnnotation: false
+tuner:
+  #choice: TPE, Random, Anneal, Evolution
+  builtinTunerName: TPE
+  classArgs:
+    #choice: maximize, minimize
+    optimize_mode: minimize
+trial:
+  codeDir: .
+  master:
+    replicas: 1
+    command: python3 dist_mnist.py
+    gpuNum: 1
+    cpuNum: 1
+    memoryMB: 2048
+    image: msranni/nni:latest
+  worker:
+    replicas: 1
+    command: python3 dist_mnist.py
+    gpuNum: 0
+    cpuNum: 1
+    memoryMB: 2048
+    image: msranni/nni:latest
+kubeflowConfig:
+  operator: pytorch-operator
+  apiVersion: v1alpha2
+  nfs:
+    # Your NFS server IP, like 10.10.10.10
+    server: {your_nfs_server_ip}
+    # Your NFS server export path, like /var/nfs/nni
+    path: {your_nfs_server_export_path}
+```
 
-Note: You should explicitly set `trainingServicePlatform: kubeflow` in nni config yaml file if you want to start experiment in kubeflow mode.
-
 Trial configuration in kubeflow mode have the following configuration keys:
 * codeDir
     * code directory, where you put training code and config files
@@ -100,14 +179,17 @@ Trial configuration in kubeflow mode have the following configuration keys:
     * gpuNum
 * image
     * Required key. In kubeflow mode, your trial program will be scheduled by Kubernetes to run in [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/). This key is used to specify the Docker image used to create the pod where your trail program will run.
-    * We already build a docker image [nnimsra/nni](https://hub.docker.com/r/msranni/nni/) on [Docker Hub](https://hub.docker.com/). It contains NNI python packages, Node modules and javascript artifact files required to start experiment, and all of NNI dependencies. The docker file used to build this image can be found at [here](../deployment/Dockerfile.build.base). You can either use this image directly in your config file, or build your own image based on it.
+    * We already build a docker image [msranni/nni](https://hub.docker.com/r/msranni/nni/) on [Docker Hub](https://hub.docker.com/). It contains NNI python packages, Node modules and javascript artifact files required to start experiment, and all of NNI dependencies. The docker file used to build this image can be found at [here](../deployment/Dockerfile.build.base). You can either use this image directly in your config file, or build your own image based on it.
+* apiVersion
+    * Required key. The API version of your kubeflow.
 * ps (optional). This config section is used to configure tensorflow parameter server role.
+* master (optional). This config section is used to configure the pytorch master role.
 
 Once complete to fill nni experiment config file and save (for example, save as exp_kubeflow.yaml), then run the following command
 ```
 nnictl create --config exp_kubeflow.yaml
 ```
 to start the experiment in kubeflow mode.
 NNI will create Kubeflow tfjob or pytorchjob for each trial, and the job name format is something like `nni_exp_{experiment_id}_trial_{trial_id}`.
 
 You can see the kubeflow tfjob created by NNI in your Kubernetes dashboard.
 
 Notice: In kubeflow mode, NNIManager will start a rest server and listen on a port which is your NNI WebUI's port plus 1. For example, if your WebUI port is `8080`, the rest server will listen on `8081` to receive metrics from trial jobs running in Kubernetes. So you should enable the `8081` TCP port in your firewall rule to allow incoming traffic.

From c288a16e94c09e5ddfca12d2b624c2c70a865367 Mon Sep 17 00:00:00 2001
From: Lijiao <35484733+lvybriage@users.noreply.github.com>
Date: Tue, 8 Jan 2019 16:14:13 +0800
Subject: [PATCH 22/54] [WebUI] Show trial log for pai and k8s (#580)

* [WebUI] Show trial log for pai and k8s

* fix lint

* Fix comments
---
 src/webui/src/App.css                              |   5 +
 src/webui/src/App.tsx                              |  10 +-
 src/webui/src/components/Overview.tsx              |   6 +-
 src/webui/src/components/TrialsDetail.tsx          |  29 +++-
 .../src/components/logPath/PaiTrialChild.tsx       |  55 +++++++
 .../src/components/logPath/PaiTrialLog.tsx         |  72 ++++++++
 src/webui/src/components/logPath/TrialLog.tsx      |  30 ++++
 .../src/components/overview/Progress.tsx           |  87 +++++-----
 .../src/components/overview/SuccessTable.tsx       | 141 +++++++++++++---
 .../src/components/trial-detail/TableList.tsx      | 154 +++++++++++++---
 src/webui/src/static/style/probar.scss             |   3 +-
 src/webui/src/static/style/progress.scss           |   1 +
 src/webui/src/static/style/tableList.scss          |  46 ++++++
 13 files changed, 541 insertions(+), 98 deletions(-)
 create mode 100644 src/webui/src/components/logPath/PaiTrialChild.tsx
 create mode 100644 src/webui/src/components/logPath/PaiTrialLog.tsx
 create mode 100644 src/webui/src/components/logPath/TrialLog.tsx
 create mode 100644 src/webui/src/static/style/tableList.scss

diff --git a/src/webui/src/App.css b/src/webui/src/App.css
index d506354555..bc9b69e810 100644
--- a/src/webui/src/App.css
+++ b/src/webui/src/App.css
@@ -17,12 +17,17 @@
 .headerCon{
     min-width: 1024px;
 }
+.contentBox{
+    width: 100%;
+    background: #f2f2f2;
+}
 .content{
     width: 86%;
     min-width: 1024px;
     margin: 0 auto;
     margin-top: 74px;
     margin-bottom: 30px;
+    background: #fff;
 }
diff --git a/src/webui/src/App.tsx b/src/webui/src/App.tsx
index 23c3b70252..a013a9d258 100644
--- a/src/webui/src/App.tsx
+++ b/src/webui/src/App.tsx
@@ -6,16 +6,18 @@ import SlideBar from './components/SlideBar';
 class App extends React.Component<{}, {}> {
   render() {
     return (
-
+

- + - + - + + {this.props.children} + ); diff --git a/src/webui/src/components/Overview.tsx b/src/webui/src/components/Overview.tsx index 7a0ed8cadb..ed1dad927d 100644 --- a/src/webui/src/components/Overview.tsx +++ b/src/webui/src/components/Overview.tsx @@ -215,6 +215,7 @@ class Overview extends React.Component<{}, OverviewState> { case 'USER_CANCELED': case 'SYS_CANCELED': + case 'EARLY_STOPPED': profile.stopTrial += 1; break; case 'SUCCEEDED': @@ -461,7 +462,10 @@ class Overview extends React.Component<{}, OverviewState> { - + diff --git a/src/webui/src/components/TrialsDetail.tsx b/src/webui/src/components/TrialsDetail.tsx index 0b614104df..a9590c7c22 100644 --- a/src/webui/src/components/TrialsDetail.tsx +++ b/src/webui/src/components/TrialsDetail.tsx @@ -21,6 +21,7 @@ interface TrialDetailState { isHasSearch: boolean; experimentStatus: string; entriesTable: number; + experimentPlatform: string; } class TrialsDetail extends React.Component<{}, TrialDetailState> { @@ -43,6 +44,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { experimentStatus: '', entriesTable: 20, isHasSearch: false, + experimentPlatform: '' }; } // trial accuracy graph @@ -370,6 +372,26 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { alert('TableList component was not properly initialized.'); } + checkExperimentPlatform = () => { + axios(`${MANAGER_IP}/experiment`, { + method: 'GET' + }) + .then(res => { + if (res.status === 200) { + const trainingPlatform = res.data.params.trainingServicePlatform !== undefined + ? + res.data.params.trainingServicePlatform + : + ''; + if (this._isMounted) { + this.setState({ + experimentPlatform: trainingPlatform + }); + } + } + }); + } + componentDidMount() { this._isMounted = true; @@ -377,6 +399,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { this.drawPointGraph(); this.interTableList = window.setInterval(this.drawTableList, 10000); this.interAccuracy = window.setInterval(this.drawPointGraph, 10000); + this.checkExperimentPlatform(); } componentWillUnmount() { @@ -386,7 +409,10 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { } render() { - const { accSource, accNodata, tableListSource, entriesTable, searchResultSource, isHasSearch } = this.state; + const { accSource, accNodata, tableListSource, + entriesTable, searchResultSource, isHasSearch, + experimentPlatform + } = this.state; const titleOfacc = ( ); @@ -463,6 +489,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> { updateList={this.drawTableList} searchResult={searchResultSource} isHasSearch={isHasSearch} + platform={experimentPlatform} ref={(tabList) => this.tableList = tabList} /> diff --git a/src/webui/src/components/logPath/PaiTrialChild.tsx b/src/webui/src/components/logPath/PaiTrialChild.tsx new file mode 100644 index 0000000000..588599a6b6 --- /dev/null +++ b/src/webui/src/components/logPath/PaiTrialChild.tsx @@ -0,0 +1,55 @@ +import * as React from 'react'; +import { Row, Button } from 'antd'; +import { DOWNLOAD_IP } from '../../static/const'; + +interface PaiTrialChildProps { + logString: string; + id: string; + showLogModal: Function; + isdisLogbtn?: boolean; +} + +class PaiTrialChild extends React.Component { + + constructor(props: PaiTrialChildProps) { + super(props); + + } + + render() { + const { logString, id, showLogModal, isdisLogbtn } = this.props; + return ( +
+ { + logString === '' + ? +
+ : + + + + trial stdout + + + + + + + } +
+ ); + } +} + +export default PaiTrialChild; diff --git a/src/webui/src/components/logPath/PaiTrialLog.tsx b/src/webui/src/components/logPath/PaiTrialLog.tsx new file mode 100644 index 0000000000..42cf5f6774 --- /dev/null +++ b/src/webui/src/components/logPath/PaiTrialLog.tsx @@ -0,0 +1,72 @@ +import * as React from 'react'; +import { Row, Button } from 'antd'; +import { DOWNLOAD_IP } from '../../static/const'; +import PaiTrialChild from './PaiTrialChild'; + +interface PaitrialLogProps { + logStr: string; + id: string; + showLogModal: Function; + trialStatus?: string; + isdisLogbutton?: boolean; +} + +class PaitrialLog extends React.Component { + + constructor(props: PaitrialLogProps) { + super(props); + + } + + render() { + const { logStr, id, showLogModal, + isdisLogbutton + } = this.props; + const isTwopath = logStr.indexOf(',') !== -1 + ? + true + : + false; + return ( +
+
+ { + isTwopath + ? + + + + trial stdout + + hdfsLog + + + + + + : + + } +
+
+ ); + } +} + +export default PaitrialLog; diff --git a/src/webui/src/components/logPath/TrialLog.tsx b/src/webui/src/components/logPath/TrialLog.tsx new file mode 100644 index 0000000000..b2da64bf72 --- /dev/null +++ b/src/webui/src/components/logPath/TrialLog.tsx @@ -0,0 +1,30 @@ +import * as React from 'react'; +import LogPathChild from './LogPathChild'; + +interface TrialLogProps { + logStr: string; + id: string; +} + +class TrialLog extends React.Component { + + constructor(props: TrialLogProps) { + super(props); + + } + + render() { + const { logStr } = this.props; + + return ( +
+ +
+ ); + } +} + +export default TrialLog; diff --git a/src/webui/src/components/overview/Progress.tsx b/src/webui/src/components/overview/Progress.tsx index a0858403da..0e102dc50f 100644 --- a/src/webui/src/components/overview/Progress.tsx +++ b/src/webui/src/components/overview/Progress.tsx @@ -1,10 +1,6 @@ import * as React from 'react'; import { - Row, - Col, - Popover, - Button, - message + Row, Col, Popover, Button, message } from 'antd'; import axios from 'axios'; import { MANAGER_IP, CONTROLTYPE } from '../../static/const'; @@ -92,7 +88,8 @@ class Progressed extends React.Component { if (error.response.data.error) { message.error(error.response.data.error); } else { - message.error(`Update ${CONTROLTYPE[1].toLocaleLowerCase()} failed`); + message.error( + `Update ${CONTROLTYPE[1].toLocaleLowerCase()} failed`); } } }); @@ -179,29 +176,47 @@ class Progressed extends React.Component { return ( -
-

Status

-
- {status} - { - status === 'ERROR' - ? - - i - - : - - } -
+

Status

+
+ {status} + { + status === 'ERROR' + ? + + i + + : + + } +
+ + + + +
+

best metric

+
{bestAccuracy.toFixed(6)}
-
+ {/* modify concurrency */} -

Concurrency

+

concurrency

{ - - - -

best metric

-
{bestAccuracy}
-
diff --git a/src/webui/src/components/overview/SuccessTable.tsx b/src/webui/src/components/overview/SuccessTable.tsx index 500e55b051..ba0ecc201d 100644 --- a/src/webui/src/components/overview/SuccessTable.tsx +++ b/src/webui/src/components/overview/SuccessTable.tsx @@ -1,24 +1,90 @@ import * as React from 'react'; +import axios from 'axios'; import JSONTree from 'react-json-tree'; -import { Table } from 'antd'; +import { Row, Modal, Input, Table, Tabs } from 'antd'; +const TabPane = Tabs.TabPane; +const { TextArea } = Input; +import { DOWNLOAD_IP } from '../../static/const'; import { TableObj } from '../../static/interface'; import { convertDuration } from '../../static/function'; +import PaiTrialLog from '../logPath/PaiTrialLog'; +import TrialLog from '../logPath/TrialLog'; import '../../static/style/tableStatus.css'; -import LogPath from '../logPath/LogPath'; +import '../../static/style/tableList.scss'; interface SuccessTableProps { tableSource: Array; + trainingPlatform: string; } -class SuccessTable extends React.Component { +interface SuccessTableState { + isShowLogModal: boolean; + logContent: string; +} + +class SuccessTable extends React.Component { + + public _isMounted = false; constructor(props: SuccessTableProps) { super(props); + this.state = { + isShowLogModal: false, + logContent: '' + }; + + } + + showLogModalOverview = (id: string) => { + axios(`${DOWNLOAD_IP}/trial_${id}.log`, { + method: 'GET' + }) + .then(res => { + if (res.status === 200) { + if (this._isMounted) { + this.setState(() => ({ + logContent: res.data + })); + } + } + }) + .catch(error => { + if (error.response.status === 500) { + if (this._isMounted) { + this.setState(() => ({ + logContent: 'failed to get log message' + })); + } + } + }); + if (this._isMounted) { + this.setState({ + isShowLogModal: true + }); + } + } + + hideLogModalOverview = () => { + if (this._isMounted) { + this.setState({ + isShowLogModal: false, + logContent: '' // close modal, delete data + }); + } + } + + componentDidMount() { + this._isMounted = true; + } + + componentWillUnmount() { + this._isMounted = false; } render() { - const { tableSource } = this.props; + const { tableSource, trainingPlatform } = this.props; + const { isShowLogModal, logContent } = this.state; let bgColor = ''; const columns = [{ @@ -114,22 +180,40 @@ class SuccessTable extends React.Component { 'This trial\'s logPath are not available.'; return (
-                    {
-                        isHasParameters
-                            ?
-                             true}  // default expandNode
-                                getItemString={() => ()}  // remove the {} items
-                                data={openRowDataSource}
-                            />
-                            :
-                            
- Error: - 'This trial's parameters are not available.' -
- } - + + + + { + isHasParameters + ? + true} // default expandNode + getItemString={() => ()} // remove the {} items + data={openRowDataSource} + /> + : +
+ Error: + 'This trial's parameters are not available.' +
+ } +
+ + { + trainingPlatform === 'pai' || trainingPlatform === 'kubeflow' + ? + + : + + } + +
+
); }; @@ -142,6 +226,23 @@ class SuccessTable extends React.Component { className="commonTableStyle" pagination={false} /> + {/* trial log modal */} + +
+