This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

Dev pylint #1697

Merged
merged 19 commits on Nov 4, 2019
23 changes: 20 additions & 3 deletions azure-pipelines.yml
@@ -8,16 +8,33 @@ jobs:
PYTHON_VERSION: '3.6'

steps:
- script: python3 -m pip install --upgrade pip setuptools --user
- script: |
python3 -m pip install --upgrade pip setuptools --user
python3 -m pip install pylint==2.3.1 astroid==2.2.5 --user
python3 -m pip install coverage --user
displayName: 'Install python tools'
- script: |
source install.sh
displayName: 'Install nni toolkit via source code'
- script: |
python3 -m pip install torch==0.4.1 --user
python3 -m pip install torchvision==0.2.1 --user
python3 -m pip install tensorflow==1.13.1 --user
python3 -m pip install keras==2.1.6 --user
python3 -m pip install gym onnx --user
sudo apt-get install swig -y
PATH=$HOME/.local/bin:$PATH nnictl package install --name=SMAC
PATH=$HOME/.local/bin:$PATH nnictl package install --name=BOHB
displayName: 'Install dependencies'
- script: |
source install.sh
displayName: 'Install nni toolkit via source code'
set -e
python3 -m pylint --rcfile pylintrc nni_annotation
python3 -m pylint --rcfile pylintrc nni_cmd
python3 -m pylint --rcfile pylintrc nni_gpu_tool
python3 -m pylint --rcfile pylintrc nni_trial_tool
python3 -m pylint --rcfile pylintrc nni
python3 -m pylint --rcfile pylintrc nnicli
displayName: 'Run pylint'
- script: |
python3 -m pip install flake8 --user
IGNORE=./tools/nni_annotation/testcase/*:F821,./examples/trials/mnist-nas/*/mnist*.py:F821,./examples/trials/nas_cifar10/src/cifar10/general_child.py:F821
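For reference, the new 'Run pylint' step can be reproduced outside the pipeline. The sketch below is not part of this PR; it assumes it is run from the repository root (where pylintrc and the listed packages live) and simply mirrors the same per-package invocations from Python.

    # run_pylint.py - hypothetical helper mirroring the CI 'Run pylint' step (not in this PR)
    import subprocess
    import sys

    PACKAGES = ["nni_annotation", "nni_cmd", "nni_gpu_tool", "nni_trial_tool", "nni", "nnicli"]

    def main():
        failed = False
        for pkg in PACKAGES:
            # Same command the pipeline runs for each package.
            result = subprocess.run([sys.executable, "-m", "pylint", "--rcfile", "pylintrc", pkg])
            failed = failed or result.returncode != 0
        # The CI step uses `set -e`, so any failing check fails the job; do the same here.
        sys.exit(1 if failed else 0)

    if __name__ == "__main__":
        main()
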
20 changes: 10 additions & 10 deletions src/sdk/pycli/setup.py
@@ -1,18 +1,18 @@
import setuptools

setuptools.setup(
name = 'nnicli',
version = '999.0.0-developing',
packages = setuptools.find_packages(),
name='nnicli',
version='999.0.0-developing',
packages=setuptools.find_packages(),

python_requires = '>=3.5',
install_requires = [
python_requires='>=3.5',
install_requires=[
'requests'
],

author = 'Microsoft NNI Team',
author_email = 'nni@microsoft.com',
description = 'nnicli for Neural Network Intelligence project',
license = 'MIT',
url = 'https://github.com/Microsoft/nni',
author='Microsoft NNI Team',
author_email='nni@microsoft.com',
description='nnicli for Neural Network Intelligence project',
license='MIT',
url='https://github.com/Microsoft/nni',
)
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/compression/tensorflow/compressor.py
@@ -80,7 +80,7 @@ def select_config(self, layer):
Returns
-------
ret : config or None
the retrieved configuration for this layer, if None, this layer should
the retrieved configuration for this layer, if None, this layer should
not be compressed
"""
ret = None
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/compression/torch/compressor.py
@@ -73,7 +73,7 @@ def select_config(self, layer):
Returns
-------
ret : config or None
the retrieved configuration for this layer, if None, this layer should
the retrieved configuration for this layer, if None, this layer should
not be compressed
"""
ret = None
6 changes: 3 additions & 3 deletions src/sdk/pynni/nni/ppo_tuner/distri.py
@@ -143,14 +143,14 @@ def sample(self):
re_masked_res = tf.reshape(masked_res, [-1, self.size])

u = tf.random_uniform(tf.shape(re_masked_res), dtype=self.logits.dtype)
return tf.argmax(re_masked_res - tf.log(-tf.log(u)), axis=-1)
return tf.argmax(re_masked_res - tf.log(-1*tf.log(u)), axis=-1)
else:
u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)
return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
return tf.argmax(self.logits - tf.log(-1*tf.log(u)), axis=-1)

@classmethod
def fromflat(cls, flat):
return cls(flat)
return cls(flat) # pylint: disable=no-value-for-parameter

class CategoricalPdType(PdType):
"""
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/ppo_tuner/policy.py
@@ -107,7 +107,7 @@ def _build_model_for_step(self):
def sample(logits, mask_npinf):
new_logits = tf.math.add(logits, mask_npinf)
u = tf.random_uniform(tf.shape(new_logits), dtype=logits.dtype)
return tf.argmax(new_logits - tf.log(-tf.log(u)), axis=-1)
return tf.argmax(new_logits - tf.log(-1*tf.log(u)), axis=-1)

def neglogp(logits, x):
# return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
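Both this hunk and the distri.py hunks above sample from logits with the Gumbel-max trick: adding -log(-log(u)) noise, with u uniform in (0, 1), and taking the argmax draws from the categorical distribution defined by the logits. A minimal NumPy sketch of the same idea, for illustration only (not code from this PR):

    import numpy as np

    def gumbel_max_sample(logits, rng=None):
        """Draw one categorical sample per row of `logits` via the Gumbel-max trick."""
        rng = rng or np.random.default_rng()
        u = rng.uniform(low=1e-10, high=1.0, size=logits.shape)  # avoid log(0)
        gumbel_noise = -np.log(-np.log(u))
        return np.argmax(logits + gumbel_noise, axis=-1)

    # Over many draws, the samples follow softmax(logits).
    logits = np.array([[2.0, 1.0, 0.1]])
    samples = [gumbel_max_sample(logits)[0] for _ in range(1000)]

The switch from -tf.log(u) to -1*tf.log(u) does not change the math; it presumably only works around a pylint false positive on unary minus applied to a tensor.
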
15 changes: 8 additions & 7 deletions src/sdk/pynni/nni/ppo_tuner/ppo_tuner.py
@@ -22,11 +22,9 @@
class PPOTuner
"""

import os
import copy
import logging
import numpy as np
import json_tricks
from gym import spaces

import nni
@@ -236,7 +234,8 @@ def compute_rewards(self, trials_info, trials_result):
nextnonterminal = 1.0 - trials_info.dones[t+1]
nextvalues = trials_info.values[t+1]
delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t]
mb_advs[t] = lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
mb_advs[t] = lastgaelam # pylint: disable=unsupported-assignment-operation
mb_returns = mb_advs + trials_info.values

trials_info.update_rewards(mb_rewards, mb_returns)
@@ -536,8 +535,10 @@ def _next_round_inference(self):
# generate new trials
self.trials_result = [None for _ in range(self.inf_batch_size)]
mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs,
mb_dones, last_values, self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions,
mb_values, mb_neglogpacs,
mb_dones, last_values,
self.inf_batch_size)
# check credit and submit new trials
for _ in range(self.credit):
trial_info_idx, actions = self.trials_info.get_next()
@@ -581,8 +582,8 @@ def trial_end(self, parameter_id, success, **kwargs):
assert trial_info_idx is not None
# use mean of finished trials as the result of this failed trial
values = [val for val in self.trials_result if val is not None]
logger.warning('zql values: {0}'.format(values))
self.trials_result[trial_info_idx] = (sum(values) / len(values)) if len(values) > 0 else 0
logger.warning('zql values: %s', values)
self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
self.finished_trials += 1
if self.finished_trials == self.inf_batch_size:
self._next_round_inference()
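The compute_rewards hunk splits the generalized advantage estimation (GAE) update onto two lines only so a pylint suppression can be attached; the recursion itself is unchanged. A standalone NumPy sketch of that backward pass, with invented argument names, under the assumption that rewards, values and dones are 1-D arrays of equal length:

    import numpy as np

    def gae_advantages(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
        """Backward GAE pass: adv_t = delta_t + gamma * lam * (1 - done_{t+1}) * adv_{t+1}."""
        rewards = np.asarray(rewards, dtype=float)
        values = np.asarray(values, dtype=float)
        dones = np.asarray(dones, dtype=float)
        advs = np.zeros_like(rewards)
        lastgaelam = 0.0
        for t in reversed(range(len(rewards))):
            if t == len(rewards) - 1:
                nextnonterminal = 1.0 - dones[t]  # assumption about how the final step is flagged
                nextvalues = last_value
            else:
                nextnonterminal = 1.0 - dones[t + 1]
                nextvalues = values[t + 1]
            delta = rewards[t] + gamma * nextvalues * nextnonterminal - values[t]
            lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
            advs[t] = lastgaelam
        returns = advs + values  # mirrors mb_returns = mb_advs + trials_info.values
        return advs, returns

The trial_end hunk also moves logger.warning to lazy %-formatting ('zql values: %s', values), which pylint's logging checks prefer because the string is only interpolated when the warning is actually emitted.
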
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/ppo_tuner/util.py
@@ -56,7 +56,7 @@ def seq_to_batch(h, flat=False):

def lstm(xs, ms, s, scope, nh, init_scale=1.0):
"""lstm cell"""
nbatch, nin = [v.value for v in xs[0].get_shape()]
_, nin = [v.value for v in xs[0].get_shape()] # the first is nbatch
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
2 changes: 1 addition & 1 deletion src/sdk/pynni/nni/smac_tuner/__init__.py
@@ -1 +1 @@
from .smac_tuner import SMACTuner
from .smac_tuner import SMACTuner
1 change: 0 additions & 1 deletion src/sdk/pynni/nni/smac_tuner/smac_tuner.py
@@ -39,7 +39,6 @@

from .convert_ss_to_scenario import generate_scenario


class SMACTuner(Tuner):
"""
Parameters
2 changes: 1 addition & 1 deletion tools/nni_cmd/command_utils.py
@@ -3,7 +3,7 @@
import os
import signal
import psutil
from .common_utils import print_error, print_normal, print_warning
from .common_utils import print_error


def check_output_command(file_path, head=None, tail=None):
10 changes: 6 additions & 4 deletions tools/nni_cmd/common_utils.py
@@ -21,10 +21,10 @@
import os
import sys
import json
import ruamel.yaml as yaml
import psutil
import socket
from pathlib import Path
import ruamel.yaml as yaml
import psutil
from .constants import ERROR_INFO, NORMAL_INFO, WARNING_INFO, COLOR_RED_FORMAT, COLOR_YELLOW_FORMAT

def get_yml_content(file_path):
@@ -34,6 +34,7 @@ def get_yml_content(file_path):
return yaml.load(file, Loader=yaml.Loader)
except yaml.scanner.ScannerError as err:
print_error('yaml file format error!')
print_error(err)
exit(1)
except Exception as exception:
print_error(exception)
@@ -46,6 +47,7 @@ def get_json_content(file_path):
return json.load(file)
except TypeError as err:
print_error('json file format error!')
print_error(err)
return None

def print_error(content):
@@ -70,7 +72,7 @@ def detect_process(pid):

def detect_port(port):
'''Detect if the port is used'''
socket_test = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
socket_test = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
socket_test.connect(('127.0.0.1', int(port)))
socket_test.close()
@@ -79,7 +81,7 @@ def detect_port(port):
return False

def get_user():
if sys.platform =='win32':
if sys.platform == 'win32':
return os.environ['USERNAME']
else:
return os.environ['USER']
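After the get_yml_content hunk, a malformed YAML config is reported twice before exiting: the generic 'yaml file format error!' message plus the parser's own error. A rough standalone sketch of the same pattern, using plain print instead of the repo's print_error helper and covering only the branch touched here:

    import ruamel.yaml as yaml

    def load_config(file_path):
        """Hypothetical stand-in for get_yml_content: surface both messages on a parse failure."""
        try:
            with open(file_path, "r") as f:
                return yaml.load(f, Loader=yaml.Loader)
        except yaml.scanner.ScannerError as err:
            print("yaml file format error!")  # the real helper uses print_error from tools/nni_cmd
            print(err)                        # the extra detail this PR adds
            raise SystemExit(1)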