
Commit

Merge pull request #277 from microsoft/master
merge master
SparkSnail authored Dec 2, 2020
2 parents 765bc33 + 95f731e commit cff51cc
Showing 23 changed files with 579 additions and 148 deletions.
54 changes: 33 additions & 21 deletions nni/algorithms/compression/pytorch/quantization/quantizers.py
@@ -73,9 +73,9 @@ def update_quantization_param(bits, rmin, rmax):
     ----------
     bits : int
         quantization bits length
-    rmin : float
+    rmin : Tensor
         min value of real value
-    rmax : float
+    rmax : Tensor
         max value of real value
     Returns
@@ -85,12 +85,17 @@ def update_quantization_param(bits, rmin, rmax):
     # extend the [min, max] interval to ensure that it contains 0.
     # Otherwise, we would not meet the requirement that 0 be an exactly
     # representable value.
-    rmin = min(rmin, 0)
-    rmax = max(rmax, 0)
+    if rmin.is_cuda:
+        rmin = torch.min(rmin, torch.Tensor([0]).cuda())
+        rmax = torch.max(rmax, torch.Tensor([0]).cuda())
+        qmin = torch.Tensor([0]).cuda()
+        qmax = torch.Tensor([(1 << bits) - 1]).cuda()
+    else:
+        rmin = torch.min(rmin, torch.Tensor([0]))
+        rmax = torch.max(rmax, torch.Tensor([0]))
+        qmin = torch.Tensor([0])
+        qmax = torch.Tensor([(1 << bits) - 1])

-    # the min and max quantized values, as floating-point values
-    qmin = 0
-    qmax = (1 << bits) - 1
     # First determine the scale.
     scale = (rmax - rmin) / (qmax - qmin)
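For orientation, the function above derives affine quantization parameters from the observed value range. A minimal, self-contained sketch of that math follows; the zero-point step is not visible in this hunk, so its exact form here is an assumption based on the standard asymmetric scheme, and the helper name is illustrative only.

import torch

def compute_qparams(bits, rmin, rmax):
    # illustrative helper, not part of this commit
    rmin = torch.min(rmin, torch.zeros_like(rmin))   # the interval must contain 0
    rmax = torch.max(rmax, torch.zeros_like(rmax))
    qmin, qmax = 0, (1 << bits) - 1
    scale = (rmax - rmin) / (qmax - qmin)
    # zero_point is the quantized integer that maps back to real 0
    zero_point = torch.clamp(torch.round(qmin - rmin / scale), qmin, qmax)
    return scale, zero_point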

@@ -143,11 +148,11 @@ def __init__(self, model, config_list, optimizer=None):
             types of nn.module you want to apply quantization, eg. 'Conv2d'
         """
         super().__init__(model, config_list, optimizer)
-        self.steps = 1
         modules_to_compress = self.get_modules_to_compress()
+        self.bound_model.register_buffer("steps", torch.Tensor([1]))
         for layer, config in modules_to_compress:
-            layer.module.register_buffer("zero_point", None)
-            layer.module.register_buffer("scale", None)
+            layer.module.register_buffer("zero_point", torch.Tensor([0.0]))
+            layer.module.register_buffer("scale", torch.Tensor([1.0]))
             if "output" in config.get("quant_types", []):
                 layer.module.register_buffer('ema_decay', torch.Tensor([0.99]))
                 layer.module.register_buffer('tracked_min_biased', torch.zeros(1))
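The switch from plain attributes and None placeholders to registered buffers matters because buffers are saved in the module's state_dict and follow device moves. A tiny illustration, independent of this commit:

import torch

conv = torch.nn.Conv2d(3, 8, 3)
conv.register_buffer('scale', torch.Tensor([1.0]))
print('scale' in conv.state_dict())   # True: persisted with checkpoints
print(conv.scale.device)              # follows conv.to(...) / conv.cuda() moves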
@@ -187,13 +192,17 @@ def _quantize(self, bits, op, real_val):
             quantization bits length
         op : torch.nn.Module
             target module
-        real_val : float
+        real_val : Tensor
             real value to be quantized
         Returns
         -------
-        float
+        Tensor
         """
+        if real_val.is_cuda:
+            op.zero_point = op.zero_point.cuda()
+            op.scale = op.scale.cuda()
+
        transformed_val = op.zero_point + real_val / op.scale
        qmin = 0
        qmax = (1 << bits) - 1
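The rest of the `_quantize` body is cut off by the hunk; a hedged sketch of the quantize/dequantize pair used in `quantize_output` below, assuming the usual clamp-and-round affine mapping (the helper name is illustrative):

import torch

def fake_quantize(real_val, scale, zero_point, bits):
    qmin, qmax = 0, (1 << bits) - 1
    q = torch.clamp(torch.round(zero_point + real_val / scale), qmin, qmax)  # quantize
    return (q - zero_point) * scale                                          # dequantize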
@@ -229,7 +238,8 @@ def quantize_weight(self, wrapper, **kwargs):
         quant_start_step = config.get('quant_start_step', 0)
         assert weight_bits >= 1, "quant bits length should be at least 1"

-        if quant_start_step > self.steps:
+        # we dont update weight in evaluation stage
+        if quant_start_step > self.bound_model.steps or not wrapper.training:
             return weight

         # if bias exists, quantize bias to uint32
@@ -258,15 +268,17 @@ def quantize_output(self, output, wrapper, **kwargs):
         quant_start_step = config.get('quant_start_step', 0)
         assert output_bits >= 1, "quant bits length should be at least 1"

-        if quant_start_step > self.steps:
+        if quant_start_step > self.bound_model.steps:
             return output

-        current_min, current_max = torch.min(output), torch.max(output)
-        module.tracked_min_biased, module.tracked_min = update_ema(module.tracked_min_biased, current_min,
-                                                                   module.ema_decay, self.steps)
-        module.tracked_max_biased, module.tracked_max = update_ema(module.tracked_max_biased, current_max,
-                                                                   module.ema_decay, self.steps)
-        module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min, module.tracked_max)
+        # we dont update output quantization parameters in evaluation stage
+        if wrapper.training:
+            current_min, current_max = torch.min(output), torch.max(output)
+            module.tracked_min_biased, module.tracked_min = update_ema(module.tracked_min_biased, current_min,
+                                                                       module.ema_decay, self.bound_model.steps)
+            module.tracked_max_biased, module.tracked_max = update_ema(module.tracked_max_biased, current_max,
+                                                                       module.ema_decay, self.bound_model.steps)
+            module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min, module.tracked_max)
         out = self._quantize(output_bits, module, output)
         out = self._dequantize(module, out)
         return out
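The `update_ema` helper itself is not part of this diff. The sketch below shows one plausible bias-corrected exponential moving average that matches how it is called above (two return values, a decay factor, and a step count); treat it as an assumption, not the committed implementation.

def update_ema(biased_ema, value, decay, step):
    # standard EMA with bias correction, as in Adam-style estimators (assumed form)
    biased_ema = biased_ema * decay + (1 - decay) * value
    unbiased_ema = biased_ema / (1 - decay ** step)
    return biased_ema, unbiased_ema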
@@ -279,7 +291,7 @@ def step_with_optimizer(self):
         """
         override `compressor` `step` method, quantization only happens after certain number of steps
         """
-        self.steps += 1
+        self.bound_model.steps +=1


 class DoReFaQuantizer(Quantizer):
36 changes: 36 additions & 0 deletions nni/tools/nnictl/common_utils.py
@@ -5,11 +5,14 @@
 import sys
 import json
 import tempfile
+import time
 import socket
 import string
 import random
 import ruamel.yaml as yaml
 import psutil
+import filelock
+import glob
 from colorama import Fore

 from .constants import ERROR_INFO, NORMAL_INFO, WARNING_INFO
@@ -95,3 +98,36 @@ def generate_folder_name():
     temp_dir = generate_folder_name()
     os.makedirs(temp_dir)
     return temp_dir
+
+class SimplePreemptiveLock(filelock.SoftFileLock):
+    '''A lock that supports checking for lock expiration; if you do not need expiration checks, use SoftFileLock directly.'''
+    def __init__(self, lock_file, stale=-1):
+        super(__class__, self).__init__(lock_file, timeout=-1)
+        self._lock_file_name = '{}.{}'.format(self._lock_file, os.getpid())
+        self._stale = stale
+
+    def _acquire(self):
+        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
+        try:
+            lock_file_names = glob.glob(self._lock_file + '.*')
+            for file_name in lock_file_names:
+                if os.path.exists(file_name) and (self._stale < 0 or time.time() - os.stat(file_name).st_mtime < self._stale):
+                    return None
+            fd = os.open(self._lock_file_name, open_mode)
+        except (IOError, OSError):
+            pass
+        else:
+            self._lock_file_fd = fd
+        return None
+
+    def _release(self):
+        os.close(self._lock_file_fd)
+        self._lock_file_fd = None
+        try:
+            os.remove(self._lock_file_name)
+        except OSError:
+            pass
+        return None
+
+def get_file_lock(path: string, stale=-1):
+    return SimplePreemptiveLock(path + '.lock', stale=-1)
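A quick usage sketch of the new lock helper; the path and stale timeout are illustrative, and `config_utils.py` below acquires the lock in the same way:

lock = get_file_lock('/tmp/demo.experiment', stale=2)   # hypothetical path
with lock:
    # read-modify-write the shared metadata file while other nnictl processes wait
    pass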
72 changes: 44 additions & 28 deletions nni/tools/nnictl/config_utils.py
@@ -4,8 +4,10 @@
 import os
 import json
 import shutil
+import time
 from .constants import NNICTL_HOME_DIR
 from .command_utils import print_error
+from .common_utils import get_file_lock

 class Config:
     '''a util class to load and save config'''
@@ -34,7 +36,7 @@ def write_file(self):
         if self.config:
             try:
                 with open(self.config_file, 'w') as file:
-                    json.dump(self.config, file)
+                    json.dump(self.config, file, indent=4)
             except IOError as error:
                 print('Error:', error)
                 return
@@ -54,39 +56,53 @@ class Experiments:
     def __init__(self, home_dir=NNICTL_HOME_DIR):
         os.makedirs(home_dir, exist_ok=True)
         self.experiment_file = os.path.join(home_dir, '.experiment')
-        self.experiments = self.read_file()
+        self.lock = get_file_lock(self.experiment_file, stale=2)
+        with self.lock:
+            self.experiments = self.read_file()

-    def add_experiment(self, expId, port, startTime, file_name, platform, experiment_name, endTime='N/A', status='INITIALIZED'):
-        '''set {key:value} paris to self.experiment'''
-        self.experiments[expId] = {}
-        self.experiments[expId]['port'] = port
-        self.experiments[expId]['startTime'] = startTime
-        self.experiments[expId]['endTime'] = endTime
-        self.experiments[expId]['status'] = status
-        self.experiments[expId]['fileName'] = file_name
-        self.experiments[expId]['platform'] = platform
-        self.experiments[expId]['experimentName'] = experiment_name
-        self.write_file()
+    def add_experiment(self, expId, port, startTime, platform, experiment_name, endTime='N/A', status='INITIALIZED',
+                       tag=[], pid=None, webuiUrl=[], logDir=[]):
+        '''set {key:value} pairs to self.experiment'''
+        with self.lock:
+            self.experiments = self.read_file()
+            self.experiments[expId] = {}
+            self.experiments[expId]['id'] = expId
+            self.experiments[expId]['port'] = port
+            self.experiments[expId]['startTime'] = startTime
+            self.experiments[expId]['endTime'] = endTime
+            self.experiments[expId]['status'] = status
+            self.experiments[expId]['platform'] = platform
+            self.experiments[expId]['experimentName'] = experiment_name
+            self.experiments[expId]['tag'] = tag
+            self.experiments[expId]['pid'] = pid
+            self.experiments[expId]['webuiUrl'] = webuiUrl
+            self.experiments[expId]['logDir'] = logDir
+            self.write_file()

     def update_experiment(self, expId, key, value):
         '''Update experiment'''
-        if expId not in self.experiments:
-            return False
-        self.experiments[expId][key] = value
-        self.write_file()
-        return True
+        with self.lock:
+            self.experiments = self.read_file()
+            if expId not in self.experiments:
+                return False
+            self.experiments[expId][key] = value
+            self.write_file()
+            return True

     def remove_experiment(self, expId):
         '''remove an experiment by id'''
-        if expId in self.experiments:
-            fileName = self.experiments.pop(expId).get('fileName')
-            if fileName:
-                logPath = os.path.join(NNICTL_HOME_DIR, fileName)
-                try:
-                    shutil.rmtree(logPath)
-                except FileNotFoundError:
-                    print_error('{0} does not exist.'.format(logPath))
-            self.write_file()
+        with self.lock:
+            self.experiments = self.read_file()
+            if expId in self.experiments:
+                self.experiments.pop(expId)
+                fileName = expId
+                if fileName:
+                    logPath = os.path.join(NNICTL_HOME_DIR, fileName)
+                    try:
+                        shutil.rmtree(logPath)
+                    except FileNotFoundError:
+                        print_error('{0} does not exist.'.format(logPath))
+                self.write_file()

     def get_all_experiments(self):
         '''return all of experiments'''
@@ -96,7 +112,7 @@ def write_file(self):
         '''save config to local file'''
         try:
             with open(self.experiment_file, 'w') as file:
-                json.dump(self.experiments, file)
+                json.dump(self.experiments, file, indent=4)
         except IOError as error:
             print('Error:', error)
             return ''
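For orientation, a hedged example of how the updated Experiments registry might be driven; all values are made up, and in nnictl they come from the experiment launcher:

exps = Experiments()
exps.add_experiment('AbCdEfGh', 8080, '2020-12-02 10:00:00', 'local', 'mnist-demo',
                    pid=12345, webuiUrl=['http://127.0.0.1:8080'], logDir=['~/nni-experiments/AbCdEfGh'])
exps.update_experiment('AbCdEfGh', 'status', 'RUNNING')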
2 changes: 1 addition & 1 deletion nni/tools/nnictl/constants.py
@@ -4,7 +4,7 @@
 import os
 from colorama import Fore

-NNICTL_HOME_DIR = os.path.join(os.path.expanduser('~'), '.local', 'nnictl')
+NNICTL_HOME_DIR = os.path.join(os.path.expanduser('~'), 'nni-experiments')

 NNI_HOME_DIR = os.path.join(os.path.expanduser('~'), 'nni-experiments')