[numpy] Change d2l chapters cv and gan to use numpy (#15368)
* Change op name style to lower case underscore

* Add ops under image to npx

* Add image submodule to npx

* Fix split_and_load to use np

* Fix fine tuning

* Fix bbox and anchor

* Fix odd

* Fix ssd and rcnn

* Remove restriction on binary element-wise scalar

* Fix gan

* Fix sanity

* Try to fix website build failure

* Add npx.random.seed

* Fix doc
reminisce authored and haojin2 committed Jul 19, 2019
1 parent 505e86a commit ca31e72
Showing 48 changed files with 505 additions and 112 deletions.
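
Taken together, these changes let the d2l computer-vision and GAN chapters run on MXNet's NumPy interface. A rough sketch of the intended workflow, assuming a build that includes this commit (the seed value and context list are illustrative, and npx.random.seed is taken from the commit message above):

    import mxnet as mx
    from mxnet import np, npx, gluon

    npx.set_np()                      # switch Gluon and the ndarray front end to numpy semantics
    npx.random.seed(42)               # seeding helper added by this commit
    data = np.arange(8).reshape((4, 2))
    # split_and_load now accepts np ndarrays ("Fix split_and_load to use np")
    shards = gluon.utils.split_and_load(data, ctx_list=[mx.cpu(), mx.cpu()])
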
5 changes: 4 additions & 1 deletion python/mxnet/_numpy_op_doc.py
@@ -21,7 +21,10 @@


def _np_reshape(a, newshape, order='C'):
"""Gives a new shape to an array without changing its data.
"""
reshape(a, newshape, order='C')
Gives a new shape to an array without changing its data.
Parameters
----------
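
For reference, this operator backs mxnet.np.reshape; a minimal sketch of the documented call, assuming numpy semantics are enabled:

    from mxnet import np, npx

    npx.set_np()
    a = np.arange(6)
    b = np.reshape(a, (2, 3))    # same data viewed with shape (2, 3)
    c = a.reshape((3, 2))        # method form of the same operator
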
3 changes: 2 additions & 1 deletion python/mxnet/base.py
@@ -744,6 +744,7 @@ def write_all_str(module_file, module_all_list):
_NP_OP_SUBMODULE_LIST = ['_random_', '_linalg_']

_NP_EXT_OP_PREFIX = '_npx_'
_NP_EXT_OP_SUBMODULE_LIST = ['_image_']

_NP_INTERNAL_OP_PREFIX = '_npi_'

@@ -784,7 +785,7 @@ def _init_np_op_module(root_module_name, np_module_name, mx_module_name, make_op
submodule_name_list = _NP_OP_SUBMODULE_LIST
elif np_module_name == 'numpy_extension':
op_name_prefix = _NP_EXT_OP_PREFIX
submodule_name_list = []
submodule_name_list = _NP_EXT_OP_SUBMODULE_LIST
elif np_module_name == 'numpy._internal':
op_name_prefix = _NP_INTERNAL_OP_PREFIX
submodule_name_list = []
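
Registering the _image_ submodule makes the backend image operators reachable as npx.image.*. A small sketch of what that exposes; the exact operator name and output convention here are assumptions based on the transforms changes below:

    from mxnet import np, npx

    npx.set_np()
    img = np.zeros((64, 64, 3), dtype='uint8')   # dummy HWC uint8 image
    tensor = npx.image.to_tensor(img)            # CHW float32 scaled to [0, 1]
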
23 changes: 20 additions & 3 deletions python/mxnet/gluon/block.py
@@ -26,7 +26,6 @@
import re
from collections import OrderedDict


from ..base import mx_real_t, MXNetError
from .. import symbol, ndarray, initializer
from ..symbol import Symbol
@@ -37,7 +36,7 @@
from .utils import _check_same_symbol_type, _check_all_np_ndarrays
from .. import numpy_extension as _mx_npx
from .. import numpy as _mx_np, numpy_extension as _mx_npx
from .. util import is_np_array
from .. util import is_np_array, np_shape, np_array


class _BlockScope(object):
@@ -387,7 +386,25 @@ def load_parameters(self, filename, ctx=None, allow_missing=False,
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
if is_np_array():
loaded = _mx_npx.load(filename)
# failure may happen when loading parameters saved as NDArrays within
# NumPy semantics. Check the failure type and recover from it if it happens.
try:
loaded = _mx_npx.load(filename)
except MXNetError as e:
err_msg = str(e)
if 'is_np_shape' in err_msg:
# Loading failure due to parameters saved without numpy semantics.
# Temporarily disable numpy semantics and load parameters. After it's
# done, resume the numpy semantics. This is fine because the cases
# numpy ndarray covers is a superset of the legacy ndarray's.
with np_array(False):
with np_shape(False):
loaded_nds = ndarray.load(filename)
assert isinstance(loaded_nds, dict),\
'expecting a dict type, got {}'.format(str(type(loaded_nds)))
loaded = {k: loaded_nds[k].as_np_ndarray() for k in loaded_nds}
else:
raise ValueError(err_msg)
else:
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
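
The recovery path above can be exercised on its own as well; a minimal sketch of the same idea (the filename is hypothetical):

    from mxnet import ndarray
    from mxnet.util import np_array, np_shape

    # Parameters saved without numpy semantics make npx.load raise, so temporarily
    # drop numpy semantics, load the legacy NDArrays, then convert them back.
    with np_array(False):
        with np_shape(False):
            legacy = ndarray.load('legacy_model.params')   # hypothetical file
    loaded = {k: v.as_np_ndarray() for k, v in legacy.items()}
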
5 changes: 3 additions & 2 deletions python/mxnet/gluon/data/vision/datasets.py
@@ -188,8 +188,9 @@ def _get_data(self):
data = np.concatenate(data)
label = np.concatenate(label)

self._data = nd.array(data, dtype=data.dtype)
self._label = label
array_fn = _mx_np.array if is_np_array() else nd.array
self._data = array_fn(data, dtype=data.dtype)
self._label = array_fn(label, dtype=label.dtype) if is_np_array() else label


class CIFAR100(CIFAR10):
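
With this change the vision datasets hand back np ndarrays whenever numpy semantics are active; a short sketch (CIFAR-10 is downloaded on first use):

    from mxnet import npx
    from mxnet.gluon.data.vision import datasets

    npx.set_np()
    cifar = datasets.CIFAR10(train=False)
    img, label = cifar[0]    # image (and label array) are mxnet np ndarrays under numpy semantics
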
28 changes: 25 additions & 3 deletions python/mxnet/gluon/data/vision/transforms.py
@@ -23,7 +23,7 @@
from ...nn import Sequential, HybridSequential
from .... import image
from ....base import numeric_types
from ...utils import _adapt_np_array
from ....util import is_np_array


class Compose(Sequential):
@@ -93,6 +93,8 @@ def __init__(self, dtype='float32'):
self._dtype = dtype

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.cast(x, self._dtype)


@@ -134,8 +136,9 @@ class ToTensor(HybridBlock):
def __init__(self):
super(ToTensor, self).__init__()

@_adapt_np_array
def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.to_tensor(x)


@@ -189,6 +192,8 @@ def __init__(self, mean=0.0, std=1.0):
self._std = std

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.normalize(x, self._mean, self._std)


@@ -370,8 +375,9 @@ def __init__(self, size, keep_ratio=False, interpolation=1):
self._size = size
self._interpolation = interpolation

@_adapt_np_array
def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.resize(x, self._size, self._keep, self._interpolation)

class RandomFlipLeftRight(HybridBlock):
@@ -388,6 +394,8 @@ def __init__(self):
super(RandomFlipLeftRight, self).__init__()

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_flip_left_right(x)


@@ -405,6 +413,8 @@ def __init__(self):
super(RandomFlipTopBottom, self).__init__()

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_flip_top_bottom(x)


@@ -430,6 +440,8 @@ def __init__(self, brightness):
self._args = (max(0, 1-brightness), 1+brightness)

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_brightness(x, *self._args)


@@ -455,6 +467,8 @@ def __init__(self, contrast):
self._args = (max(0, 1-contrast), 1+contrast)

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_contrast(x, *self._args)


@@ -480,6 +494,8 @@ def __init__(self, saturation):
self._args = (max(0, 1-saturation), 1+saturation)

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_saturation(x, *self._args)


@@ -505,6 +521,8 @@ def __init__(self, hue):
self._args = (max(0, 1-hue), 1+hue)

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_hue(x, *self._args)


@@ -539,6 +557,8 @@ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self._args = (brightness, contrast, saturation, hue)

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_color_jitter(x, *self._args)


@@ -562,4 +582,6 @@ def __init__(self, alpha):
self._alpha = alpha

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.image.random_lighting(x, self._alpha)
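
The pattern repeated above, rebinding F to F.npx when numpy semantics are active, keeps the transforms composable exactly as before; a sketch of typical usage under numpy semantics:

    from mxnet import np, npx
    from mxnet.gluon.data.vision import transforms

    npx.set_np()
    aug = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    img = np.zeros((64, 64, 3), dtype='uint8')   # dummy HWC uint8 image
    out = aug(img)                               # CHW float32, normalized
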
39 changes: 28 additions & 11 deletions python/mxnet/gluon/loss.py
@@ -258,30 +258,47 @@ def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
weight, batch_axis, **kwargs)
self._from_sigmoid = from_sigmoid

@_adapt_np_array
def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
label = _reshape_like(F, label, pred)
if is_np_array():
relu_fn = F.npx.relu
act_fn = F.npx.activation
abs_fn = F.np.abs
mul_fn = F.np.multiply
log_fn = F.np.log
else:
relu_fn = F.relu
act_fn = F.Activation
abs_fn = F.abs
mul_fn = F.broadcast_mul
log_fn = F.log
if not self._from_sigmoid:
if pos_weight is None:
# We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
loss = F.relu(pred) - pred * label + \
F.Activation(-F.abs(pred), act_type='softrelu')
loss = relu_fn(pred) - pred * label + \
act_fn(-abs_fn(pred), act_type='softrelu')
else:
# We use the stable formula: x - x * z + (1 + z * pos_weight - z) * \
# (log(1 + exp(-abs(x))) + max(-x, 0))
log_weight = 1 + F.broadcast_mul(pos_weight - 1, label)
loss = pred - pred * label + log_weight * \
(F.Activation(-F.abs(pred), act_type='softrelu') + F.relu(-pred))
log_weight = 1 + mul_fn(pos_weight - 1, label)
loss = pred - pred * label + log_weight *\
(act_fn(-abs_fn(pred), act_type='softrelu') + relu_fn(-pred))
else:
eps = 1e-12
if pos_weight is None:
loss = -(F.log(pred + eps) * label
+ F.log(1. - pred + eps) * (1. - label))
loss = -(log_fn(pred + eps) * label
+ log_fn(1. - pred + eps) * (1. - label))
else:
loss = -(F.broadcast_mul(F.log(pred + eps) * label, pos_weight)
+ F.log(1. - pred + eps) * (1. - label))
loss = -(mul_fn(log_fn(pred + eps) * label, pos_weight)
+ log_fn(1. - pred + eps) * (1. - label))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
if is_np_array():
if F is ndarray:
return F.np.mean(loss, axis=tuple(range(1, loss.ndim)))
else:
return F.npx.batch_flatten(loss).mean(axis=1)
else:
return F.mean(loss, axis=self._batch_axis, exclude=True)


SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
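
The numerically stable form cited in the comments above can be checked in isolation; a plain-NumPy sketch of max(x, 0) - x*z + log(1 + exp(-|x|)):

    import numpy as onp

    def stable_sigmoid_bce(pred, label):
        # Same value as -label*log(sigmoid(pred)) - (1-label)*log(1-sigmoid(pred)),
        # but exp() never sees a large positive argument, so it cannot overflow.
        return onp.maximum(pred, 0) - pred * label + onp.log1p(onp.exp(-onp.abs(pred)))

    print(stable_sigmoid_bce(onp.array([10.0, -10.0]), onp.array([1.0, 0.0])))  # ~[0, 0]
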
19 changes: 12 additions & 7 deletions python/mxnet/gluon/model_zoo/vision/resnet.py
@@ -33,6 +33,7 @@
from ...block import HybridBlock
from ... import nn
from .... import base
from .... util import is_np_array

# Helpers
def _conv3x3(channels, stride, in_channels):
@@ -81,7 +82,8 @@ def hybrid_forward(self, F, x):
if self.downsample:
residual = self.downsample(residual)

x = F.Activation(residual+x, act_type='relu')
act = F.npx.activation if is_np_array() else F.Activation
x = act(residual+x, act_type='relu')

return x

@@ -129,7 +131,8 @@ def hybrid_forward(self, F, x):
if self.downsample:
residual = self.downsample(residual)

x = F.Activation(x + residual, act_type='relu')
act = F.npx.activation if is_np_array() else F.Activation
x = act(x + residual, act_type='relu')
return x


@@ -165,13 +168,14 @@ def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
x = F.Activation(x, act_type='relu')
act = F.npx.activation if is_np_array() else F.Activation
x = act(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)

x = self.bn2(x)
x = F.Activation(x, act_type='relu')
x = act(x, act_type='relu')
x = self.conv2(x)

return x + residual
@@ -211,17 +215,18 @@ def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
x = F.Activation(x, act_type='relu')
act = F.npx.activation if is_np_array() else F.Activation
x = act(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)

x = self.bn2(x)
x = F.Activation(x, act_type='relu')
x = act(x, act_type='relu')
x = self.conv2(x)

x = self.bn3(x)
x = F.Activation(x, act_type='relu')
x = act(x, act_type='relu')
x = self.conv3(x)

return x + residual
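
With F.Activation swapped for npx.activation, the model-zoo ResNets accept np ndarray inputs directly; a quick sketch with untrained weights, assuming the remaining Gluon layers already dispatch under numpy semantics:

    from mxnet import np, npx
    from mxnet.gluon.model_zoo import vision

    npx.set_np()
    net = vision.resnet18_v1(pretrained=False)
    net.initialize()
    out = net(np.zeros((1, 3, 224, 224)))   # forward pass on an np ndarray batch
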
8 changes: 4 additions & 4 deletions python/mxnet/gluon/nn/activations.py
@@ -49,9 +49,8 @@ def _alias(self):
return self._act_type

def hybrid_forward(self, F, x):
if is_np_array():
F = F.npx
return F.Activation(x, act_type=self._act_type, name='fwd')
act = F.npx.activation if is_np_array() else F.Activation
return act(x, act_type=self._act_type, name='fwd')

def __repr__(self):
s = '{name}({_act_type})'
@@ -91,7 +90,8 @@ def __init__(self, alpha, **kwargs):
self._alpha = alpha

def hybrid_forward(self, F, x):
return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
leaky_relu = F.npx.leaky_relu if is_np_array() else F.LeakyReLU
return leaky_relu(x, act_type='leaky', slope=self._alpha, name='fwd')

def __repr__(self):
s = '{name}({alpha})'
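
The same dispatch applies to the standalone activation blocks; a minimal sketch:

    from mxnet import np, npx
    from mxnet.gluon import nn

    npx.set_np()
    act = nn.Activation('relu')
    leaky = nn.LeakyReLU(alpha=0.1)
    x = np.array([-2.0, 0.0, 3.0])
    print(act(x), leaky(x))   # routed to npx.activation / npx.leaky_relu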