Commit

add unittest,test=develop
ceci3 committed Aug 9, 2020
1 parent 44db6f5 commit 38bb371
Showing 4 changed files with 47 additions and 16 deletions.
25 changes: 10 additions & 15 deletions python/paddle/fluid/dygraph/nn.py
@@ -3267,11 +3267,9 @@ class SyncBatchNorm(layers.Layer):
x = np.random.random(size=(3, 10, 3, 7)).astype('float32')
with fluid.dygraph.guard():
x = to_variable(x)
if paddle.fluid.is_compiled_with_cuda():
if fluid.is_compiled_with_cuda():
sync_batch_norm = nn.SyncBatchNorm(10)
hidden1 = sync_batch_norm(x)
else:
raise NotImplemented("SyncBatchNorm only support GPU")
"""

def __init__(self,
@@ -3340,19 +3338,12 @@ def forward(self, input):
variance_out = self._variance

### train mode: use mini-batch stats, eval mode: use global stats
if self.training:
use_global_stats = False
trainable_statistics = False
else:
use_global_stats = True
trainable_statistics = False

if in_dygraph_mode():
attrs = ("momentum", self._momentum, "epsilon", self._eps,
"is_test", not self.training, "data_layout",
self._data_layout, "use_mkldnn", False, "fuse_with_relu",
False, "use_global_stats", use_global_stats,
'trainable_statistics', trainable_statistics)
False, "use_global_stats", not self.training,
'trainable_statistics', False)
sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm(
input, self.weight, self.bias, self._mean, self._variance,
mean_out, variance_out, *attrs)
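
The deleted if/else assigned trainable_statistics = False on both paths and only flipped use_global_stats with the mode, so the commit inlines them as not self.training and False. A standalone sketch of the equivalent attr construction (the helper name and defaults are illustrative, not Paddle API):

def bn_runtime_attrs(training, momentum=0.9, epsilon=1e-5, layout="NCHW"):
    """Illustrative helper mirroring how forward() now derives its attrs."""
    return {
        "momentum": momentum,
        "epsilon": epsilon,
        "is_test": not training,
        "data_layout": layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        # train mode: mini-batch statistics; eval mode: global (running) stats
        "use_global_stats": not training,
        # constant on both paths of the removed if/else
        "trainable_statistics": False,
    }

# eval mode switches to the running statistics, train mode does not
assert bn_runtime_attrs(training=True)["use_global_stats"] is False
assert bn_runtime_attrs(training=False)["use_global_stats"] is True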
@@ -3369,8 +3360,8 @@ def forward(self, input):
"data_layout": self._data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
"trainable_statistics": trainable_statistics,
"use_global_stats": not self.training,
"trainable_statistics": False,
}

inputs = {
@@ -3385,7 +3376,7 @@
dtype=self._dtype, stop_gradient=True)
saved_variance = self._helper.create_variable_for_type_inference(
dtype=self._dtype, stop_gradient=True)
sync_batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(
sync_batch_norm_out = self._helper.create_variable_for_type_inference(
self._dtype)

outputs = {
@@ -3415,6 +3406,10 @@ class Flatten(layers.Layer):
start_axis(int): first dim to flatten (default = 1)
stop_axis(int): last dim to flatten (default = -1).
Returns:
None
Examples:
.. code-block:: python
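A minimal usage sketch for the new Flatten layer, assuming the constructor exposes the start_axis and stop_axis parameters documented above with the stated defaults:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable, nn

inp = np.ones([5, 2, 3, 4]).astype('float32')
with fluid.dygraph.guard():
    x = to_variable(inp)
    # assumed signature: Flatten(start_axis=1, stop_axis=-1)
    flatten = nn.Flatten()
    out = flatten(x)
    # the defaults flatten every dim after the batch dim: [5, 2, 3, 4] -> [5, 24]
    print(out.shape)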
18 changes: 18 additions & 0 deletions python/paddle/fluid/tests/unittests/test_layers.py
@@ -283,6 +283,24 @@ def test_layer_norm(self):
with self.assertRaises(ValueError):
lm(base.to_variable(inp))

def test_SyncBatchNorm(self):
if core.is_compiled_with_cuda():
with self.static_graph():
t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
my_sync_bn = nn.SyncBatchNorm(3)
ret = my_sync_bn(t)
static_ret = self.get_static_graph_result(
feed={'t': np.ones(
[3, 3, 5, 5], dtype='float32')},
fetch_list=[ret])[0]

with self.dynamic_graph():
t = np.ones([3, 3, 5, 5], dtype='float32')
my_syncbn = paddle.nn.SyncBatchNorm(3)
dy_ret = my_syncbn(base.to_variable(t))
dy_ret_value = dy_ret.numpy()
self.assertTrue(np.array_equal(static_ret, dy_ret_value))

def test_relu(self):
with self.static_graph():
t = layers.data(name='t', shape=[3, 3], dtype='float32')
@@ -25,7 +25,7 @@ class TestParallelDygraphMnist(TestDistBase):
def _setup_config(self):
self._sync_mode = False
self._nccl2_mode = True
self._dygraph = True
self._dygraph = False #True

def test_mnist(self):
if fluid.core.is_compiled_with_cuda():
18 changes: 18 additions & 0 deletions python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py
@@ -25,6 +25,7 @@
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler
from paddle.fluid import Program, program_guard

from op_test import OpTest, _set_use_system_allocator

@@ -202,5 +203,22 @@ def setUp(self):
self.atol = 1e-2


class TestDygraphSyncBatchNormAPIError(unittest.TestCase):
def test_errors(self):
if not core.is_compiled_with_cuda():
return

with program_guard(Program(), Program()):
my_sync_batch_norm = fluid.dygraph.SyncBatchNorm(10)
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0))
self.assertRaises(TypeError, my_sync_batch_norm, x1)

# the input dtype of SyncBatchNorm must be float16 or float32 or float64
# float16 can only be used on a GPU place
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32")
self.assertRaises(TypeError, my_sync_batch_norm, x2)


if __name__ == '__main__':
unittest.main()
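
For contrast with the two TypeError cases exercised above, a call the checks are meant to accept would look roughly like this (a sketch, assuming a CUDA build with at least one visible GPU):

import numpy as np
import paddle.fluid as fluid

if fluid.is_compiled_with_cuda():
    with fluid.dygraph.guard(fluid.CUDAPlace(0)):
        sync_bn = fluid.dygraph.SyncBatchNorm(3)
        # a float32 NCHW variable is within the supported dtypes
        x = fluid.dygraph.to_variable(
            np.random.random([2, 3, 5, 5]).astype('float32'))
        out = sync_bn(x)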
