fix UTs on physical ipu (#44647)
gglin001 authored Jul 27, 2022
1 parent be13271 commit 8a07d02
Showing 10 changed files with 102 additions and 85 deletions.
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py
@@ -179,7 +179,7 @@ def wrapper(self, *args, **kwargs):
@classmethod
def cast_model_to_fp16(cls, main_program):
amp_list = paddle.static.amp.CustomOpLists()
amp_list.unsupported_list = {}
amp_list.unsupported_list = {'scale'}
to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
main_program, amp_list, use_fp16_guard=False)
paddle.static.amp.cast_parameters_to_fp16(
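For orientation, a minimal sketch (not part of the commit) of how this helper is exercised: the AMP calls mirror the hunk above, while the toy program and its layers are assumptions for illustration only. Adding 'scale' to unsupported_list keeps scale ops in fp32 when the rest of the program is cast to fp16.

    import paddle

    paddle.enable_static()

    # Hypothetical stand-in for a unit test's main_program.
    main_program = paddle.static.Program()
    startup_program = paddle.static.Program()
    with paddle.static.program_guard(main_program, startup_program):
        x = paddle.static.data(name='x', shape=[1, 3, 3, 3], dtype='float32')
        out = paddle.static.nn.fc(x, size=10)

    # Same sequence as the updated helper: build the op list, keep 'scale'
    # out of fp16, then cast the rest of the program.
    amp_list = paddle.static.amp.CustomOpLists()
    amp_list.unsupported_list = {'scale'}
    to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
        main_program, amp_list, use_fp16_guard=False)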
19 changes: 12 additions & 7 deletions python/paddle/fluid/tests/unittests/ipu/test_cast_op_ipu.py
@@ -35,7 +35,7 @@ def fp16_enabled(self):

def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 3, 3])
self.feed_fp32 = {'x': data.astype(np.float32)}
self.feed_fp32 = {'x': data.astype(np.float16)}

def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
@@ -44,7 +44,7 @@ def set_feed_attr(self):

def set_op_attrs(self):
self.attrs = {}
self.attrs['dtype'] = 'float16'
self.attrs['dtype'] = 'float32'

@IPUOpTest.static_graph
def build_model(self):
@@ -86,14 +86,19 @@ def set_op_attrs(self):

class TestCase2(TestBase):

def set_atol(self):
super().set_atol()
self.atol = 1e-3
self.rtol = 1e-3

def set_data_feed(self):
self.feed_fp32 = {
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
"x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
}

def set_op_attrs(self):
self.attrs = {}
self.attrs['dtype'] = 'float32'
self.attrs['dtype'] = 'float16'


class TestCase3(TestBase):
@@ -145,15 +150,15 @@ def set_op_attrs(self):


@unittest.skip('float64 is not supported')
class TestCase2(TestBase):
class TestCase7(TestBase):

def set_op_attrs(self):
self.attrs = {}
self.attrs['dtype'] = 'float64'


@unittest.skip('skip float16 to float32')
class TestCase3(TestBase):
class TestCase8(TestBase):

def set_data_feed(self):
self.feed_fp32 = {
@@ -166,7 +171,7 @@ def set_op_attrs(self):


@unittest.skip('int32 to int8 is not supported')
class TestCase4(TestBase):
class TestCase9(TestBase):

def set_atol(self):
super().set_atol()
(diff for another changed file; file path not shown in this view)
@@ -113,8 +113,8 @@ class TestTrainCase2(TestBase):
def set_atol(self):
self.atol = 7e-4
self.rtol = 1e-6
self.atol_fp16 = 4e-3
self.rtol_fp16 = 1e-3
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-2

def set_op_attrs(self):
self.attrs = {
(diff for another changed file; file path not shown in this view)
@@ -30,7 +30,7 @@ def setUp(self):
self.set_op_attrs()

def set_data_feed(self):
x = np.random.uniform(size=[2, 3, 6, 10])
x = np.random.uniform(size=[1, 2, 6, 10])
self.feed_fp32 = {"x": x.astype(np.float32)}
self.feed_fp16 = {"x": x.astype(np.float16)}

(diff for another changed file; file path not shown in this view)
@@ -36,8 +36,8 @@ def set_atol(self):
self.rtol_fp16 = 1e-3

def set_feed(self):
data1 = np.random.uniform(size=[100])
data2 = np.random.uniform(size=[200])
data1 = np.random.uniform(size=[10])
data2 = np.random.uniform(size=[20])
self.feed_fp32 = {
'x': data1.astype(np.float32),
'y': data2.astype(np.float32)
(diff for another changed file; file path not shown in this view)
@@ -31,17 +31,18 @@ def setUp(self):
self.set_attrs()

def set_atol(self):
self.atol = 2e-6
self.rtol = 1e-5
super().set_atol()
self.atol = 1e-6
self.rtol = 1e-3
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-3
self.rtol_fp16 = 1e-1

def set_training(self):
self.is_training = True
self.epoch = 20

def set_data_feed(self):
data = np.random.uniform(size=[1, 3, 28, 28])
data = np.random.uniform(size=[1, 3, 10, 10])
self.feed_fp32 = {"in_0": data.astype(np.float32)}

def set_feed_attr(self):
@@ -73,7 +74,7 @@ def build_model(self):

# using fp16
with paddle.static.amp.fp16_guard():
x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
x = paddle.static.nn.batch_norm(x, act='relu')
x = F.max_pool2d(x, kernel_size=2, stride=2)

@@ -82,9 +83,9 @@
loss = paddle.mean(x)

# optimizer
optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
optimizer.minimize(loss, self.startup_prog)
self.fetch_list = [loss.name]
self.fetch_list = [x.name]

def run_model(self, exec_mode):
# cast model to fp16
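Pulling the fragments of this hunk together, a self-contained sketch (not part of the commit) of the partially-fp16 model being built: the 'in_0' shape comes from set_data_feed above, the program guards are assumptions, and the collapsed lines between the hunks may contain additional layers that are omitted here. Only the block under fp16_guard is intended to run in fp16.

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name='in_0', shape=[1, 3, 10, 10], dtype='float32')
        # Region marked for fp16 execution, as in the hunk above.
        with paddle.static.amp.fp16_guard():
            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
            x = paddle.static.nn.batch_norm(x, act='relu')
            x = F.max_pool2d(x, kernel_size=2, stride=2)
        loss = paddle.mean(x)
        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
        optimizer.minimize(loss, startup_prog)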
129 changes: 65 additions & 64 deletions python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py
@@ -45,78 +45,73 @@ def set_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['enable_fp16'] = False
self.attrs['model_path'] = tempfile.TemporaryDirectory()

def set_optimizer(self):
self.optimizer = partial(paddle.optimizer.SGD, learning_rate=1e-1)

def _test_base(self, save_otherwise_load):
scope = paddle.static.Scope()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = self.SEED
startup_prog.random_seed = self.SEED
@IPUOpTest.static_graph
def build_model(self):
generator = paddle.fluid.unique_name.UniqueNameGenerator()

with paddle.fluid.unique_name.guard(generator):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_prog, startup_prog):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
conv1 = paddle.static.nn.conv2d(x,
num_filters=3,
filter_size=3,
bias_attr=False,
name='conv2d')
loss = paddle.mean(conv1)

# apply optimizer
self.optimizer().minimize(loss)
fetch_list = [loss.name]

place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)

if not save_otherwise_load:
paddle.static.load(main_prog, self.attrs['model_path'].name)

ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
ipu_strategy.set_precision_config(
enable_fp16=self.attrs['enable_fp16'])
ipu_program = paddle.static.IpuCompiledProgram(
main_prog, ipu_strategy=ipu_strategy)
program = ipu_program.compile(self.feed_list, fetch_list)

result = []
run_steps = self.attrs['steps'] if save_otherwise_load \
else self.attrs['steps'] - self.attrs['save_at_step']

feed = self.feed_fp16 if self.attrs[
'enable_fp16'] else self.feed_fp32
for i in range(run_steps):
tmp = exe.run(program, feed=feed, fetch_list=fetch_list)

if save_otherwise_load and \
i == self.attrs['save_at_step'] - 1:
ipu_program._backend.weights_to_host()
paddle.static.save(main_prog,
self.attrs['model_path'].name)

if save_otherwise_load and i >= self.attrs['save_at_step']:
result.append(tmp)
elif not save_otherwise_load:
result.append(tmp)

return np.asarray(result).flatten()
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype='float32')
conv1 = paddle.static.nn.conv2d(x,
num_filters=3,
filter_size=3,
bias_attr=False,
name='conv2d')
loss = paddle.mean(conv1)
# apply optimizer
self.optimizer().minimize(loss)
self.fetch_list = [loss.name]

def run_model(self, exec_mode, save_otherwise_load):
self.build_model()

place = paddle.IPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)

if not save_otherwise_load:
paddle.static.load(self.main_prog, self.attrs['model_path'].name)

ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=True)
if self.is_fp16_mode(exec_mode):
ipu_strategy.set_precision_config(enable_fp16=True)
IPUOpTest.cast_model_to_fp16(self.main_prog)
ipu_compiler = paddle.static.IpuCompiledProgram(
self.main_prog, ipu_strategy=ipu_strategy)
program = ipu_compiler.compile(self.feed_list, self.fetch_list)

feed = self.feed_fp32
if self.is_fp16_mode(exec_mode):
feed = self.feed_fp16

result = []
run_steps = self.attrs['steps'] if save_otherwise_load \
else self.attrs['steps'] - self.attrs['save_at_step']
for i in range(run_steps):
tmp = exe.run(program, feed=feed, fetch_list=self.fetch_list)

if save_otherwise_load and \
i == self.attrs['save_at_step'] - 1:
ipu_compiler._backend.weights_to_host()
paddle.static.save(self.main_prog,
self.attrs['model_path'].name)

if save_otherwise_load and i >= self.attrs['save_at_step']:
result.append(tmp)
elif not save_otherwise_load:
result.append(tmp)

return np.asarray(result)

def test_base(self):
res0 = self._test_base(True)
res1 = self._test_base(False)

res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, True)
res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, False)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
self.attrs['model_path'].cleanup()
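
A note on the bookkeeping above (not part of the commit): with steps=100 and save_at_step=20 from set_attrs, the saving run and the loading run end up with result arrays of equal length, which is what the np.allclose comparison relies on. A small illustration under those values:

    # Save run: executes all 100 steps, records results from step 20 onwards.
    # Load run: restores the checkpoint written after the first 20 steps, then
    # executes 100 - 20 = 80 steps and records all of them.
    steps, save_at_step = 100, 20
    saved = list(range(save_at_step, steps))    # 80 results kept by the save run
    loaded = list(range(steps - save_at_step))  # 80 results kept by the load run
    assert len(saved) == len(loaded) == 80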
@@ -185,12 +180,18 @@ def set_attrs(self):
self.attrs = {}
self.attrs['steps'] = 100
self.attrs['save_at_step'] = 20
self.attrs['enable_fp16'] = True
self.attrs['model_path'] = tempfile.TemporaryDirectory()

def set_optimizer(self):
self.optimizer = partial(paddle.optimizer.SGD, learning_rate=1e-1)

def test_base(self):
res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, True)
res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, False)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
self.attrs['model_path'].cleanup()


class TestMomentumFp16(TestSGDFP16):

(diff for another changed file; file path not shown in this view)
@@ -28,6 +28,10 @@ def setUp(self):
self.set_feed_attr()
self.set_attrs()

@property
def fp16_enabled(self):
return False

def set_training(self):
self.is_training = True
self.epoch = 100
(diff for another changed file; file path not shown in this view)
@@ -83,7 +83,7 @@ def exclude_fn(param):

loss = paddle.mean(conv1)
opt = paddle.optimizer.Lamb(
learning_rate=1e-1,
learning_rate=1e-3,
lamb_weight_decay=self.attrs['weight_decay'],
exclude_from_weight_decay_fn=exclude_fn)
opt.minimize(loss)
(diff for another changed file; file path not shown in this view)
@@ -29,6 +29,12 @@ def setUp(self):
self.set_feed_attr()
self.set_op_attrs()

def set_atol(self):
self.atol = 1e-6
self.rtol = 1e-6
self.atol_fp16 = 1e-2
self.rtol_fp16 = 1e-2

def set_data_feed(self):
data = np.random.uniform(size=[1, 255, 13, 13])
self.feed_fp32 = {"in_0": data.astype(np.float32)}
