Add test cases.
GhostScreaming committed Aug 14, 2023
1 parent 2c08b3d commit 8666133
Showing 2 changed files with 67 additions and 0 deletions.
test/collective/fleet/hybrid_parallel_mp_broadcast_obj.py (36 additions, 0 deletions)
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
import unittest

@@ -129,6 +130,41 @@ def test_mp_model(self):
loss_a.numpy(), loss_b.numpy(), rtol=1e-6
)

def test_mp_model_async_allreduce(self):
(
model_a,
optimizer_a,
model_b,
optimizer_b,
) = self.build_model_optimizer()

os.environ['Flags_mp_aysnc_allreduce'] = "True"
os.environ['Flags_skip_mp_c_identity'] = "True"
if paddle.version.cuda() >= '11.6':
os.environ['Flags_fused_linear_param_grad_add'] = "True"

for _ in range(5):
img = np.random.randint(
0,
vocab_size,
(
batch_size,
seq_length,
),
)
text = [
random.sample('zyxwvutsrqponmlkjihgfedcba', 5)
for i in range(batch_size)
]
batch = (img, text)

loss_a = self.train_batch(batch, model_a, optimizer_a, True)
loss_b = self.train_batch(batch, model_b, optimizer_b, False)

np.testing.assert_allclose(
loss_a.numpy(), loss_b.numpy(), rtol=1e-6
)


if __name__ == "__main__":
unittest.main()
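
A note on the guard paddle.version.cuda() >= '11.6' used in the tests above: it compares version strings lexicographically, so a hypothetical '11.10' would sort below '11.6', and CPU-only builds of Paddle report the string 'False' rather than a version. The environment variable names the tests export (including the 'aysnc' spelling of Flags_mp_aysnc_allreduce) are kept verbatim here, since they presumably mirror the exact strings the framework reads. A more defensive numeric check could look like the following minimal sketch (an editor's illustration, not part of this commit), assuming paddle.version.cuda() returns a dotted version string or 'False':

import os

import paddle


def cuda_at_least(major, minor):
    # paddle.version.cuda() returns e.g. '11.6', or the string 'False'
    # on builds compiled without CUDA support.
    ver = paddle.version.cuda()
    if not ver or ver == 'False':
        return False
    # Pad with '0' so a bare major version like '11' still parses.
    parts = [int(p) for p in (ver.split('.') + ['0'])[:2]]
    return tuple(parts) >= (major, minor)


if cuda_at_least(11, 6):
    os.environ['Flags_fused_linear_param_grad_add'] = "True"
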
test/collective/fleet/hybrid_parallel_mp_model.py (31 additions, 0 deletions)
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
import unittest

@@ -450,6 +451,36 @@ def test_mp_model(self):
loss_a.numpy(), loss_b.numpy(), rtol=1e-6
)

def test_mp_model_async_allreduce(self):
(
model_a,
optimizer_a,
model_b,
optimizer_b,
) = self.build_model_optimizer()

os.environ['Flags_mp_aysnc_allreduce'] = "True"
os.environ['Flags_skip_mp_c_identity'] = "True"
if paddle.version.cuda() >= '11.6':
os.environ['Flags_fused_linear_param_grad_add'] = "True"

for _ in range(5):
np_data = np.random.randint(
0,
vocab_size,
(
batch_size,
seq_length,
),
)
batch = paddle.to_tensor(np_data)
loss_a = self.train_batch(batch, model_a, optimizer_a, True)
loss_b = self.train_batch(batch, model_b, optimizer_b, False)

np.testing.assert_allclose(
loss_a.numpy(), loss_b.numpy(), rtol=1e-6
)


if __name__ == "__main__":
unittest.main()
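
Both files are per-rank driver scripts rather than directly runnable unit tests; they are normally spawned across multiple processes by a launcher. A minimal sketch of exercising one of them on a two-GPU host with Paddle's distributed launcher follows (the actual CI wrapper for this suite is not shown in this commit):

# Sketch: spawn hybrid_parallel_mp_model.py across two GPUs with
# paddle.distributed.launch (assumes a 2-GPU host; the real test runner
# for this suite is not part of this commit).
import subprocess

subprocess.check_call(
    [
        "python",
        "-m",
        "paddle.distributed.launch",
        "--gpus",
        "0,1",
        "test/collective/fleet/hybrid_parallel_mp_model.py",
    ]
)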
