edit misspelling (#1768)
Co-authored-by: vfdev <vfdev.5@gmail.com>
KickItLikeShika and vfdev-5 authored Mar 12, 2021
1 parent 86576d8 commit 6afb4aa
Showing 2 changed files with 24 additions and 24 deletions.
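Note that the old name `_test_distirb_*` was used consistently at both the definitions and every call site, so the suites passed before this change; the commit is purely a spelling cleanup. A quick way to confirm no stale references remain is a plain text scan. A minimal sketch, assuming a source checkout as the working directory — the paths and the old/new names come from this commit, everything else is illustrative:

# Sketch: fail if the misspelled helper name survives anywhere in the two
# touched files. OLD/NEW and the paths come from this commit; the script
# itself is illustrative, not part of the repository.
from pathlib import Path

OLD, NEW = "_test_distirb", "_test_distrib"
FILES = [
    Path("tests/ignite/contrib/metrics/test_average_precision.py"),
    Path("tests/ignite/contrib/metrics/test_cohen_kappa.py"),
]

for path in FILES:
    lines = path.read_text().splitlines()
    stale = [n for n, line in enumerate(lines, start=1) if OLD in line]
    assert not stale, f"{path}: stale '{OLD}' on lines {stale}"
    print(f"{path}: OK, {sum(NEW in line for line in lines)} '{NEW}' references")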
32 changes: 16 additions & 16 deletions tests/ignite/contrib/metrics/test_average_precision.py
@@ -202,7 +202,7 @@ def update_fn(engine, batch):
    _test(y_pred, y, batch_size)


-def _test_distirb_binary_input_N(device):
+def _test_distrib_binary_input_N(device):

    rank = idist.get_rank()
    torch.manual_seed(12)
@@ -252,7 +252,7 @@ def _test(y_pred, y, n_iters, metric_device):
    _test(y_pred, y, batch_size, idist.device())


-def _test_distirb_multilabel_input_N(device):
+def _test_distrib_multilabel_input_N(device):

    rank = idist.get_rank()
    torch.manual_seed(12)
@@ -404,8 +404,8 @@ def update(engine, i):
def test_distrib_gpu(distributed_context_single_node_nccl):

    device = torch.device(f"cuda:{distributed_context_single_node_nccl['local_rank']}")
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)

@@ -415,8 +415,8 @@ def test_distrib_gpu(distributed_context_single_node_nccl):
def test_distrib_cpu(distributed_context_single_node_gloo):

    device = torch.device("cpu")
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)

@@ -429,8 +429,8 @@ def test_distrib_hvd(gloo_hvd_executor):
    device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()

-    gloo_hvd_executor(_test_distirb_binary_input_N, (device,), np=nproc, do_init=True)
-    gloo_hvd_executor(_test_distirb_multilabel_input_N, (device,), np=nproc, do_init=True)
+    gloo_hvd_executor(_test_distrib_binary_input_N, (device,), np=nproc, do_init=True)
+    gloo_hvd_executor(_test_distrib_multilabel_input_N, (device,), np=nproc, do_init=True)
    gloo_hvd_executor(_test_distrib_integration_binary, (device,), np=nproc, do_init=True)
    gloo_hvd_executor(_test_distrib_integration_multilabel, (device,), np=nproc, do_init=True)

@@ -441,8 +441,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):

    device = torch.device("cpu")
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)

@@ -453,8 +453,8 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):

    device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)

@@ -465,16 +465,16 @@ def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
def test_distrib_single_device_xla():

    device = idist.device()
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)


def _test_distrib_xla_nprocs(index):
    device = idist.device()
-    _test_distirb_binary_input_N(device)
-    _test_distirb_multilabel_input_N(device)
+    _test_distrib_binary_input_N(device)
+    _test_distrib_multilabel_input_N(device)
    _test_distrib_integration_binary(device)
    _test_distrib_integration_multilabel(device)

16 changes: 8 additions & 8 deletions tests/ignite/contrib/metrics/test_cohen_kappa.py
@@ -156,7 +156,7 @@ def update_fn(engine, batch):
    _test(y_pred, y, batch_size)


-def _test_distirb_binary_input_N(device):
+def _test_distrib_binary_input_N(device):

    rank = idist.get_rank()
    torch.manual_seed(12)
@@ -261,7 +261,7 @@ def update(engine, i):
def test_distrib_gpu(distributed_context_single_node_nccl):

    device = torch.device(f"cuda:{distributed_context_single_node_nccl['local_rank']}")
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)


@@ -270,7 +270,7 @@ def test_distrib_gpu(distributed_context_single_node_nccl):
def test_distrib_cpu(distributed_context_single_node_gloo):

    device = torch.device("cpu")
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)


@@ -283,7 +283,7 @@ def test_distrib_hvd(gloo_hvd_executor):
    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()

    gloo_hvd_executor(
-        _test_distirb_binary_input_N, (device,), np=nproc, do_init=True,
+        _test_distrib_binary_input_N, (device,), np=nproc, do_init=True,
    )
    gloo_hvd_executor(
        _test_distrib_integration_binary, (device,), np=nproc, do_init=True,
@@ -296,7 +296,7 @@ def test_distrib_hvd(gloo_hvd_executor):
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):

    device = torch.device("cpu")
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)


@@ -306,7 +306,7 @@ def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):

    device = torch.device(f"cuda:{distributed_context_multi_node_nccl['local_rank']}")
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)


@@ -316,14 +316,14 @@ def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
def test_distrib_single_device_xla():

    device = idist.device()
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)


def _test_distrib_xla_nprocs(index):

    device = idist.device()
-    _test_distirb_binary_input_N(device)
+    _test_distrib_binary_input_N(device)
    _test_distrib_integration_binary(device)



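Since both files keep their test entry points unchanged, a single-node run of the two touched modules is enough to sanity-check the rename. A minimal sketch, assuming pytest is installed and this runs from the repository root (backend-specific fixtures may cause skips outside a real distributed launch):

# Sketch: run only the distributed tests in the two edited modules.
# Assumes a repository checkout and pytest; selection by "distrib" keyword.
import sys

import pytest

sys.exit(
    pytest.main(
        [
            "tests/ignite/contrib/metrics/test_average_precision.py",
            "tests/ignite/contrib/metrics/test_cohen_kappa.py",
            "-k", "distrib",
            "-q",
        ]
    )
)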