diff --git a/python/mxnet/gluon/contrib/estimator/estimator.py b/python/mxnet/gluon/contrib/estimator/estimator.py
index bd1e166165b8..73ddcbc0ff8e 100644
--- a/python/mxnet/gluon/contrib/estimator/estimator.py
+++ b/python/mxnet/gluon/contrib/estimator/estimator.py
@@ -392,7 +392,7 @@ def fit(self, train_data,
             for handler in epoch_begin:
                 handler.epoch_begin(estimator_ref)
 
-            for i, batch in enumerate(train_data):
+            for batch in train_data:
                 # batch begin
                 for handler in batch_begin:
                     handler.batch_begin(estimator_ref, batch=batch)
diff --git a/python/mxnet/operator.py b/python/mxnet/operator.py
index b34fe0d0fa32..a40c94f7610f 100644
--- a/python/mxnet/operator.py
+++ b/python/mxnet/operator.py
@@ -824,7 +824,7 @@ def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
                 "entries in returned aux stypes, " \
                 "got %d."%(len(tensors[4]), len(ret[4]))
             rstype = []
-            for i, ret_list in enumerate(ret):
+            for ret_list in ret:
                 rstype.extend(ret_list)
 
             for i, stype in enumerate(rstype):
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index bd0e73c6e2f5..2fdb913f7872 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -1584,7 +1584,7 @@ def smaller_dtype(dt1, dt2):
         else:
             arg_params[n] = np.random.normal(size=arr.shape, scale=scale).astype(rand_type)
 
-    for n, arr in exe_list[0].aux_dict.items():
+    for n in exe_list[0].aux_dict:
         if n not in aux_params:
             aux_params[n] = 0
     for exe in exe_list:
diff --git a/tests/nightly/dist_device_sync_kvstore_byteps.py b/tests/nightly/dist_device_sync_kvstore_byteps.py
index 676be92611e1..4ddde57de919 100644
--- a/tests/nightly/dist_device_sync_kvstore_byteps.py
+++ b/tests/nightly/dist_device_sync_kvstore_byteps.py
@@ -65,7 +65,7 @@ def check_default_keys(nrepeat=3):
     # init kv dns keys
     kv.broadcast('3', mx.nd.ones(shape, ctx=get_current_context(device=True)), mx.nd.ones(shape, ctx=get_current_context(device=True)))
     kv.broadcast('99', mx.nd.ones(big_shape, ctx=get_current_context(device=True)), mx.nd.ones(big_shape, ctx=get_current_context(device=True)))
-    for i in range(nrepeat):
+    for _ in range(nrepeat):
         scale = my_rank + 1
         num = (my_num_workers + 1) * my_num_workers / 2
 
diff --git a/tests/nightly/dist_device_sync_kvstore_custom.py b/tests/nightly/dist_device_sync_kvstore_custom.py
index 6d3608ca04c2..17e0d3278ab1 100644
--- a/tests/nightly/dist_device_sync_kvstore_custom.py
+++ b/tests/nightly/dist_device_sync_kvstore_custom.py
@@ -54,7 +54,7 @@ def check_default_keys(nrepeat=3):
     # init kv dns keys
     kv.broadcast('3', mx.nd.ones(shape, ctx=mx.gpu()), mx.nd.ones(shape, ctx=mx.gpu()))
     kv.broadcast('99', mx.nd.ones(big_shape, ctx=mx.gpu()), mx.nd.ones(big_shape, ctx=mx.gpu()))
-    for i in range(nrepeat):
+    for _ in range(nrepeat):
         scale = my_rank + 1
         num = (my_num_workers + 1) * my_num_workers * num_gpus / 2
 
diff --git a/tests/nightly/dist_sync_kvstore.py b/tests/nightly/dist_sync_kvstore.py
index 1a89f5ce9f30..28e4af82e8fc 100644
--- a/tests/nightly/dist_sync_kvstore.py
+++ b/tests/nightly/dist_sync_kvstore.py
@@ -153,7 +153,7 @@ def check_row_sparse_keys_with_zeros(dtype, nrepeat):
     v = mx.nd.sparse.zeros('row_sparse', shape, dtype=dtype)
     big_v = mx.nd.sparse.zeros('row_sparse', big_shape, dtype=dtype)
     # push
-    for i in range(nrepeat):
+    for _ in range(nrepeat):
         kv.push(k1, v)
         kv.push(k2, big_v)
         # pull a subset of rows this worker is interested in
@@ -301,7 +301,7 @@ def check_compr_random(threshold, nrepeat):
         kv.init(k, mx.nd.zeros(s))
     for k,s in compr_random_keys_shapes:
         curr_residual = np.zeros(s)
-        for l in range(nrepeat):
+        for _ in range(nrepeat):
             orig_val = mx.nd.zeros(s)
             kv.pull(k, orig_val)
 
@@ -399,7 +399,7 @@ def check_compr_random(threshold, nrepeat):
         kv.init(k, mx.nd.zeros(s))
     for k,s in compr_random_keys_shapes:
         curr_residual = np.zeros(s)
-        for l in range(nrepeat):
+        for _ in range(nrepeat):
             orig_val = mx.nd.zeros(s)
             kv.pull(k, orig_val)
 
diff --git a/tests/nightly/test_kvstore.py b/tests/nightly/test_kvstore.py
index 65f6c4a13202..54801c2fbdee 100755
--- a/tests/nightly/test_kvstore.py
+++ b/tests/nightly/test_kvstore.py
@@ -150,7 +150,7 @@ def pull_init_test(kv):
             assert_almost_equal(o.asnumpy(), exp)
 
     def pull_before_push(kv):
-        for i in range(nrepeat):
+        for _ in range(nrepeat):
            for j in range(len(keys)):
                out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
                kv.pull(keys[j], out=out)
@@ -209,7 +209,7 @@ def verify_residual_1bit(kv, threshold, rate):
             check_diff_to_scalar(o, curr_val)
 
     def push_zeros(kv):
-        for i in range(nrepeat):
+        for _ in range(nrepeat):
            for j in range(len(keys)):
                kv.push(keys[j], [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)])
                out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
@@ -249,7 +249,7 @@ def verify_residual_2bit(kv, threshold, rate):
         return curval
 
    def check_neg(kv, neg, rate, curval):
-        for r in range(nrepeat):
+        for _ in range(nrepeat):
            curval = curval + rate*nworker*neg
            for j in range(len(keys)):
                kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*neg for g in range(nworker)])
diff --git a/tests/nightly/test_np_random.py b/tests/nightly/test_np_random.py
index ccb74e16e6c9..753a23261e18 100644
--- a/tests/nightly/test_np_random.py
+++ b/tests/nightly/test_np_random.py
@@ -130,7 +130,7 @@ def test_np_gamma():
     # Generation test
     trials = 8
     num_buckets = 5
-    for dtype in types:
+    for _ in types:
        for alpha, beta in [(2.0, 3.0), (0.5, 1.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(
                lambda x: ss.gamma.ppf(x, a=alpha, loc=0, scale=beta), num_buckets)
diff --git a/tests/nightly/test_server_profiling.py b/tests/nightly/test_server_profiling.py
index 7d157a3e4189..7024acf73f72 100644
--- a/tests/nightly/test_server_profiling.py
+++ b/tests/nightly/test_server_profiling.py
@@ -35,7 +35,7 @@ def check_default_keys(kv, my_rank):
     nrepeat = 10
     # checks pull after push in loop, because behavior during
    # consecutive pushes doesn't offer any guarantees
-    for i in range(nrepeat):
+    for _ in range(nrepeat):
        kv.push(key, mx.nd.ones(shape, dtype='float32') * (my_rank+1))
        val = mx.nd.zeros(shape, dtype='float32')
        kv.pull(key, out=val)
diff --git a/tests/python/gpu/test_gluon_gpu.py b/tests/python/gpu/test_gluon_gpu.py
index 9b42e7452516..134eab397640 100644
--- a/tests/python/gpu/test_gluon_gpu.py
+++ b/tests/python/gpu/test_gluon_gpu.py
@@ -108,7 +108,7 @@ def test_lstmp():
         assert_almost_equal(layer_output, cell_output, rtol=rtol, atol=atol)
         layer_output.backward()
         cell_output.backward()
-        for k, v in weights.items():
+        for k, _ in weights.items():
            layer_grad = layer_params['l0_' + k].grad()
            cell_grad = cell_params[k].grad()
            print('checking gradient for {}'.format('lstm0_l0_' + k))
@@ -414,7 +414,7 @@ def get_num_devices():
         return
     ndev = 2
     # check with unsync version
-    for i in range(10):
+    for _ in range(10):
        _check_batchnorm_result(mx.np.random.uniform(size=(4, 1, 4, 4)),
                                num_devices=ndev, cuda=True)
 
diff --git a/tests/python/gpu/test_gluon_model_zoo_gpu.py b/tests/python/gpu/test_gluon_model_zoo_gpu.py
index c4cde1d550ce..4d3ba3c2b822 100644
--- a/tests/python/gpu/test_gluon_model_zoo_gpu.py
+++ b/tests/python/gpu/test_gluon_model_zoo_gpu.py
@@ -79,7 +79,7 @@ def test_inference(model_name):
         gpu_param.set_data(cpu_param.data().as_in_context(mx.gpu()))
 
     cpu_data = mx.np.array(data, ctx=mx.cpu())
-    for i in range(5):
+    for _ in range(5):
         # Run inference.
         with autograd.record(train_mode=False):
             cpu_out = cpu_model(cpu_data)
diff --git a/tests/python/gpu/test_kvstore_gpu.py b/tests/python/gpu/test_kvstore_gpu.py
index 4a2530a8a3bb..a756296c512c 100644
--- a/tests/python/gpu/test_kvstore_gpu.py
+++ b/tests/python/gpu/test_kvstore_gpu.py
@@ -65,7 +65,7 @@ def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
             total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
             row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
         else:
-            for i in range(count):
+            for _ in range(count):
                 row_id = np.random.randint(num_rows, size=num_rows)
                 row_ids.append(mx.nd.array(row_id))
         row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 03aef70934b0..9ce005b2f72f 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -180,7 +180,7 @@ def check_fft(shape):
 def test_fft():
     nrepeat = 2
     maxdim = 10
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         for order in [2,4]:
             shape = tuple(np.random.randint(1, maxdim, size=order))
             check_fft(shape)
@@ -339,7 +339,7 @@ def _flatten_list(nested_list):
                           rescale_grad=0.95, momentum=momentum, out=mx_p_w)
 
     def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
-        for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
+        for _, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
             assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
     if dtype == 'float16':
         rtol = 1e-3
@@ -1788,7 +1788,7 @@ def test_autograd_save_memory():
     x.attach_grad()
 
     with mx.autograd.record():
-        for i in range(200):
+        for _ in range(200):
             x = x + 1
             x.wait_to_read()
     x.backward()
@@ -1848,7 +1848,7 @@ def test_cross_device_autograd():
 
     with mx.autograd.record():
         y = x
-        for i in range(3):
+        for _ in range(3):
             y = mx.nd.tanh(y)
         y.backward()
 
@@ -2224,7 +2224,7 @@ def math_square(shape, dtype, check_value):
 
 def run_math(op, shape, dtype="float32", check_value=True):
     run_num = 10
-    for i in range(run_num):
+    for _ in range(run_num):
         if op == 'log':
             math_log(shape=shape, dtype=dtype, check_value=check_value)
         elif op == 'erf':
diff --git a/tests/python/mkl/subgraphs/test_conv_subgraph.py b/tests/python/mkl/subgraphs/test_conv_subgraph.py
index a4efab405d5b..e965fab9e4c7 100644
--- a/tests/python/mkl/subgraphs/test_conv_subgraph.py
+++ b/tests/python/mkl/subgraphs/test_conv_subgraph.py
@@ -254,7 +254,7 @@ class SingleConcat(nn.HybridBlock):
     def __init__(self, input_num, dim, **kwargs):
         super(SingleConcat, self).__init__(**kwargs)
         self.concat = nn.HybridConcatenate(axis=dim)
-        for i in range(input_num):
+        for _ in range(input_num):
             self.concat.add(nn.Identity())
 
     def forward(self, x):
diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py
index aa7a8f4be40b..52de29c59aa3 100644
--- a/tests/python/mkl/test_mkldnn.py
+++ b/tests/python/mkl/test_mkldnn.py
@@ -235,7 +235,7 @@ def test_mkldnn_sum_with_mkldnn_layout():
     num_inputs = [2, 3, 4, 5]
     for i in num_inputs:
         inputs = []
-        for n in range(i):
+        for _ in range(i):
             inputs.append(z)
         y = mx.sym.add_n(*inputs) # (only MKLDNN data input)
         exe = y._simple_bind(ctx=mx.cpu(), x=x_shape, w=w_shape)
diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py
index 640469e0e7f6..60a11d6508aa 100644
--- a/tests/python/quantization/test_quantization.py
+++ b/tests/python/quantization/test_quantization.py
@@ -1312,7 +1312,7 @@ def test_optimal_threshold_adversarial_case():
     hist_edges = []
     min_val = -2
     max_val = 2
-    for i in range(0, 998):
+    for _ in range(0, 998):
         hist.append(0)
     for i in range(0, 999):
         hist_edges.append((max_val - min_val) / 999 * i + min_val)
diff --git a/tests/python/unittest/test_autograd.py b/tests/python/unittest/test_autograd.py
index b9d11f5af62d..554d830512b6 100644
--- a/tests/python/unittest/test_autograd.py
+++ b/tests/python/unittest/test_autograd.py
@@ -387,7 +387,7 @@ def backward(self, dY):
     with mx.autograd.record():
         X = mx.nd.zeros((3, 4))
         #X.attach_grad()  # uncommenting this line works
-        for i in range(5):
+        for _ in range(5):
             f = Foo()
             X = f(X)
         X.wait_to_read()
@@ -446,7 +446,7 @@ def backward(self, dY):
     with mx.autograd.record():
         X = mx.np.zeros((3, 4))
         #X.attach_grad()  # uncommenting this line works
-        for i in range(5):
+        for _ in range(5):
             f = Foo()
             X = f(X)
         X.wait_to_read()
diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py
index 1a2a6ccf1c63..20833dc115e1 100644
--- a/tests/python/unittest/test_contrib_control_flow.py
+++ b/tests/python/unittest/test_contrib_control_flow.py
@@ -990,7 +990,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms,
     e = out._bind(ctx=default_context(), args=arg_dict)
     # the inputs to forward and backward are the same so forward and backward
     # should always return the same outputs.
-    for i in range(num_iters):
+    for _ in range(num_iters):
         e.forward(is_train=is_train)
         if (is_train):
             # backward
@@ -1000,7 +1000,7 @@ def verify_foreach(step, in_syms, state_syms, free_syms,
 
     # Below we use imperative to reimplement foreach and compute its gradients.
     res = []
-    for i in range(len(_as_list(out_grads[0]))):
+    for _ in range(len(_as_list(out_grads[0]))):
         res.append([])
     for arr in _as_list(in_arrs):
         arr.attach_grad()
diff --git a/tests/python/unittest/test_contrib_gluon_data_vision.py b/tests/python/unittest/test_contrib_gluon_data_vision.py
index 83b14471af3a..2ed11e54a9f1 100644
--- a/tests/python/unittest/test_contrib_gluon_data_vision.py
+++ b/tests/python/unittest/test_contrib_gluon_data_vision.py
@@ -92,7 +92,7 @@ def test_imageiter(self):
                 rand_crop=1, rand_gray=0.1, rand_mirror=True)
         ]
         for it in imageiter_list:
-            for batch in it:
+            for _ in it:
                 pass
 
     def test_image_bbox_iter(self):
@@ -141,6 +141,6 @@ def test_bbox_augmenters(self):
             pca_noise=0.1, hue=0.1, inter_method=10,
             max_aspect_ratio=5, area_range=(0.1, 4.0),
             max_attempts=50)
-        for batch in det_iter:
+        for _ in det_iter:
             pass
         mx.npx.waitall()
diff --git a/tests/python/unittest/test_contrib_io.py b/tests/python/unittest/test_contrib_io.py
index ada65aef18bb..5adff933cebd 100644
--- a/tests/python/unittest/test_contrib_io.py
+++ b/tests/python/unittest/test_contrib_io.py
@@ -33,7 +33,7 @@ def test_mnist_batches(batch_size, expected, last_batch='discard'):
         assert batch.label[0].shape == (batch_size,)
     count = 0
     test_iter.reset()
-    for batch in test_iter:
+    for _ in test_iter:
         count += 1
     assert count == expected, "expected {} batches, given {}".format(expected, count)
 
diff --git a/tests/python/unittest/test_dgl_graph.py b/tests/python/unittest/test_dgl_graph.py
index 89533fb119aa..5337f907021e 100644
--- a/tests/python/unittest/test_dgl_graph.py
+++ b/tests/python/unittest/test_dgl_graph.py
@@ -227,7 +227,7 @@ def test_subgraph():
             assert np.sum(remain == row_start) == len(remain)
             break
         row = subgs[0].indices[row_start:row_end]
-        for j, subv2 in enumerate(row.asnumpy()):
+        for _, subv2 in enumerate(row.asnumpy()):
            v2 = vertices[subv2]
            assert sp_g[v1, v2] == sp_subg[subv1, subv2]
 
diff --git a/tests/python/unittest/test_engine.py b/tests/python/unittest/test_engine.py
index 642d9e1f169e..296bc2b7061c 100644
--- a/tests/python/unittest/test_engine.py
+++ b/tests/python/unittest/test_engine.py
@@ -28,7 +28,7 @@ def test_bulk():
         x.wait_to_read()
         x += 1
     assert (x.asnumpy() == 4).all()
-    for i in range(100):
+    for _ in range(100):
         x += 1
     assert (x.asnumpy() == 104).all()
 
diff --git a/tests/python/unittest/test_executor.py b/tests/python/unittest/test_executor.py
index fe30b3bb9dad..b735c8322b27 100644
--- a/tests/python/unittest/test_executor.py
+++ b/tests/python/unittest/test_executor.py
@@ -77,7 +77,7 @@ def test_bind():
                            'MXNET_EXEC_BULK_EXEC_TRAIN': enable_bulking}):
             nrepeat = 10
             maxdim = 4
-            for repeat in range(nrepeat):
+            for _ in range(nrepeat):
                 for dim in range(1, maxdim):
                     check_bind_with_uniform(lambda x, y: x + y,
                                             lambda g, x, y: (g, g),
@@ -107,7 +107,7 @@ def test_bind():
 def test_dot():
     nrepeat = 10
     maxdim = 4
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         s =tuple(np.random.randint(1, 200, size=3))
         check_bind_with_uniform(lambda x, y: np.dot(x, y),
                                 lambda g, x, y: (np.dot(g, y.T), np.dot(x.T, g)),
@@ -115,7 +115,7 @@ def test_dot():
                                 lshape=(s[0], s[1]),
                                 rshape=(s[1], s[2]),
                                 sf = mx.symbol.dot)
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         s =tuple(np.random.randint(1, 200, size=1))
         check_bind_with_uniform(lambda x, y: np.dot(x, y),
                                 lambda g, x, y: (g * y, g * x),
diff --git a/tests/python/unittest/test_gluon.py b/tests/python/unittest/test_gluon.py
index 6d5e40c31ecf..791ecefa6c58 100644
--- a/tests/python/unittest/test_gluon.py
+++ b/tests/python/unittest/test_gluon.py
@@ -692,7 +692,7 @@ def _syncParameters(bn1, bn2, ctx):
     # check with unsync version
     for shape in [(batch_size, 2), (batch_size, 3, 4), (batch_size, 4, 4, 4), (batch_size, 5, 6, 4, 4)]:
         print(str((ndev, cuda, shape)))
-        for i in range(10):
+        for _ in range(10):
             _check_batchnorm_result(mx.np.random.uniform(size=shape,
                                                          ctx=mx.cpu(0)),
                                     num_devices=ndev, cuda=cuda)
@@ -1741,7 +1741,7 @@ def forward(self, in1):
     for param in params:
         t = TestIOForward()
         t.hybridize(**param)
-        for i in range(5):
+        for _ in range(5):
             d1.attach_grad()
             out_grad = mx.np.random.uniform(size=(10))
             res = t(d1)
@@ -1751,7 +1751,7 @@ def forward(self, in1):
     for param in params:
         t = TestIOBackward()
         t.hybridize(**param)
-        for i in range(5):
+        for _ in range(5):
             d1.attach_grad()
             d2.attach_grad()
             out_grad = mx.np.random.uniform(size=(10))
@@ -1946,13 +1946,13 @@ def __init__(self, **kwargs):
             super(Net, self).__init__(**kwargs)
             self.concat = nn.HybridConcatenate(axis=check_dim)
-            for i in range(input_num):
+            for _ in range(input_num):
                 self.concat.add(gluon.nn.Conv2D(chn_num, (kernel, kernel)))
 
         def forward(self, x):
             return self.concat(x)
 
 
-    for s in range(len(shape_list)):
+    for _ in range(len(shape_list)):
         shape = (batch_size,) + (3,) + shape_list[i]
         x = mx.np.random.uniform(-1.0, 1.0, size=shape)
         for i in range(len(chn_list)):
diff --git a/tests/python/unittest/test_gluon_data.py b/tests/python/unittest/test_gluon_data.py
index fe0cf7dfe665..aadc372c9160 100644
--- a/tests/python/unittest/test_gluon_data.py
+++ b/tests/python/unittest/test_gluon_data.py
@@ -218,7 +218,7 @@ def test_image_list_dataset_handle(prepare_record):
 def test_list_dataset():
     for num_worker in range(0, 3):
         data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)
-        for d, l in data:
+        for _ in data:
             pass
 
 
@@ -325,14 +325,14 @@ def _batchify(data):
 def test_multi_worker_forked_data_loader():
     data = _Dummy(False)
     loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)
-    for epoch in range(1):
-        for i, data in enumerate(loader):
+    for _ in range(1):
+        for _ in loader:
             pass
 
     data = _Dummy(True)
     loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)
-    for epoch in range(1):
-        for i, data in enumerate(loader):
+    for _ in range(1):
+        for _ in loader:
             pass
 
 def test_multi_worker_dataloader_release_pool():
@@ -382,12 +382,12 @@ def test_dataset_filter():
     a = mx.gluon.data.SimpleDataset([i for i in range(length)])
     a_filtered = a.filter(lambda x: x % 10 == 0)
     assert(len(a_filtered) == 10)
-    for idx, sample in enumerate(a_filtered):
+    for sample in a_filtered:
         assert sample % 10 == 0
     a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)
     assert(len(a_xform_filtered) == 10)
     # the filtered data is already transformed
-    for idx, sample in enumerate(a_xform_filtered):
+    for sample in a_xform_filtered:
         assert sample % 10 == 0
 
 def test_dataset_filter_handle():
@@ -395,12 +395,12 @@ def test_dataset_filter_handle():
     length = 100
     a = mx.gluon.data.SimpleDataset(np.arange(length))
     a_filtered = a.filter(lambda x: x % 10 == 0).__mx_handle__()
     assert(len(a_filtered) == 10)
-    for idx, sample in enumerate(a_filtered):
+    for sample in a_filtered:
         assert sample % 10 == 0
     a_xform_filtered = a.transform(lambda x: x + 1).filter(lambda x: x % 10 == 0)
     assert(len(a_xform_filtered) == 10)
     # the filtered data is already transformed
-    for idx, sample in enumerate(a_xform_filtered):
+    for sample in a_xform_filtered:
         assert sample % 10 == 0
 
 def test_dataset_shard():
@@ -417,7 +417,7 @@
     assert len(shard_3) == 2
     total = 0
     for shard in [shard_0, shard_1, shard_2, shard_3]:
-        for idx, sample in enumerate(shard):
+        for sample in shard:
            total += sample
     assert total == sum(a)
 
@@ -435,7 +435,7 @@ def test_dataset_shard_handle():
     assert len(shard_3) == 2
     total = 0
     for shard in [shard_0, shard_1, shard_2, shard_3]:
-        for idx, sample in enumerate(shard):
+        for sample in shard:
            total += sample
     assert total == sum(a)
 
@@ -451,7 +451,7 @@ def test_dataset_take():
     assert len(a_take_10) == count
     expected_total = sum([i for i in range(count)])
     total = 0
-    for idx, sample in enumerate(a_take_10):
+    for sample in a_take_10:
         assert sample < count
         total += sample
     assert total == expected_total
@@ -460,7 +460,7 @@ def test_dataset_take():
     assert len(a_xform_take_10) == count
     expected_total = sum([i * 10 for i in range(count)])
     total = 0
-    for idx, sample in enumerate(a_xform_take_10):
+    for sample in a_xform_take_10:
         assert sample < count * 10
         total += sample
     assert total == expected_total
@@ -477,7 +477,7 @@ def test_dataset_take_handle():
     assert len(a_take_10) == count
     expected_total = sum([i for i in range(count)])
     total = 0
-    for idx, sample in enumerate(a_take_10):
+    for sample in a_take_10:
         assert sample < count
         total += sample
     assert total == expected_total
@@ -486,7 +486,7 @@ def test_dataset_take_handle():
     assert len(a_xform_take_10) == count
     expected_total = sum([i for i in range(count)])
     total = 0
-    for idx, sample in enumerate(a_xform_take_10):
+    for sample in a_xform_take_10:
         assert sample < count
         total += sample
     assert total == expected_total
diff --git a/tests/python/unittest/test_gluon_probability_v2.py b/tests/python/unittest/test_gluon_probability_v2.py
index 33f8e27e5cf9..c25fa7115e37 100644
--- a/tests/python/unittest/test_gluon_probability_v2.py
+++ b/tests/python/unittest/test_gluon_probability_v2.py
@@ -77,7 +77,7 @@ def forward(self, low, high, *args):
         net = TestUniform("log_prob")
         if hybridize:
             net.hybridize()
-        for i in range(2):
+        for _ in range(2):
             mx_out = net(low, high, samples).asnumpy()
             np_out = ss.uniform(low.asnumpy(),
                                 (high - low).asnumpy()).logpdf(samples.asnumpy())
diff --git a/tests/python/unittest/test_gluon_rnn.py b/tests/python/unittest/test_gluon_rnn.py
index 5a2661dddb54..6af620969ff8 100644
--- a/tests/python/unittest/test_gluon_rnn.py
+++ b/tests/python/unittest/test_gluon_rnn.py
@@ -86,7 +86,7 @@ def test_lstmp():
                                    num_layers=num_layers, layout='TNC', bidirectional=False)
 
     stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
-    for i in range(num_layers):
+    for _ in range(num_layers):
         stack_layer.add(gluon.rnn.LSTMPCell(hidden_size,
                                             projection_size=projection_size))
     fused_layer.initialize()
@@ -118,7 +118,7 @@ def test_lstmp():
                                    num_layers=num_layers, layout='TNC', bidirectional=True)
 
     stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
-    for i in range(num_layers):
+    for _ in range(num_layers):
         stack_layer.add(
             gluon.rnn.BidirectionalCell(gluon.rnn.LSTMPCell(hidden_size,
                                                             projection_size=projection_size),
@@ -738,7 +738,7 @@ def check_rnn_unidir_layer_gradients(mode, input_size, hidden_size, num_layers,
     fused_layer.initialize()
 
     stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
-    for n in range(num_layers):
+    for _ in range(num_layers):
         stack_layer.add(stack_op(hidden_size))
     stack_layer.initialize()
     check_rnn_consistency(fused_layer, stack_layer, loss, input_size, hidden_size)
@@ -751,7 +751,7 @@ def check_rnn_bidir_layer_gradients(mode, input_size, hidden_size, num_layers, l
     fused_layer.initialize()
 
     stack_layer = mx.gluon.rnn.HybridSequentialRNNCell()
-    for n in range(num_layers):
+    for _ in range(num_layers):
         stack_layer.add(gluon.rnn.BidirectionalCell(stack_op(hidden_size),
                                                     stack_op(hidden_size)))
     stack_layer.initialize()
diff --git a/tests/python/unittest/test_gluon_trainer.py b/tests/python/unittest/test_gluon_trainer.py
index e454be848ec1..e25685662805 100644
--- a/tests/python/unittest/test_gluon_trainer.py
+++ b/tests/python/unittest/test_gluon_trainer.py
@@ -285,13 +285,13 @@ def test_gluon_trainer_param_order():
     net = mx.gluon.nn.Sequential()
     # layers may be added in a random order for all workers
     layers = {'ones_': 1, 'zeros_': 0}
-    for name, init in layers.items():
+    for _, init in layers.items():
         net.add(mx.gluon.nn.Dense(10, in_units=10, weight_initializer=mx.init.Constant(init),
                                   use_bias=False))
     net.initialize()
     params = net.collect_params()
     trainer = gluon.Trainer(params, 'sgd')
-    for name, init in layers.items():
+    for name, _ in layers.items():
         expected_idx = 0 if name == 'ones_' else 1
         expected_name = '{}.weight'.format(expected_idx)
         assert trainer._params[expected_idx].name == params[expected_name].name
diff --git a/tests/python/unittest/test_image.py b/tests/python/unittest/test_image.py
index 775721980050..bbc689b234be 100644
--- a/tests/python/unittest/test_image.py
+++ b/tests/python/unittest/test_image.py
@@ -63,7 +63,7 @@ def _test_imageiter_last_batch(imageiter_list, assert_data_shape):
     # test last batch handle(discard)
     test_iter = imageiter_list[1]
     i = 0
-    for batch in test_iter:
+    for _ in test_iter:
         i += 1
     assert i == 5
     # test last_batch_handle(pad)
@@ -294,7 +294,7 @@ def test_augmenters(self):
             resize=640, rand_crop=True, rand_resize=True, rand_mirror=True, mean=True,
             std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
             hue=0.1, pca_noise=0.1, rand_gray=0.2, inter_method=10, path_root=self.IMAGES_DIR, shuffle=True)
-        for batch in test_iter:
+        for _ in test_iter:
             pass
 
     def test_image_detiter(self):
@@ -346,7 +346,7 @@ def test_det_augmenters(self):
             pca_noise=0.1, hue=0.1, inter_method=10, min_object_covered=0.5,
             aspect_ratio_range=(0.2, 5), area_range=(0.1, 4.0),
             min_eject_coverage=0.5, max_attempts=50)
-        for batch in det_iter:
+        for _ in det_iter:
             pass
 
     def test_random_size_crop(self):
diff --git a/tests/python/unittest/test_io.py b/tests/python/unittest/test_io.py
index 72da325ddb2d..c16c7e6ad2da 100644
--- a/tests/python/unittest/test_io.py
+++ b/tests/python/unittest/test_io.py
@@ -55,7 +55,7 @@ def test_MNISTIter(tmpdir):
     # test_loop
     nbatch = 60000 / batch_size
     batch_count = 0
-    for batch in train_dataiter:
+    for _ in train_dataiter:
         batch_count += 1
     assert(nbatch == batch_count)
     # test_reset
@@ -104,7 +104,7 @@ def test_inter_methods_in_augmenter(inter_method, cifar10):
                                   data_shape=(3, 28, 28),
                                   batch_size=100,
                                   inter_method=inter_method)
-    for batch in dataiter:
+    for _ in dataiter:
         pass
 
 def test_image_iter_exception(cifar10):
@@ -121,7 +121,7 @@ def test_image_iter_exception(cifar10):
                                   prefetch_buffer=1)
         labelcount = [0 for i in range(10)]
         batchcount = 0
-        for batch in dataiter:
+        for _ in dataiter:
            pass
 
 def _init_NDArrayIter_data(data_type, is_image=False):
@@ -345,7 +345,7 @@ def check_libSVMIter_synthetic():
         first = mx.nd.array([[0.5, 0., 1.2], [0., 0., 0.], [0.6, 2.4, 1.2]])
         second = mx.nd.array([[0., 0., -1.2], [0.5, 0., 1.2], [0., 0., 0.]])
         i = 0
-        for batch in iter(data_train):
+        for _ in iter(data_train):
             expected = first.asnumpy() if i == 0 else second.asnumpy()
             data = data_train.getdata()
             data.check_format(True)
@@ -369,7 +369,7 @@ def check_libSVMIter_news_data():
         path = os.path.join(data_dir, news_metadata['name'])
         data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
                                       batch_size=batch_size)
-        for epoch in range(2):
+        for _ in range(2):
            num_batches = 0
            for batch in data_train:
                # check the range of labels
@@ -401,7 +401,7 @@ def check_libSVMIter_exception():
         data_dir = os.path.join(str(tmpdir), 'data')
         data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
                                       data_shape=(3, ), label_shape=(3, ), batch_size=3)
-        for batch in iter(data_train):
+        for _ in iter(data_train):
             data_train.get_data().asnumpy()
 
     check_libSVMIter_synthetic()
@@ -432,16 +432,16 @@ def check_CSVIter_synthetic(dtype='float32'):
         if dtype is 'int64':
             entry_str = '2147483648'
         with open(data_path, 'w') as fout:
-            for i in range(1000):
+            for _ in range(1000):
                 fout.write(','.join([entry_str for _ in range(8*8)]) + '\n')
         with open(label_path, 'w') as fout:
-            for i in range(1000):
+            for _ in range(1000):
                 fout.write('0\n')
 
         data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8, 8),
                                    label_csv=label_path, batch_size=100, dtype=dtype)
        expected = mx.nd.ones((100, 8, 8), dtype=dtype) * int(entry_str)
-        for batch in iter(data_train):
+        for _ in iter(data_train):
            data_batch = data_train.getdata()
            assert_almost_equal(data_batch.asnumpy(), expected.asnumpy())
            assert data_batch.asnumpy().dtype == expected.asnumpy().dtype
diff --git a/tests/python/unittest/test_kvstore.py b/tests/python/unittest/test_kvstore.py
index 61ae409babf8..3fcd15b03e7b 100644
--- a/tests/python/unittest/test_kvstore.py
+++ b/tests/python/unittest/test_kvstore.py
@@ -74,7 +74,7 @@ def check_row_sparse_pull(kv, count):
         vals = []
         row_ids = []
         all_row_ids = np.arange(num_rows)
-        for i in range(count):
+        for _ in range(count):
             vals.append(mx.nd.zeros(shape).tostype('row_sparse'))
             row_id = np.random.randint(num_rows, size=num_rows)
             row_ids.append(mx.nd.array(row_id).reshape((2, num_rows//2)))
@@ -254,7 +254,7 @@ def check_updater(kv, key, key_list, stype):
         outs = [[mx.nd.empty(shape, d) for d in devs]] * len(key_list)
 
         num_push = 4
-        for i in range(num_push):
+        for _ in range(num_push):
             kv.push(key_list, vals)
 
         kv.pull(key_list, out=outs)
diff --git a/tests/python/unittest/test_loss.py b/tests/python/unittest/test_loss.py
index 103ccc663d03..8c28691c2e19 100644
--- a/tests/python/unittest/test_loss.py
+++ b/tests/python/unittest/test_loss.py
@@ -126,7 +126,7 @@ def test_sdml_loss():
     model.initialize(mx.init.Xavier(), ctx=mx.current_context())
     trainer = gluon.Trainer(model.collect_params(),
                             'adam', {'learning_rate' : 0.1})
-    for i in range(EPOCHS): # Training loop
+    for _ in range(EPOCHS): # Training loop
         data_iter.reset()
         for iter_batch in data_iter:
             batch = [datum.as_in_context(mx.current_context()) for datum in iter_batch.data]
diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py
index b93529065485..a2aae07bd362 100644
--- a/tests/python/unittest/test_ndarray.py
+++ b/tests/python/unittest/test_ndarray.py
@@ -175,7 +175,7 @@ def test_ndarray_elementwise():
     maxdim = 4
     all_type = [np.float32, np.float64, np.float16, np.uint8, np.int8, np.int32, np.int64]
     real_type = [np.float32, np.float64, np.float16]
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         for dim in range(1, maxdim):
             check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
             check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
@@ -295,7 +295,7 @@ def test_ndarray_choose():
     npy = np.arange(np.prod(shape)).reshape(shape)
     arr = mx.nd.array(npy)
     nrepeat = 3
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         indices = np.random.randint(shape[1], size=shape[0])
         assert same(npy[np.arange(shape[0]), indices],
                     mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
@@ -307,7 +307,7 @@ def test_ndarray_fill():
     arr = mx.nd.array(npy)
     new_npy = npy.copy()
     nrepeat = 3
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         indices = np.random.randint(shape[1], size=shape[0])
         val = np.random.randint(shape[1], size=shape[0])
         new_npy[:] = npy
@@ -321,7 +321,7 @@ def test_ndarray_onehot():
     npy = np.arange(np.prod(shape)).reshape(shape)
     arr = mx.nd.array(npy)
     nrepeat = 3
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         indices = np.random.randint(shape[1], size=shape[0])
         npy[:] = 0.0
         npy[np.arange(shape[0]), indices] = 1.0
@@ -374,10 +374,10 @@ def test_ndarray_pickle():
 def test_ndarray_saveload(save_fn):
     nrepeat = 10
     fname = 'tmp_list'
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         data = []
         # test save/load as list
-        for i in range(10):
+        for _ in range(10):
             data.append(random_ndarray(np.random.randint(1, 5)))
         if save_fn is mx.nd.save:
             save_fn(fname, data)
@@ -437,7 +437,7 @@ def test_ndarray_load_fortran_order(tmp_path):
 
 def test_ndarray_legacy_load():
     data = []
-    for i in range(6):
+    for _ in range(6):
         data.append(mx.nd.arange(128))
     path = os.path.dirname(os.path.realpath(__file__))
     legacy_data = mx.nd.load(os.path.join(path, 'legacy_ndarray.v0'))
@@ -452,7 +452,7 @@ def test_buffer_load():
     for repeat in range(nrepeat):
         # test load_buffer as list
         data = []
-        for i in range(10):
+        for _ in range(10):
            data.append(random_ndarray(np.random.randint(1, 5)))
        fname = os.path.join(tmpdir, 'list_{0}.param'.format(repeat))
        mx.nd.save(fname, data)
@@ -616,7 +616,7 @@ def test_reduce_inner(numpy_reduce_func, nd_reduce_func, multi_axes,
         dtypes = [(np.float16, 1),
                   (np.float32, 4),
                   (np.double, 6)]
-        for i in range(sample_num):
+        for _ in range(sample_num):
             dtype, decimal = random.choice(dtypes)
             ndim = np.random.randint(1, 6)
             shape = np.random.randint(1, 11, size=ndim)
@@ -857,7 +857,7 @@ def test_errors():
 
 
 def test_arange():
-    for i in range(5):
+    for _ in range(5):
         start = np.random.rand() * 10
         stop = start + np.random.rand() * 100
         step = np.random.rand() * 4
@@ -873,7 +873,7 @@ def test_arange():
 
 
 def test_linspace():
-    for i in range(5):
+    for _ in range(5):
         start = np.random.rand() * 100
         stop = np.random.rand() * 100
         num = np.random.randint(20)
@@ -1791,7 +1791,7 @@ def test_ndarray_cpu_shared_ctx():
 
 @pytest.mark.serial
 def test_dlpack():
-    for dtype in [np.float32, np.int32]:
+    for _ in [np.float32, np.int32]:
         for shape in [(3, 4, 5, 6), (2, 10), (15,)]:
             a = mx.nd.random.uniform(shape = shape)
             a_np = a.copy()
diff --git a/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py b/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py
index 1722a5f97cc9..feaf17e406e1 100644
--- a/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py
+++ b/tests/python/unittest/test_numpy_contrib_gluon_data_vision.py
@@ -92,7 +92,7 @@ def test_imageiter(self):
                 rand_crop=1, rand_gray=0.1, rand_mirror=True)
         ]
         for it in imageiter_list:
-            for batch in it:
+            for _ in it:
                 pass
 
     @use_np
diff --git a/tests/python/unittest/test_numpy_gluon.py b/tests/python/unittest/test_numpy_gluon.py
index 6c1fa670317f..c79b1633fea2 100644
--- a/tests/python/unittest/test_numpy_gluon.py
+++ b/tests/python/unittest/test_numpy_gluon.py
@@ -41,7 +41,7 @@ def check_block_params(x, TestBlock, hybridize, expected_type, initializer):
         net.hybridize()
     net(x)
     params = net.collect_params()
-    for k, v in params.items():
+    for _, v in params.items():
         assert type(v.data()) is expected_type
 
 @use_np
@@ -101,7 +101,7 @@ def forward(self, pred, label):
                                 'sgd',
                                 {'learning_rate': 1e-3, 'momentum': 0.9})
 
-    for t in range(2):
+    for _ in range(2):
         with autograd.record():
             output = regressor(x)  # output is a type of np.ndarray because np.dot is the last op in the network
             loss = total_loss(output, y)  # loss is a scalar np.ndarray
diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py
index 1b8fe4d132c9..db643d1090dc 100644
--- a/tests/python/unittest/test_numpy_interoperability.py
+++ b/tests/python/unittest/test_numpy_interoperability.py
@@ -421,7 +421,7 @@ def _add_workload_swapaxes():
     # assertRaises(np.AxisError, np.swapaxes, -5, 0)
     for i in range(-4, 4):
         for j in range(-4, 4):
-            for k, src in enumerate((a, b)):
+            for src in (a, b):
                 OpArgMngr.add_workload('swapaxes', src, i, j)
 
 
@@ -448,7 +448,7 @@ def _add_workload_tile():
     shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
     for s in shape:
         b = np.random.randint(0, 10, size=s)
-        for r in reps:
+        for _ in reps:
             # RuntimeError to be tracked
             # where s = (3, 4, 3), r = (2, 3, 2)
             # OpArgMngr.add_workload('tile', b, r)
diff --git a/tests/python/unittest/test_numpy_loss.py b/tests/python/unittest/test_numpy_loss.py
index d452566f9c26..93fd2f9a3fc2 100644
--- a/tests/python/unittest/test_numpy_loss.py
+++ b/tests/python/unittest/test_numpy_loss.py
@@ -220,7 +220,7 @@ def test_sdml_loss():
     model.initialize(mx.init.Xavier(), ctx=mx.current_context())
     trainer = gluon.Trainer(model.collect_params(),
                             'adam', {'learning_rate' : 0.1})
-    for i in range(EPOCHS): # Training loop
+    for _ in range(EPOCHS): # Training loop
         data_iter.reset()
         for iter_batch in data_iter:
             batch = [datum.as_in_ctx(mx.current_context()).as_np_ndarray() for datum in iter_batch.data]
diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py
index c6221c5c9eae..559b8a575f5d 100644
--- a/tests/python/unittest/test_numpy_ndarray.py
+++ b/tests/python/unittest/test_numpy_ndarray.py
@@ -1129,7 +1129,7 @@ def test_np_multinomial():
             if pvals_mx_np_array:
                 pvals = mx.np.array(pvals)
             x = np.random.multinomial(small_exp, pvals)
-            for i in range(total_exp // small_exp):
+            for _ in range(total_exp // small_exp):
                 x = x + np.random.multinomial(20, pvals)
             freq = (x.asnumpy() / _np.float32(total_exp)).reshape((-1, len(pvals)))
             for i in range(freq.shape[0]):
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 053fa9d64daa..e21e8fdc49b8 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -1437,7 +1437,7 @@ def index_add_bwd(out_grad, a_grad, ind, val_grad, ind_ndim, ind_num, grad_req_a
             ind = onp.array(ind).astype(onp.int32)
             # case: val is scalar
             configs.append(tuple([shape, ind, (), ind_ndim, ind_num]))
-            for val_ndim in range(1, 5 - ind_ndim):
+            for _ in range(1, 5 - ind_ndim):
                 val_shape = [1 if onp.random.randint(0, 5)==0 else ind_num]
                 for val_dim in range(ind_ndim, 4):
                     val_shape.append(1 if onp.random.randint(0, 5)==0 else shape[val_dim])
@@ -1600,7 +1600,7 @@ def index_update_bwd(out_grad, a_grad, ind, val_grad, ind_ndim, ind_num, grad_re
             ind = onp.array(ind).astype(onp.int32)
             # case: val is scalar
             configs.append(tuple([shape, ind, (), ind_ndim, ind_num]))
-            for val_ndim in range(1, 5 - ind_ndim):
+            for _ in range(1, 5 - ind_ndim):
                 val_shape = [1 if onp.random.randint(0, 5)==0 else ind_num]
                 for val_dim in range(ind_ndim, 4):
                     val_shape.append(1 if onp.random.randint(0, 5)==0 else shape[val_dim])
@@ -4831,15 +4831,16 @@ def shape_formatter(s):
             return (s,)
 
     class TestSampleN(HybridBlock):
-        def __init__(self, shape, op_name):
+        def __init__(self, shape, op_name, dtype):
             super(TestSampleN, self).__init__()
             self._shape = shape
             self._op_name = op_name
+            self._dtype = dtype
 
         def forward(self, param1, param2):
             op = getattr(npx.random, self._op_name, None)
             assert op is not None
-            return op(param1, param2, batch_shape=self._shape)
+            return op(param1, param2, batch_shape=self._shape, dtype=self._dtype)
 
     batch_shapes = [(10,), (2, 3), 6, ()]
     event_shapes = [(), (2,), (2,2)]
@@ -4848,7 +4849,7 @@ def forward(self, param1, param2):
 
     for bshape, eshape, dtype, op in itertools.product(batch_shapes, event_shapes, dtypes, op_names):
         for hybridize in [True, False]:
-            net = TestSampleN(bshape, op)
+            net = TestSampleN(bshape, op, dtype)
             if hybridize:
                 net.hybridize()
             expected_shape = (shape_formatter(bshape) +
@@ -5498,7 +5499,7 @@ def test_sample_without_replacement(sampler, num_classes, shape, num_trials, wei
     bins = onp.zeros((num_classes))
     expected_freq = (weight.asnumpy() if weight is not None else
                      onp.array([1 / num_classes] * num_classes))
-    for i in range(num_trials):
+    for _ in range(num_trials):
         out = sampler(num_classes, 1, replace=False, p=weight).item()
         bins[out] += 1
     bins /= num_trials
@@ -8396,7 +8397,7 @@ def dbg(name, data):
                                                dtype=dtype))
         for optimize in [False, True]:
             x = []
-            for (iop, op) in enumerate(operands):
+            for iop in range(len(operands)):
                 x.append(np.array(x_np[iop], dtype=dtype))
                 x[-1].attach_grad()
             test_einsum = TestEinsum(subscripts, optimize)
@@ -8410,10 +8411,10 @@ def dbg(name, data):
             assert_almost_equal(out_mx.asnumpy(), expected_np, rtol=rtol, atol=atol)
             out_mx.backward()
             cur_grad = []
-            for (iop, op) in enumerate(x):
+            for op in x:
                 cur_grad.append(op.grad.asnumpy())
             grad.append(cur_grad)
-        for (iop, op) in enumerate(grad[0]):
+        for iop in range(len(grad[0])):
             assert_almost_equal(grad[0][iop], grad[1][iop], rtol=rtol, atol=atol)
 
 
@@ -8482,7 +8483,7 @@ def forward(self, A, **kwargs):
             if (type(shape) == int):
                 pw += (2,3)
             else:
-                for i in range(len(shape)):
+                for _ in range(len(shape)):
                     pw += ((2,3),)
             test_pad = TestPad(pw, m)
             if hybridize:
@@ -8934,7 +8935,7 @@ def forward(self, a):
 def test_np_diff():
     def np_diff_backward(ograd, n, axis):
         res = ograd
-        for i in range(n):
+        for _ in range(n):
             res = onp.negative(onp.diff(res, n=1, axis=axis, prepend=0, append=0))
         return res
 
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index cbae11e5b452..610b70032853 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -193,7 +193,7 @@ def check_elementwise_sum_with_shape(shape, n):
 def test_elementwise_sum():
     nrepeat = 2
     maxdim = 4
-    for repeat in range(nrepeat):
+    for _ in range(nrepeat):
         for dim in range(1, maxdim):
             shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
             check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
@@ -2518,7 +2518,7 @@ def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym,
 
 def test_broadcast():
     sample_num = 200
-    for i in range(sample_num):
+    for _ in range(sample_num):
         # Generate random data that has ndim between 1-7 and all the shape dims between 1-5
         ndim = np.random.randint(1, 6)
         target_shape = np.random.randint(1, 6, size=(ndim,))
@@ -2561,7 +2561,7 @@ def test_broadcasting_ele(sym_bcast):
 
 def test_transpose():
     for ndim in range(1, 10):
-        for t in range(5):
+        for _ in range(5):
             dims = list(np.random.randint(1, 5, size=ndim))
             axes = list(range(ndim))
             random.shuffle(axes)
@@ -2634,12 +2634,12 @@ def test_expand_dims():
 
 def test_crop():
     for ndim in range(1, 6):
-        for t in range(5):
+        for _ in range(5):
             dims = []
             begin = []
             end = []
             idx = []
-            for i in range(ndim):
+            for _ in range(ndim):
                 d = random.randint(1, 5)
                 b = random.randint(0, d-1)
                 e = random.randint(b+1, d)
@@ -2779,7 +2779,7 @@ def test_broadcast_like_different_types():
 
 def test_flip():
     for ndim in range(1, 6):
-        for t in range(5):
+        for _ in range(5):
             dims = [random.randint(1,10) for i in range(ndim)]
             axis = random.randint(0, ndim-1)
             idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
@@ -4179,56 +4179,56 @@ def grad_helper(grad_in, axis, idx):
             grad_in[:, :, :, :, idx] += 1.0
         else:
             raise ValueError("axis %d is not supported..." % axis)
-
+
     for axis in range(-data_ndim, data_ndim):
-      data_shape = ()
-      for _ in range(data_ndim):
-        data_shape += (np.random.randint(low=1, high=5), )
-      idx_shape = ()
-      for _ in range(idx_ndim):
-        idx_shape += (np.random.randint(low=1, high=5), )
-
-      data = mx.sym.Variable('a')
-      idx = mx.sym.Variable('indices')
-      idx = mx.sym.BlockGrad(idx)
-      result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
-      exe = result._simple_bind(default_context(), a=data_shape,
-                                indices=idx_shape)
-      data_real = np.random.normal(size=data_shape).astype('float32')
-      if out_of_range:
-        idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
-        if mode == 'raise':
-          idx_real[idx_real == 0] = 1
-          idx_real *= data_shape[axis]
-      else:
-        idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
-      if axis < 0:
-        axis += len(data_shape)
+        data_shape = ()
+        for _ in range(data_ndim):
+            data_shape += (np.random.randint(low=1, high=5), )
+        idx_shape = ()
+        for _ in range(idx_ndim):
+            idx_shape += (np.random.randint(low=1, high=5), )
+
+        data = mx.sym.Variable('a')
+        idx = mx.sym.Variable('indices')
+        idx = mx.sym.BlockGrad(idx)
+        result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
+        exe = result._simple_bind(default_context(), a=data_shape,
+                                  indices=idx_shape)
+        data_real = np.random.normal(size=data_shape).astype('float32')
+        if out_of_range:
+            idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
+            if mode == 'raise':
+                idx_real[idx_real == 0] = 1
+                idx_real *= data_shape[axis]
+        else:
+            idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
+        if axis < 0:
+            axis += len(data_shape)
 
-      grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
-      grad_in = np.zeros(data_shape, dtype='float32')
+        grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
+        grad_in = np.zeros(data_shape, dtype='float32')
 
-      exe.arg_dict['a'][:] = mx.nd.array(data_real)
-      exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
-      exe.forward(is_train=True)
-      if out_of_range and mode == 'raise':
-        try:
-          mx_out = exe.outputs[0].asnumpy()
-        except MXNetError as e:
-          return
-        else:
-          # Did not raise exception
-          assert False, "did not raise %s" % MXNetError.__name__
+        exe.arg_dict['a'][:] = mx.nd.array(data_real)
+        exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
+        exe.forward(is_train=True)
+        if out_of_range and mode == 'raise':
+            try:
+                mx_out = exe.outputs[0].asnumpy()
+            except MXNetError as e:
+                return
+            else:
+                # Did not raise exception
+                assert False, "did not raise %s" % MXNetError.__name__
 
-      assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
+        assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
 
-      for i in np.nditer(idx_real):
-        if mode == 'clip':
-          i = np.clip(i, 0, data_shape[axis])
-        grad_helper(grad_in, axis, i)
+        for i in np.nditer(idx_real):
+            if mode == 'clip':
+                i = np.clip(i, 0, data_shape[axis])
+            grad_helper(grad_in, axis, i)
 
-      exe.backward([mx.nd.array(grad_out)])
-      assert_almost_equal(exe.grad_dict['a'], grad_in)
+        exe.backward([mx.nd.array(grad_out)])
+        assert_almost_equal(exe.grad_dict['a'], grad_in)
 
 
 def test_grid_generator():
@@ -4443,7 +4443,7 @@ def test_repeat_forward():
     repeats = 3
     for ndim in range(1, ndim_max+1):
         shape = ()
-        for i in range(0, ndim):
+        for _ in range(0, ndim):
            shape += (np.random.randint(1, size_max+1), )
        a = np.random.random_sample(size=shape)
        aa = np.repeat(a, repeats)
@@ -4530,7 +4530,7 @@ def test_normal_case():
         rep_max = 10 # max number of tiling in each dim
         for ndim in range(ndim_min, ndim_max+1):
             shape = []
-            for i in range(1, ndim+1):
+            for _ in range(1, ndim+1):
                 shape.append(np.random.randint(1, size_max+1))
             shape = tuple(shape)
             a = np.random.randint(0, 100, shape)
@@ -4538,7 +4538,7 @@ def test_normal_case():
 
             reps_len = np.random.randint(1, length_max+1)
             reps_tuple = ()
-            for i in range(1, reps_len):
+            for _ in range(1, reps_len):
                 reps_tuple += (np.random.randint(1, rep_max), )
             reps_array = np.asarray(reps_tuple)
 
@@ -4623,7 +4623,7 @@ def test_normal_case(index_type=np.int32):
         off_value = 0
         for ndim in range(1, ndim_max+1):
             shape = ()
-            for i in range(1, ndim+1):
+            for _ in range(1, ndim+1):
                 shape += (np.random.randint(1, dim_size_max+1), )
             indices = np.random.randint(-dim_size_max, dim_size_max+1,
                                         size=np.prod(shape)).reshape(shape)
@@ -8703,7 +8703,7 @@ def check_concat(shape1, shape2, axis):
     check_concat((0, 3, 4), (5, 3, 4), 0)
     check_concat((8, 0, 5), (8, 7, 5), 1)
     check_concat((8, 0, 0), (8, 0, 0), 2)
-    for active in [True, False]:
+    for _ in [True, False]:
         check_concat((0, 3, 4), (5, 3, 4), 0)
         check_concat((8, 0, 5), (8, 7, 5), 1)
         check_concat((8, 0, 0), (8, 0, 0), 2)
@@ -9523,7 +9523,7 @@ def get_grads(model, grads, ctx=mx.cpu()):
 
 def run_model(model, loss, X, Y, num_iters=5):
     grads = []
-    for i in range(num_iters):
+    for _ in range(num_iters):
         with autograd.record():
             Y_hat = model(X)
             ll = loss(Y_hat, Y)
diff --git a/tests/python/unittest/test_profiler.py b/tests/python/unittest/test_profiler.py
index f284c7adf644..d3d3eafad65c 100644
--- a/tests/python/unittest/test_profiler.py
+++ b/tests/python/unittest/test_profiler.py
@@ -262,12 +262,12 @@ def check_ascending(lst, asc):
 def check_sorting(debug_str, sort_by, ascending):
     target_dict = json.loads(debug_str, object_pairs_hook=OrderedDict)
     lst = []
-    for domain_name, domain in target_dict['Time'].items():
+    for _, domain in target_dict['Time'].items():
         lst = [item[sort_by_options[sort_by]] for item_name, item in domain.items()]
         check_ascending(lst, ascending)
     # Memory items do not have stat 'Total'
     if sort_by != 'total':
-        for domain_name, domain in target_dict['Memory'].items():
+        for _, domain in target_dict['Memory'].items():
            lst = [item[sort_by_options[sort_by]] for item_name, item in domain.items()]
            check_ascending(lst, ascending)
 
diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py
index 9cd935dc4707..e03d6d015006 100644
--- a/tests/python/unittest/test_random.py
+++ b/tests/python/unittest/test_random.py
@@ -917,7 +917,7 @@ def testSmall(data, repeat1, repeat2):
         # Check that the shuffling is along the first axis.
         # The order of the elements in each subarray must not change.
         # This takes long time so `repeat1` need to be small.
-        for i in range(repeat1):
+        for _ in range(repeat1):
             ret = mx.nd.random.shuffle(data)
             check_first_axis_shuffle(ret)
         # Count the number of each different outcome.
@@ -925,7 +925,7 @@ def testSmall(data, repeat1, repeat2):
         # the outcomes as long as the order of the elements in each subarray does not change.
         count = {}
         stride = int(data.size / data.shape[0])
-        for i in range(repeat2):
+        for _ in range(repeat2):
             ret = mx.nd.random.shuffle(data)
             h = str(ret.reshape((ret.size,))[::stride])
             c = count.get(h, 0)
@@ -953,7 +953,7 @@ def testLarge(data, repeat):
         # and count the number of different outcomes.
         stride = int(data.size / data.shape[0])
         count = {}
-        for i in range(repeat):
+        for _ in range(repeat):
             ret = mx.nd.random.shuffle(data)
             check_first_axis_shuffle(ret)
             h = str(ret.reshape((ret.size,))[::stride])
diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py
index e427377e62f6..160cb605aad1 100644
--- a/tests/python/unittest/test_sparse_ndarray.py
+++ b/tests/python/unittest/test_sparse_ndarray.py
@@ -57,7 +57,7 @@ def check_sparse_nd_elemwise_binary(shapes, stypes, f, g):
     num_repeats = 3
     g = lambda x,y: x + y
     op = mx.nd.elemwise_add
-    for i in range(num_repeats):
+    for _ in range(num_repeats):
         shape = [rand_shape_2d()] * 2
         check_sparse_nd_elemwise_binary(shape, ['default'] * 2, op, g)
         check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g)
@@ -163,7 +163,7 @@ def check_concat(arrays):
     nds = []
     zero_nds = []
     ncols = rnd.randint(2, 10)
-    for i in range(3):
+    for _ in range(3):
         shape = (rnd.randint(2, 10), ncols)
         A, _ = rand_sparse_ndarray(shape, 'csr')
         nds.append(A)
@@ -548,7 +548,7 @@ def test_sparse_nd_save_load(save_fn):
     densities = [0, 0.5]
     fname = 'tmp_list.npz'
     data_list1 = []
-    for i in range(num_data):
+    for _ in range(num_data):
         stype = stypes[np.random.randint(0, len(stypes))]
         shape = rand_shape_2d(dim0=40, dim1=40)
         density = densities[np.random.randint(0, len(densities))]
diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py
index 731f9081042e..10bfb65c1f1b 100644
--- a/tests/python/unittest/test_sparse_operator.py
+++ b/tests/python/unittest/test_sparse_operator.py
@@ -490,7 +490,7 @@ def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape,
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
 
-        for ii in range(1):
+        for _ in range(1):
             # Run defaults
             check_elemwise_binary_ops('default', 'default', rand_shape_2d(5, 5))
 
@@ -1733,7 +1733,7 @@ def check_sparse_elementwise_sum_with_shape(stypes, shape, n):
     test_len = np.random.randint(5, 10)
     # at least one default type
     stypes = ['default']
-    for i in range(test_len):
+    for _ in range(test_len):
         pick_side = np.random.randint(2)
         pick_type = np.random.randint(3)
         stypes = ([all_stypes[pick_type]] if pick_side is 0 else []) + stypes + ([all_stypes[pick_type]] if pick_side is 1 else [])
diff --git a/tests/python/unittest/test_symbol.py b/tests/python/unittest/test_symbol.py
index f93649afbe78..c10a5a00257f 100644
--- a/tests/python/unittest/test_symbol.py
+++ b/tests/python/unittest/test_symbol.py
@@ -313,7 +313,7 @@ def test_simple_bind_gradient_graph_possible_with_cycle():
 def test_children_same_name():
     a = mx.sym.Variable('data')
     b = a + a
-    for c in b.get_children():
+    for _ in b.get_children():
         pass
 
 def test_transpose_nullop():