Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Fix flaky - test_operator_gpu.test_np_insert (#17620)
Browse files (browse the repository at this point in the history)
* fix flaky

* retrigger CI
Loading branch information…
JiangZhaoh authored Feb 20, 2020
1 parent 23ff424 commit ab48a43
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion src/operator/numpy/np_insert_op_slice-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ void NumpyInsertSliceCompute(const nnvm::NodeAttrs& attrs,
Tensor<xpu, 1, int64_t> indices(indices_ptr, Shape1(indices_len), s);
Tensor<xpu, 1, int64_t> sorted_indices(sorted_indices_ptr, Shape1(indices_len), s);
Tensor<xpu, 1, int> order(order_ptr, Shape1(indices_len), s);
int num_bits = common::ilog2ui(static_cast<unsigned int>(indices_len) - 1);
int num_bits = 8 * sizeof(int64_t);
Kernel<SliceToIndices, xpu>::Launch(s, indices_len, indices_ptr, start, step);
Kernel<range_fwd, xpu>::Launch(s, indices_len, 1, 0, 1, kWriteTo, order_ptr);
mxnet::op::SortByKey(indices, order, true, &temp_storage, 0, num_bits, &sorted_indices);
Expand Down
2 changes: 1 addition & 1 deletion src/operator/numpy/np_insert_op_tensor-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ void NumpyInsertTensorCompute(const nnvm::NodeAttrs& attrs,
Tensor<xpu, 1, int64_t> indices(indices_ptr, Shape1(indices_len), s);
Tensor<xpu, 1, int64_t> sorted_indices(sorted_indices_ptr, Shape1(indices_len), s);
Tensor<xpu, 1, int> order(order_ptr, Shape1(indices_len), s);
int num_bits = common::ilog2ui(static_cast<unsigned int>(indices_len) - 1);
int num_bits = 8 * sizeof(int64_t);
Kernel<ObjToIndices, xpu>::Launch(s, indices_len, indices_ptr, N,
inputs[obj_pos].dptr<int64_t>());
Kernel<range_fwd, xpu>::Launch(s, indices_len, 1, 0, 1, kWriteTo, order_ptr);
Expand Down
8 changes: 4 additions & 4 deletions tests/python/unittest/test_numpy_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -2661,7 +2661,7 @@ def GetNdim(tp):
E = (1)
F = (3, 1)
G = (3, 2)
H = (5, 2, 3, 4)
H = (2, 2, 3, 8)
config = []
# test scale index
for idx in range(-1 * GetSize(A), GetSize(A) + 1):
Expand Down Expand Up @@ -2705,7 +2705,7 @@ def GetNdim(tp):
config.append(tuple([H, 0, 1, 3]))
config.append(tuple([H, [1], E, 2]))
config.append(tuple([H, [1], 1, 2]))
idx = _np.random.randint(-1 * H[3], H[3] + 1, size = (3)).tolist()
idx = _np.random.randint(-1 * H[3], H[3] + 1, size = (5)).tolist()
config.append(tuple([H, idx, E, 3]))
config.append(tuple([H, idx, 1, 3]))
# test slice
Expand All @@ -2728,9 +2728,9 @@ def GetNdim(tp):
obj_onp = obj
test_insert = TestInsert(obj=obj_mxnp, axis=axis)

a = mx.nd.random.uniform(-1.0, 1.0, shape=arr_shape).as_np_ndarray().astype(atype)
a = mx.nd.random.uniform(-10.0, 10.0, shape=arr_shape).as_np_ndarray().astype(atype)
a.attach_grad()
b = mx.nd.random.uniform(-1.0, 1.0, shape=val_shape).as_np_ndarray().astype(btype)
b = mx.nd.random.uniform(-10.0, 10.0, shape=val_shape).as_np_ndarray().astype(btype)
b.attach_grad()
expected_ret = _np.insert(a.asnumpy(), obj_onp, b.asnumpy(), axis=axis)
with mx.autograd.record():
Expand Down

0 comments on commit ab48a43

Please sign in to comment.