diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 28a86b8db8a3c..f88e2a1321875 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -1066,13 +1066,11 @@ static PyObject* tensor__getitem_from_offset(TensorObject* self,
     T b = paddle::pybind::TensorGetElement(tensor, offset); \
     Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank]; \
     Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank]; \
-    py_dims[0] = 1; \
-    py_strides[0] = 1; \
     auto& api = pybind11::detail::npy_api::get(); \
     PyObject* array = api.PyArray_NewFromDescr_( \
         api.PyArray_Type_, \
         api.PyArray_DescrFromType_(numpy_dtype), \
-        1, \
+        0, \
         py_dims, \
         py_strides, \
         nullptr, \
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index 77abddce03d82..908fedf7c4b9c 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -46,10 +46,6 @@
 }
 
 
-def copy_bits_from_float_to_uint16(f):
-    return struct.unpack('<I', struct.pack('<f', f))[0] >> 16
-
-
 def convert_float_to_uint16(data, data_format="NCHW"):
     if data.size == 0:
         return data.view(np.uint16)
@@ -57,16 +53,25 @@ def convert_float_to_uint16(data, data_format="NCHW"):
     if data_format == "NHWC":
         data = np.transpose(data, [0, 3, 1, 2])
 
-    new_data = []
-    for x in np.nditer(data):
-        new_data.append(np.uint16(copy_bits_from_float_to_uint16(x)))
-    new_data = np.reshape(new_data, data.shape).view(np.uint16)
+    new_data = np.vectorize(
+        lambda x: struct.unpack('<I', struct.pack('<f', x))[0] >> 16,
+        otypes=[np.uint16],
+    )(data.flat)
+    new_data = np.reshape(new_data, data.shape)
 
     if data_format == "NHWC":
-        new_data = np.transpose(new_output, [0, 2, 3, 1])
+        new_data = np.transpose(new_data, [0, 2, 3, 1])
     return new_data
 
 
+def convert_uint16_to_float(data):
+    new_data = np.vectorize(
+        lambda x: struct.unpack('<f', struct.pack('<I', np.uint32(x) << 16))[0],
+        otypes=[np.float32],
+    )(data.flat)
+    return np.reshape(new_data, data.shape)
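For context, the `struct` lambdas in the diff implement the standard float32 ↔ bfloat16 bit trick: a bfloat16 value is just the upper 16 bits of an IEEE 754 float32 bit pattern, so each direction is a reinterpret plus a 16-bit shift. The sketch below reproduces that round trip outside Paddle with plain `numpy` and `struct`; the helper names are illustrative and not part of this patch.

```python
import struct

import numpy as np


def float32_to_bfloat16_bits(x):
    # Reinterpret the float32 as a little-endian uint32 and keep the
    # upper 16 bits, which is the bfloat16 (brain float) bit pattern.
    return np.uint16(struct.unpack('<I', struct.pack('<f', x))[0] >> 16)


def bfloat16_bits_to_float32(bits):
    # Shift the 16-bit pattern back into the high half of a uint32 and
    # reinterpret it as a float32; the discarded mantissa bits stay zero.
    return struct.unpack('<f', struct.pack('<I', np.uint32(bits) << 16))[0]


if __name__ == "__main__":
    for value in (1.0, 3.14159, -0.001):
        bits = float32_to_bfloat16_bits(value)
        restored = bfloat16_bits_to_float32(bits)
        # The round trip is lossy: bfloat16 keeps roughly 2-3 decimal digits.
        print(f"{value} -> 0x{int(bits):04x} -> {restored}")
```

The `eager_method.cc` hunk is the companion change on the C++ side: passing a rank of 0 instead of 1 to `PyArray_NewFromDescr_` makes reading a single element produce a 0-d NumPy array rather than a 1-d array of length one, which is why the `py_dims[0]`/`py_strides[0]` initialization is no longer needed.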