diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index b7a1471ae5a9e..9c906a00bd4fe 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -104,10 +104,7 @@ cdef class IndexEngine:
         loc = self.get_loc(key)
         value = convert_scalar(arr, value)
 
-        if PySlice_Check(loc) or util.is_array(loc):
-            arr[loc] = value
-        else:
-            util.set_value_at(arr, loc, value)
+        arr[loc] = value
 
     cpdef get_loc(self, object val):
         if is_definitely_invalid_key(val):
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 654e7eaf92ff0..a6078da28a3ba 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -492,9 +492,7 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
         if is_datelike and checknull(v):
             result[i] = NPY_NAT
         else:
-            # we can use the unsafe version because we know `result` is mutable
-            # since it was created from `np.empty`
-            util.set_value_at_unsafe(result, i, v)
+            result[i] = v
 
     return result
 
@@ -505,9 +503,7 @@ cpdef ndarray[object] astype_unicode(ndarray arr):
         ndarray[object] result = np.empty(n, dtype=object)
 
     for i in range(n):
-        # we can use the unsafe version because we know `result` is mutable
-        # since it was created from `np.empty`
-        util.set_value_at_unsafe(result, i, unicode(arr[i]))
+        result[i] = unicode(arr[i])
 
     return result
 
@@ -518,9 +514,7 @@ cpdef ndarray[object] astype_str(ndarray arr):
         ndarray[object] result = np.empty(n, dtype=object)
 
     for i in range(n):
-        # we can use the unsafe version because we know `result` is mutable
-        # since it was created from `np.empty`
-        util.set_value_at_unsafe(result, i, str(arr[i]))
+        result[i] = str(arr[i])
 
     return result
 
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index fe993ecc0cdd7..4a5e859b8f50b 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -282,8 +282,7 @@ cdef class SeriesBinGrouper:
                     result = _get_result_array(res,
                                                self.ngroups,
                                                len(self.dummy_arr))
-
-                util.assign_value_1d(result, i, res)
+                result[i] = res
 
                 islider.advance(group_size)
                 vslider.advance(group_size)
@@ -408,7 +407,7 @@ cdef class SeriesGrouper:
                                                    self.ngroups,
                                                    len(self.dummy_arr))
 
-                    util.assign_value_1d(result, lab, res)
+                    result[lab] = res
                     counts[lab] = group_size
                     islider.advance(group_size)
                     vslider.advance(group_size)
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h
deleted file mode 100644
index d9d0fb74da73c..0000000000000
--- a/pandas/_libs/src/numpy_helper.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
-Copyright (c) 2016, PyData Development Team
-All rights reserved.
-
-Distributed under the terms of the BSD Simplified License.
-
-The full license is in the LICENSE file, distributed with this software.
-*/
-
-#ifndef PANDAS__LIBS_SRC_NUMPY_HELPER_H_
-#define PANDAS__LIBS_SRC_NUMPY_HELPER_H_
-
-#include "Python.h"
-#include "inline_helper.h"
-#include "numpy/arrayobject.h"
-#include "numpy/arrayscalars.h"
-
-
-PANDAS_INLINE int assign_value_1d(PyArrayObject* ap, Py_ssize_t _i,
-                                  PyObject* v) {
-    npy_intp i = (npy_intp)_i;
-    char* item = (char*)PyArray_DATA(ap) + i * PyArray_STRIDE(ap, 0);
-    return PyArray_DESCR(ap)->f->setitem(v, item, ap);
-}
-
-PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) {
-    char* item = (char*)PyArray_DATA(ap) + i * PyArray_STRIDE(ap, 0);
-    return PyArray_Scalar(item, PyArray_DESCR(ap), (PyObject*)ap);
-}
-
-#endif  // PANDAS__LIBS_SRC_NUMPY_HELPER_H_
diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd
index e78f132ada2ca..e05795d74c503 100644
--- a/pandas/_libs/util.pxd
+++ b/pandas/_libs/util.pxd
@@ -30,11 +30,6 @@ cdef extern from *:
     const char *get_c_string(object) except NULL
 
 
-cdef extern from "src/numpy_helper.h":
-    int assign_value_1d(ndarray, Py_ssize_t, object) except -1
-    object get_value_1d(ndarray, Py_ssize_t)
-
-
 cdef extern from "src/headers/stdint.h":
     enum: UINT8_MAX
     enum: UINT16_MAX
@@ -116,26 +111,4 @@ cdef inline object get_value_at(ndarray arr, object loc):
         Py_ssize_t i
 
     i = validate_indexer(arr, loc)
-    return get_value_1d(arr, i)
-
-
-cdef inline set_value_at_unsafe(ndarray arr, object loc, object value):
-    """Sets a value into the array without checking the writeable flag.
-
-    This should be used when setting values in a loop, check the writeable
-    flag above the loop and then eschew the check on each iteration.
-    """
-    cdef:
-        Py_ssize_t i
-
-    i = validate_indexer(arr, loc)
-    assign_value_1d(arr, i, value)
-
-
-cdef inline set_value_at(ndarray arr, object loc, object value):
-    """Sets a value into the array after checking that the array is mutable.
-    """
-    if not cnp.PyArray_ISWRITEABLE(arr):
-        raise ValueError('assignment destination is read-only')
-
-    set_value_at_unsafe(arr, loc, value)
+    return arr[i]
diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py
index 6f0ffbff22028..eb07e5ef6c85f 100644
--- a/pandas/core/sparse/array.py
+++ b/pandas/core/sparse/array.py
@@ -446,7 +446,10 @@ def _get_val_at(self, loc):
         if sp_loc == -1:
             return self.fill_value
         else:
-            return libindex.get_value_at(self, sp_loc)
+            # libindex.get_value_at will end up calling __getitem__,
+            # so to avoid recursing we need to unwrap `self` so the
+            # ndarray.__getitem__ implementation is called.
+            return libindex.get_value_at(np.asarray(self), sp_loc)
 
     @Appender(_index_shared_docs['take'] % _sparray_doc_kwargs)
     def take(self, indices, axis=0, allow_fill=True,
diff --git a/setup.py b/setup.py
index 964167737c9c6..19438d950e8a7 100755
--- a/setup.py
+++ b/setup.py
@@ -491,8 +491,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
 
 ts_include = ['pandas/_libs/tslibs/src']
 
-lib_depends = ['pandas/_libs/src/numpy_helper.h',
-               'pandas/_libs/src/parse_helper.h',
+lib_depends = ['pandas/_libs/src/parse_helper.h',
                'pandas/_libs/src/compat_helper.h']
 
 np_datetime_headers = [
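
For context, a minimal sketch of the NumPy behavior the simplified call sites rely on, assuming only standard ndarray semantics (nothing below is part of the patch itself): plain item assignment already enforces the writeable flag and raises the same "assignment destination is read-only" ValueError that the removed set_value_at helper raised explicitly, so no separate cnp.PyArray_ISWRITEABLE check is needed.

# Sketch only: shows that ndarray.__setitem__ performs the writeable
# check that util.set_value_at used to do by hand.
import numpy as np

arr = np.empty(3, dtype=object)
arr[0] = "a"               # stands in for util.set_value_at(arr, 0, "a")
assert arr[0] == "a"

arr.setflags(write=False)  # simulate a read-only destination
try:
    arr[1] = "b"
except ValueError as exc:
    print(exc)             # "assignment destination is read-only"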