remove unused (#18533)

jbrockmendel authored and jreback committed Nov 28, 2017
1 parent 6148e58 commit 94f3923
Showing 6 changed files with 0 additions and 332 deletions.
45 changes: 0 additions & 45 deletions pandas/_libs/algos.pyx
@@ -212,51 +212,6 @@ cpdef numeric median(numeric[:] arr):
                kth_smallest(arr, n // 2 - 1)) / 2


# -------------- Min, Max subsequence

@cython.boundscheck(False)
@cython.wraparound(False)
def max_subseq(ndarray[double_t] arr):
    cdef:
        Py_ssize_t i=0, s=0, e=0, T, n
        double m, S

    n = len(arr)

    if len(arr) == 0:
        return (-1, -1, None)

    m = arr[0]
    S = m
    T = 0

    with nogil:
        for i in range(1, n):
            # S = max(S + A[i], A[i])
            if (S > 0):
                S = S + arr[i]
            else:
                S = arr[i]
                T = i
            if S > m:
                s = T
                e = i
                m = S

    return (s, e, m)


@cython.boundscheck(False)
@cython.wraparound(False)
def min_subseq(ndarray[double_t] arr):
    cdef:
        Py_ssize_t s, e
        double m

    (s, e, m) = max_subseq(-arr)

    return (s, e, -m)
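
For reference, the removed max_subseq is Kadane's maximum-subarray algorithm, returning a (start, end, max_sum) triple, and min_subseq just runs it on the negated array. A rough pure-Python sketch of the same logic (helper name hypothetical, not pandas API):

def max_subseq_py(arr):
    # Kadane's algorithm: S is the best sum ending at i, m the best
    # sum seen anywhere; T tracks where the current window started.
    if len(arr) == 0:
        return (-1, -1, None)
    m = S = arr[0]
    s = e = T = 0
    for i in range(1, len(arr)):
        if S > 0:
            S += arr[i]           # extend the current window
        else:
            S = arr[i]            # start a new window at i
            T = i
        if S > m:
            s, e, m = T, i, S     # record the best window so far
    return (s, e, m)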

# ----------------------------------------------------------------------
# Pairwise correlation/covariance

101 changes: 0 additions & 101 deletions pandas/_libs/groupby.pyx
@@ -75,57 +75,6 @@ def group_nth_object(ndarray[object, ndim=2] out,
                out[i, j] = resx[i, j]


@cython.boundscheck(False)
@cython.wraparound(False)
def group_nth_bin_object(ndarray[object, ndim=2] out,
                         ndarray[int64_t] counts,
                         ndarray[object, ndim=2] values,
                         ndarray[int64_t] bins, int64_t rank):
    """
    Only aggregates on axis=0
    """
    cdef:
        Py_ssize_t i, j, N, K, ngroups, b
        object val
        float64_t count
        ndarray[object, ndim=2] resx
        ndarray[float64_t, ndim=2] nobs

    nobs = np.zeros((<object> out).shape, dtype=np.float64)
    resx = np.empty((<object> out).shape, dtype=object)

    if len(bins) == 0:
        return
    if bins[len(bins) - 1] == len(values):
        ngroups = len(bins)
    else:
        ngroups = len(bins) + 1

    N, K = (<object> values).shape

    b = 0
    for i in range(N):
        while b < ngroups - 1 and i >= bins[b]:
            b += 1

        counts[b] += 1
        for j in range(K):
            val = values[i, j]

            # not nan
            if val == val:
                nobs[b, j] += 1
                if nobs[b, j] == rank:
                    resx[b, j] = val

    for i in range(ngroups):
        for j in range(K):
            if nobs[i, j] == 0:
                out[i, j] = nan
            else:
                out[i, j] = resx[i, j]


@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_object(ndarray[object, ndim=2] out,
@@ -169,56 +118,6 @@ def group_last_object(ndarray[object, ndim=2] out,
                out[i, j] = resx[i, j]


@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_bin_object(ndarray[object, ndim=2] out,
                          ndarray[int64_t] counts,
                          ndarray[object, ndim=2] values,
                          ndarray[int64_t] bins):
    """
    Only aggregates on axis=0
    """
    cdef:
        Py_ssize_t i, j, N, K, ngroups, b
        object val
        float64_t count
        ndarray[object, ndim=2] resx
        ndarray[float64_t, ndim=2] nobs

    nobs = np.zeros((<object> out).shape, dtype=np.float64)
    resx = np.empty((<object> out).shape, dtype=object)

    if len(bins) == 0:
        return
    if bins[len(bins) - 1] == len(values):
        ngroups = len(bins)
    else:
        ngroups = len(bins) + 1

    N, K = (<object> values).shape

    b = 0
    for i in range(N):
        while b < ngroups - 1 and i >= bins[b]:
            b += 1

        counts[b] += 1
        for j in range(K):
            val = values[i, j]

            # not nan
            if val == val:
                nobs[b, j] += 1
                resx[b, j] = val

    for i in range(ngroups):
        for j in range(K):
            if nobs[i, j] == 0:
                out[i, j] = nan
            else:
                out[i, j] = resx[i, j]
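
Both removed bin aggregators (group_nth_bin_object above and group_last_bin_object here) share the same edge-scan that maps each row to its bin before aggregating. A hedged NumPy sketch of just that mapping (helper name hypothetical):

import numpy as np

def bin_group_ids(bins, n):
    # The Cython loop advances b while i >= bins[b], so row i lands in
    # the group equal to the number of bin edges <= i.
    return np.searchsorted(bins, np.arange(n), side='right')

# e.g. bin_group_ids(np.array([2, 5]), 6) -> [0, 0, 1, 1, 1, 2]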


cdef inline float64_t _median_linear(float64_t* a, int n) nogil:
    cdef int i, j, na_count = 0
    cdef float64_t result
5 changes: 0 additions & 5 deletions pandas/_libs/hashing.pyx
@@ -105,11 +105,6 @@ cdef inline void u32to8_le(uint8_t* p, uint32_t v) nogil:
    p[3] = <uint8_t>(v >> 24)


cdef inline void u64to8_le(uint8_t* p, uint64_t v) nogil:
    u32to8_le(p, <uint32_t>v)
    u32to8_le(p + 4, <uint32_t>(v >> 32))
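
The removed u64to8_le wrote a 64-bit value as eight little-endian bytes by delegating each 32-bit half to u32to8_le. In plain Python the same byte layout comes from struct (a sketch, not pandas code):

import struct

def u64to8_le_py(v):
    # '<Q' packs an unsigned 64-bit integer little-endian, matching
    # the two u32to8_le calls in the removed helper.
    return struct.pack('<Q', v)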


cdef inline uint64_t u8to64_le(uint8_t* p) nogil:
    return (<uint64_t>p[0] |
            <uint64_t>p[1] << 8 |
24 changes: 0 additions & 24 deletions pandas/_libs/join.pyx
@@ -240,28 +240,4 @@ def ffill_indexer(ndarray[int64_t] indexer):
    return result


def ffill_by_group(ndarray[int64_t] indexer, ndarray[int64_t] group_ids,
                   int64_t max_group):
    cdef:
        Py_ssize_t i, n = len(indexer)
        ndarray[int64_t] result, last_obs
        int64_t gid, val

    result = np.empty(n, dtype=np.int64)

    last_obs = np.empty(max_group, dtype=np.int64)
    last_obs.fill(-1)

    for i in range(n):
        gid = group_ids[i]
        val = indexer[i]
        if val == -1:
            result[i] = last_obs[gid]
        else:
            result[i] = val
            last_obs[gid] = val

    return result
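
For context, the removed ffill_by_group forward-filled -1 markers in an indexer, but only from the most recent valid value within the same group. A plain-Python rendering of the same loop (name hypothetical):

import numpy as np

def ffill_by_group_py(indexer, group_ids, max_group):
    result = np.empty(len(indexer), dtype=np.int64)
    last_obs = np.full(max_group, -1, dtype=np.int64)  # -1 = no obs yet
    for i, (gid, val) in enumerate(zip(group_ids, indexer)):
        if val == -1:
            result[i] = last_obs[gid]   # fill from this group's last value
        else:
            result[i] = val
            last_obs[gid] = val
    return result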


include "join_helper.pxi"
106 changes: 0 additions & 106 deletions pandas/_libs/lib.pyx
@@ -76,27 +76,6 @@ def values_from_object(object o):
    return o


cpdef map_indices_list(list index):
    """
    Produce a dict mapping the values of the input array to their respective
    locations.

    Example:
        array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}

    Better to do this with Cython because of the enormous speed boost.
    """
    cdef Py_ssize_t i, length
    cdef dict result = {}

    length = len(index)

    for i from 0 <= i < length:
        result[index[i]] = i

    return result
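
The removed map_indices_list builds the same mapping as a one-line dict comprehension (a sketch; in both versions a duplicated value keeps its last position):

def map_indices_py(index):
    # value -> position, later occurrences overwriting earlier ones
    return {v: i for i, v in enumerate(index)}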


@cython.wraparound(False)
@cython.boundscheck(False)
def memory_usage_of_objects(ndarray[object, ndim=1] arr):
@@ -1094,27 +1073,6 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label,
    return out


def group_count(ndarray[int64_t] values, Py_ssize_t size):
    cdef:
        Py_ssize_t i, n = len(values)
        ndarray[int64_t] counts

    counts = np.zeros(size, dtype=np.int64)
    for i in range(n):
        counts[values[i]] += 1
    return counts


def lookup_values(ndarray[object] values, dict mapping):
    cdef:
        Py_ssize_t i, n = len(values)

    result = np.empty(n, dtype='O')
    for i in range(n):
        result[i] = mapping[values[i]]
    return maybe_convert_objects(result)
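
Both removed lib.pyx helpers have short NumPy equivalents (sketches; the first assumes every label lies in [0, size)):

import numpy as np

def group_count_py(values, size):
    # np.bincount does the same tally as the removed loop.
    return np.bincount(values, minlength=size).astype(np.int64)

def lookup_values_py(values, mapping):
    # Same lookup; pandas then passed the result through
    # maybe_convert_objects to tighten the dtype.
    return np.array([mapping[v] for v in values], dtype=object)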


@cython.boundscheck(False)
@cython.wraparound(False)
def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
@@ -1145,70 +1103,6 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
    return counts


cdef class _PandasNull:

    def __richcmp__(_PandasNull self, object other, int op):
        if op == 2:  # ==
            return isinstance(other, _PandasNull)
        elif op == 3:  # !=
            return not isinstance(other, _PandasNull)
        else:
            return False

    def __hash__(self):
        return 0

pandas_null = _PandasNull()
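
In __richcmp__, the op codes 2 and 3 are Cython's Py_EQ and Py_NE; every other comparison returns False. A plain-Python analogue of the removed sentinel (class name hypothetical):

class PandasNullPy:
    # Equal only to other instances of itself; constant hash so it
    # can be used as a dict key or tuple element.
    def __eq__(self, other):
        return isinstance(other, PandasNullPy)

    def __ne__(self, other):
        return not isinstance(other, PandasNullPy)

    def __hash__(self):
        return 0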


def fast_zip_fillna(list ndarrays, fill_value=pandas_null):
    """
    For zipping multiple ndarrays into an ndarray of tuples
    """
    cdef:
        Py_ssize_t i, j, k, n
        ndarray[object] result
        flatiter it
        object val, tup

    k = len(ndarrays)
    n = len(ndarrays[0])

    result = np.empty(n, dtype=object)

    # initialize tuples on first pass
    arr = ndarrays[0]
    it = <flatiter> PyArray_IterNew(arr)
    for i in range(n):
        val = PyArray_GETITEM(arr, PyArray_ITER_DATA(it))
        tup = PyTuple_New(k)

        if val != val:
            val = fill_value

        PyTuple_SET_ITEM(tup, 0, val)
        Py_INCREF(val)
        result[i] = tup
        PyArray_ITER_NEXT(it)

    for j in range(1, k):
        arr = ndarrays[j]
        it = <flatiter> PyArray_IterNew(arr)
        if len(arr) != n:
            raise ValueError('all arrays must be same length')

        for i in range(n):
            val = PyArray_GETITEM(arr, PyArray_ITER_DATA(it))
            if val != val:
                val = fill_value

            PyTuple_SET_ITEM(result[i], j, val)
            Py_INCREF(val)
            PyArray_ITER_NEXT(it)

    return result
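
The removed fast_zip_fillna zipped several 1-D object arrays into one array of tuples, substituting the pandas_null sentinel for NaN slots (NaN is the only value for which val != val). A plain-Python sketch of the same behavior (sentinel and name hypothetical):

import numpy as np

_NULL = object()  # stand-in for the removed pandas_null sentinel

def fast_zip_fillna_py(ndarrays, fill_value=_NULL):
    n = len(ndarrays[0])
    if any(len(a) != n for a in ndarrays[1:]):
        raise ValueError('all arrays must be same length')
    result = np.empty(n, dtype=object)
    for i, row in enumerate(zip(*ndarrays)):
        result[i] = tuple(fill_value if v != v else v for v in row)
    return result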


def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups):
    cdef:
        Py_ssize_t i, group_size, n, start