diff --git a/.travis.yml b/.travis.yml
index 897d31cf23a3b..034e2a32bb75c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -37,7 +37,7 @@ matrix:
- JOB="3.5_OSX" TEST_ARGS="--skip-slow --skip-network"
- dist: trusty
env:
- - JOB="2.7_LOCALE" TEST_ARGS="--only-slow --skip-network" LOCALE_OVERRIDE="zh_CN.UTF-8"
+ - JOB="2.7_LOCALE" LOCALE_OVERRIDE="zh_CN.UTF-8" SLOW=true
addons:
apt:
packages:
@@ -62,7 +62,7 @@ matrix:
# In allow_failures
- dist: trusty
env:
- - JOB="2.7_SLOW" TEST_ARGS="--only-slow --skip-network"
+ - JOB="2.7_SLOW" SLOW=true
# In allow_failures
- dist: trusty
env:
@@ -82,7 +82,7 @@ matrix:
allow_failures:
- dist: trusty
env:
- - JOB="2.7_SLOW" TEST_ARGS="--only-slow --skip-network"
+ - JOB="2.7_SLOW" SLOW=true
- dist: trusty
env:
- JOB="2.7_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true
diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index ad8f0bdd8a597..d26689f2e6b4b 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -47,7 +47,7 @@ which conda
echo
echo "[update conda]"
conda config --set ssl_verify false || exit 1
-conda config --set always_yes true --set changeps1 false || exit 1
+conda config --set quiet true --set always_yes true --set changeps1 false || exit 1
conda update -q conda
echo
diff --git a/ci/requirements-2.7_COMPAT.build b/ci/requirements-2.7_COMPAT.build
index 0e1ccf9eac9bf..d9c932daa110b 100644
--- a/ci/requirements-2.7_COMPAT.build
+++ b/ci/requirements-2.7_COMPAT.build
@@ -1,5 +1,5 @@
python=2.7*
-numpy=1.7.1
+numpy=1.9.2
cython=0.23
dateutil=1.5
pytz=2013b
diff --git a/ci/requirements-2.7_COMPAT.run b/ci/requirements-2.7_COMPAT.run
index b94f4ab7b27d1..39bf720140733 100644
--- a/ci/requirements-2.7_COMPAT.run
+++ b/ci/requirements-2.7_COMPAT.run
@@ -1,11 +1,12 @@
-numpy=1.7.1
+numpy=1.9.2
dateutil=1.5
pytz=2013b
-scipy=0.11.0
+scipy=0.14.0
xlwt=0.7.5
xlrd=0.9.2
-numexpr=2.2.2
-pytables=3.0.0
+bottleneck=1.0.0
+numexpr=2.4.4 # deliberately pinned to an unsupported version, to test that pandas does not use it
+pytables=3.2.2
psycopg2
pymysql=0.6.0
sqlalchemy=0.7.8
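The COMPAT run pins numexpr to 2.4.4 on purpose: the suite checks that pandas falls back to plain evaluation instead of using a numexpr release below its supported floor. A minimal sketch of that kind of gate, assuming a hypothetical `_MIN_NUMEXPR_VERSION` constant (the real constant name and floor live in pandas' expressions module and may differ):

```python
from distutils.version import LooseVersion

_MIN_NUMEXPR_VERSION = "2.4.6"  # hypothetical floor, for illustration only

try:
    import numexpr as ne
    _NUMEXPR_OK = LooseVersion(ne.__version__) >= LooseVersion(_MIN_NUMEXPR_VERSION)
except ImportError:
    _NUMEXPR_OK = False

def evaluate_add(a, b):
    """Use numexpr only when a supported version is installed."""
    if _NUMEXPR_OK:
        return ne.evaluate("a + b")
    return a + b  # plain fallback, same result
```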
diff --git a/ci/requirements-2.7_LOCALE.build b/ci/requirements-2.7_LOCALE.build
index 4a37ce8fbe161..96cb184ec2665 100644
--- a/ci/requirements-2.7_LOCALE.build
+++ b/ci/requirements-2.7_LOCALE.build
@@ -1,5 +1,5 @@
python=2.7*
python-dateutil
pytz=2013b
-numpy=1.8.2
+numpy=1.9.2
cython=0.23
diff --git a/ci/requirements-2.7_LOCALE.run b/ci/requirements-2.7_LOCALE.run
index 8e360cf74b081..00006106f7009 100644
--- a/ci/requirements-2.7_LOCALE.run
+++ b/ci/requirements-2.7_LOCALE.run
@@ -1,11 +1,12 @@
python-dateutil
pytz=2013b
-numpy=1.8.2
+numpy=1.9.2
xlwt=0.7.5
openpyxl=1.6.2
xlsxwriter=0.5.2
xlrd=0.9.2
-matplotlib=1.3.1
+bottleneck=1.0.0
+matplotlib=1.4.3
sqlalchemy=0.8.1
lxml=3.2.1
scipy
diff --git a/ci/requirements-2.7_SLOW.build b/ci/requirements-2.7_SLOW.build
index 0f4a2c6792e6b..a665ab9edd585 100644
--- a/ci/requirements-2.7_SLOW.build
+++ b/ci/requirements-2.7_SLOW.build
@@ -1,5 +1,5 @@
python=2.7*
python-dateutil
pytz
-numpy=1.8.2
+numpy=1.10*
cython
diff --git a/ci/requirements-2.7_SLOW.run b/ci/requirements-2.7_SLOW.run
index 0a549554f5219..f7708283ad04a 100644
--- a/ci/requirements-2.7_SLOW.run
+++ b/ci/requirements-2.7_SLOW.run
@@ -1,7 +1,7 @@
python-dateutil
pytz
-numpy=1.8.2
-matplotlib=1.3.1
+numpy=1.10*
+matplotlib=1.4.3
scipy
patsy
xlwt
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index d79fc43fbe175..ee9fbcaad5ef5 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -36,9 +36,15 @@ elif [ "$COVERAGE" ]; then
echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+elif [ "$SLOW" ]; then
+ TEST_ARGS="--only-slow --skip-network"
+ echo pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+ pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+
else
echo pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas # TODO: doctest
+
fi
RET="$?"
diff --git a/ci/script_single.sh b/ci/script_single.sh
index 245b4e6152c4d..375e9879e950f 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -12,16 +12,24 @@ if [ -n "$LOCALE_OVERRIDE" ]; then
python -c "$pycmd"
fi
+if [ "$SLOW" ]; then
+ TEST_ARGS="--only-slow --skip-network"
+fi
+
if [ "$BUILD_TEST" ]; then
echo "We are not running pytest as this is a build test."
+
elif [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
+
elif [ "$COVERAGE" ]; then
echo pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+
else
echo pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas
pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas # TODO: doctest
+
fi
RET="$?"
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 99d299b75b59b..f92c43839ee31 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -203,7 +203,7 @@ Dependencies
------------
* `setuptools `__
-* `NumPy `__: 1.7.1 or higher
+* `NumPy `__: 1.9.0 or higher
* `python-dateutil `__: 1.5 or higher
* `pytz `__: Needed for time zone support
@@ -233,7 +233,7 @@ Optional Dependencies
* `Cython `__: Only necessary to build development
version. Version 0.23 or higher.
-* `SciPy `__: miscellaneous statistical functions
+* `SciPy `__: miscellaneous statistical functions, Version 0.14.0 or higher
* `xarray `__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
* `PyTables `__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
* `Feather Format `__: necessary for feather-based storage, version 0.3.1 or higher.
@@ -244,7 +244,7 @@ Optional Dependencies
* `pymysql `__: for MySQL.
* `SQLite `__: for SQLite, this is included in Python's standard library by default.
-* `matplotlib `__: for plotting
+* `matplotlib `__: for plotting, Version 1.4.3 or higher.
* For Excel I/O:
* `xlrd/xlwt `__: Excel reading (xlrd) and writing (xlwt)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index e395264c723f0..7532730f6be27 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -138,6 +138,27 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_0210.api_breaking.deps:
+
+Dependencies have increased minimum versions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have updated our minimum supported versions of dependencies (:issue:`15206`, :issue:`15543`, :issue:`15214`).
+If installed, we now require:
+
+ +--------------+-----------------+----------+
+ | Package      | Minimum Version | Required |
+ +==============+=================+==========+
+ | Numpy        | 1.9.0           | X        |
+ +--------------+-----------------+----------+
+ | Matplotlib   | 1.4.3           |          |
+ +--------------+-----------------+----------+
+ | Scipy        | 0.14.0          |          |
+ +--------------+-----------------+----------+
+ | Bottleneck   | 1.0.0           |          |
+ +--------------+-----------------+----------+
+
.. _whatsnew_0210.api_breaking.pandas_eval:
Improved error handling during item assignment in pd.eval
@@ -259,7 +280,6 @@ Other API Changes
^^^^^^^^^^^^^^^^^
- Support has been dropped for Python 3.4 (:issue:`15251`)
-- Support has been dropped for bottleneck < 1.0.0 (:issue:`15214`)
- The Categorical constructor no longer accepts a scalar for the ``categories`` keyword. (:issue:`16022`)
- Accessing a non-existent attribute on a closed :class:`~pandas.HDFStore` will now
raise an ``AttributeError`` rather than a ``ClosedFileError`` (:issue:`16301`)
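To see how a local environment compares against the new floors in the table above, a throwaway check such as the following works; it is a standalone sketch, not part of the patch, and only NumPy is a hard requirement:

```python
from distutils.version import LooseVersion

# Floors taken from the v0.21.0 whatsnew table above.
MINIMUMS = {"numpy": "1.9.0", "matplotlib": "1.4.3",
            "scipy": "0.14.0", "bottleneck": "1.0.0"}

for name, floor in sorted(MINIMUMS.items()):
    try:
        mod = __import__(name)
    except ImportError:
        print("%-10s not installed (optional, except numpy)" % name)
        continue
    status = "OK" if LooseVersion(mod.__version__) >= LooseVersion(floor) else "TOO OLD"
    print("%-10s %-8s (needs >= %s) %s" % (name, mod.__version__, floor, status))
```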
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 0c2e056ead7fa..1cc7f5ace95ea 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -12,8 +12,6 @@ from distutils.version import LooseVersion
# numpy versioning
_np_version = np.version.short_version
-_np_version_under1p8 = LooseVersion(_np_version) < '1.8'
-_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
_np_version_under1p10 = LooseVersion(_np_version) < '1.10'
_np_version_under1p11 = LooseVersion(_np_version) < '1.11'
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 2c5a18973afa8..5112957b49875 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,19 +9,18 @@
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
-_np_version_under1p8 = _nlv < '1.8'
-_np_version_under1p9 = _nlv < '1.9'
_np_version_under1p10 = _nlv < '1.10'
_np_version_under1p11 = _nlv < '1.11'
_np_version_under1p12 = _nlv < '1.12'
_np_version_under1p13 = _nlv < '1.13'
_np_version_under1p14 = _nlv < '1.14'
+_np_version_under1p15 = _nlv < '1.15'
-if _nlv < '1.7.0':
+if _nlv < '1.9':
raise ImportError('this version of pandas is incompatible with '
- 'numpy < 1.7.0\n'
+ 'numpy < 1.9.0\n'
'your numpy version is {0}.\n'
- 'Please upgrade numpy to >= 1.7.0 to use '
+ 'Please upgrade numpy to >= 1.9.0 to use '
'this pandas version'.format(_np_version))
@@ -70,11 +69,10 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
__all__ = ['np',
- '_np_version_under1p8',
- '_np_version_under1p9',
'_np_version_under1p10',
'_np_version_under1p11',
'_np_version_under1p12',
'_np_version_under1p13',
- '_np_version_under1p14'
+ '_np_version_under1p14',
+ '_np_version_under1p15'
]
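The compat module keeps one boolean per numpy feature release so downstream code can gate on plain flags instead of re-parsing version strings. A reduced sketch of the pattern the hunk above keeps, including the new `_np_version_under1p15` flag and the raised import floor:

```python
from distutils.version import LooseVersion
import numpy as np

_nlv = LooseVersion(np.__version__)
_np_version_under1p15 = _nlv < '1.15'

if _nlv < '1.9':
    raise ImportError('this version of pandas is incompatible with numpy < 1.9.0\n'
                      'your numpy version is {0}.\n'
                      'Please upgrade numpy to >= 1.9.0 to use '
                      'this pandas version'.format(np.__version__))

# Callers then write feature gates against the flag:
if not _np_version_under1p15:
    pass  # numpy >= 1.15 behaviour can be relied on here
```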
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index f2359f3ff1a9d..ffd03096e2a27 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -6,7 +6,6 @@
from warnings import warn, catch_warnings
import numpy as np
-from pandas import compat, _np_version_under1p8
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndex,
@@ -407,14 +406,12 @@ def isin(comps, values):
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
- # GH11232
- # work-around for numpy < 1.8 and comparisions on py3
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
+
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
- if ((_np_version_under1p8 and compat.PY3) or len(comps) > 1000000 and
- not is_object_dtype(comps)):
+ if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
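With the numpy < 1.8 branch removed, the `isin` dispatch above reduces to a size-and-dtype check: hash-based membership for small or object-dtype inputs, `np.in1d` for large non-object arrays (GH16012 keeps object dtypes away from `np.in1d`). A standalone sketch of that dispatch, with a plain Python set standing in for pandas' internal `htable.ismember_object`:

```python
import numpy as np

def isin_sketch(comps, values):
    comps = np.asarray(comps)
    values = np.asarray(values)
    if len(comps) > 1000000 and comps.dtype != object:
        # np.in1d is faster for large non-object arrays
        return np.in1d(comps, values)
    vset = set(values.tolist())  # stand-in for the hash-table membership test
    return np.fromiter((x in vset for x in comps), dtype=bool, count=len(comps))

print(isin_sketch([1, 2, 3, 4], [2, 4]))  # [False  True False  True]
```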
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e84e4eac3f34d..f8366c804e3e7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1827,11 +1827,8 @@ def _box_item_values(self, key, values):
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
-
- numpy < 1.8 has an issue with object arrays and aliasing
- GH6026
"""
- self._data.set(item, value, check=pd._np_version_under1p8)
+ self._data.set(item, value, check=False)
@property
def _is_cached(self):
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index a388892e925b6..aa7c4517c0a01 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -13,7 +13,7 @@
)
from pandas import compat
-from pandas.compat.numpy import function as nv, _np_version_under1p8
+from pandas.compat.numpy import function as nv
from pandas.compat import set_function_name
from pandas.core.dtypes.common import (
@@ -3257,11 +3257,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
- if _np_version_under1p8:
- mi, ml = algorithms.factorize(m)
- d[ml] = d[ml] - np.bincount(mi)
- else:
- np.add.at(d, m, -1)
+ np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
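`np.add.at` (available from numpy 1.8) performs unbuffered in-place addition, so repeated indices each contribute once per occurrence; that is what the removed factorize/bincount branch was emulating for older numpy. A small demonstration of the difference from fancy-indexed assignment:

```python
import numpy as np

d = np.zeros(3, dtype=int)
m = np.array([0, 0, 2])       # index 0 occurs twice

buffered = d.copy()
buffered[m] -= 1              # fancy indexing: the duplicate index is applied only once
print(buffered)               # [-1  0 -1]

unbuffered = d.copy()
np.add.at(unbuffered, m, -1)  # unbuffered: every occurrence counts
print(unbuffered)             # [-2  0 -1]
```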
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index b616270e47aa6..83b382ec0ed72 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -69,8 +69,7 @@
import pandas.core.computation.expressions as expressions
from pandas.util._decorators import cache_readonly
from pandas.util._validators import validate_bool_kwarg
-
-from pandas import compat, _np_version_under1p9
+from pandas import compat
from pandas.compat import range, map, zip, u
@@ -857,9 +856,6 @@ def _is_empty_indexer(indexer):
# set
else:
- if _np_version_under1p9:
- # Work around GH 6168 to support old numpy
- indexer = getattr(indexer, 'values', indexer)
values[indexer] = value
# coerce and try to infer the dtypes of the result
@@ -1482,15 +1478,7 @@ def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
tuple of (axis, block)
"""
- if _np_version_under1p9:
- if interpolation != 'linear':
- raise ValueError("Interpolation methods other than linear "
- "are not supported in numpy < 1.9.")
-
- kw = {}
- if not _np_version_under1p9:
- kw.update({'interpolation': interpolation})
-
+ kw = {'interpolation': interpolation}
values = self.get_values()
values, _, _, _ = self._try_coerce_args(values, values)
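Because numpy >= 1.9 always understands the `interpolation` keyword of `np.percentile`, the block above can forward it unconditionally instead of branching on the numpy version. What the keyword changes, on a small example:

```python
import numpy as np

values = np.array([1, 2, 3, 4])

print(np.percentile(values, 40, interpolation='linear'))   # 2.2 (the default)
print(np.percentile(values, 40, interpolation='lower'))    # 2
print(np.percentile(values, 40, interpolation='higher'))   # 3
print(np.percentile(values, 40, interpolation='nearest'))  # 2
```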
diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py
index 2482e493dbefd..2f264874378bc 100644
--- a/pandas/tests/frame/test_quantile.py
+++ b/pandas/tests/frame/test_quantile.py
@@ -12,7 +12,6 @@
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
-from pandas import _np_version_under1p9
from pandas.tests.frame.common import TestData
@@ -103,9 +102,6 @@ def test_quantile_axis_parameter(self):
def test_quantile_interpolation(self):
# see gh-10174
- if _np_version_under1p9:
- pytest.skip("Numpy version under 1.9")
-
from numpy import percentile
# interpolation = linear (default case)
@@ -166,44 +162,6 @@ def test_quantile_interpolation(self):
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
- def test_quantile_interpolation_np_lt_1p9(self):
- # see gh-10174
- if not _np_version_under1p9:
- pytest.skip("Numpy version is greater than 1.9")
-
- from numpy import percentile
-
- # interpolation = linear (default case)
- q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
- assert q['A'] == percentile(self.tsframe['A'], 10)
- q = self.intframe.quantile(0.1)
- assert q['A'] == percentile(self.intframe['A'], 10)
-
- # test with and without interpolation keyword
- q1 = self.intframe.quantile(0.1)
- assert q1['A'] == np.percentile(self.intframe['A'], 10)
- assert_series_equal(q, q1)
-
- # interpolation method other than default linear
- msg = "Interpolation methods other than linear"
- df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
- with tm.assert_raises_regex(ValueError, msg):
- df.quantile(.5, axis=1, interpolation='nearest')
-
- with tm.assert_raises_regex(ValueError, msg):
- df.quantile([.5, .75], axis=1, interpolation='lower')
-
- # test degenerate case
- df = DataFrame({'x': [], 'y': []})
- with tm.assert_raises_regex(ValueError, msg):
- q = df.quantile(0.1, axis=0, interpolation='higher')
-
- # multi
- df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
- columns=['a', 'b', 'c'])
- with tm.assert_raises_regex(ValueError, msg):
- df.quantile([.25, .5], interpolation='midpoint')
-
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index acf887d047c9e..58f4d9b770173 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
@@ -26,8 +27,7 @@ class TestRank(TestData):
}
def test_rank(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
+ rankdata = pytest.importorskip('scipy.stats').rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
@@ -120,8 +120,7 @@ def test_rank2(self):
tm.assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
+ rankdata = pytest.importorskip('scipy.stats').rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
@@ -193,10 +192,9 @@ def test_rank_axis(self):
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_rank_methods_frame(self):
- tm.skip_if_no_package('scipy', min_version='0.13',
- app='scipy.stats.rankdata')
+ pytest.importorskip('scipy.special')
+ rankdata = pytest.importorskip('scipy.stats').rankdata
import scipy
- from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
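`pytest.importorskip` takes a module name, imports it (skipping the test when the import fails or when `minversion` is not met), and returns the module object, so functions are pulled off the returned module rather than named directly. A sketch of the pattern used in the rewritten rank tests; the `0.14.0` floor mirrors the whatsnew table and is only illustrative here:

```python
import pytest

def test_rank_requires_scipy():
    pytest.importorskip('scipy', minversion='0.14.0')       # skip if scipy is missing or too old
    rankdata = pytest.importorskip('scipy.stats').rankdata  # bind the function off the module
    assert list(rankdata([30, 10, 20])) == [3.0, 1.0, 2.0]
```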
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index f99dcee9e5c8a..47f53f53cfd02 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -9,7 +9,7 @@
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, Series, DataFrame,
- Timestamp, datetime, offsets, _np_version_under1p8)
+ Timestamp, datetime, offsets)
from pandas.util.testing import assert_series_equal, assert_almost_equal
@@ -276,11 +276,7 @@ def test_comparisons_nat(self):
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
- if _np_version_under1p8:
- # cannot test array because np.datetime('nat') returns today's date
- cases = [(fidx1, fidx2), (didx1, didx2)]
- else:
- cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
+ cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index d4dac1cf88fff..efc13a56cd77e 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -8,7 +8,7 @@
from pandas.compat import lrange
from pandas._libs import tslib
from pandas import (PeriodIndex, Series, DatetimeIndex,
- period_range, Period, _np_version_under1p9)
+ period_range, Period)
class TestGetItem(object):
@@ -149,16 +149,12 @@ def test_getitem_seconds(self):
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
- if _np_version_under1p9:
- with pytest.raises(ValueError):
- idx[v]
- else:
- # GH7116
- # these show deprecations as we are trying
- # to slice with non-integer indexers
- # with pytest.raises(IndexError):
- # idx[v]
- continue
+ # GH7116
+ # these show deprecations as we are trying
+ # to slice with non-integer indexers
+ # with pytest.raises(IndexError):
+ # idx[v]
+ continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
@@ -178,16 +174,12 @@ def test_getitem_day(self):
'2013/02/01 09:00']
for v in values:
- if _np_version_under1p9:
- with pytest.raises(ValueError):
- idx[v]
- else:
- # GH7116
- # these show deprecations as we are trying
- # to slice with non-integer indexers
- # with pytest.raises(IndexError):
- # idx[v]
- continue
+ # GH7116
+ # these show deprecations as we are trying
+ # to slice with non-integer indexers
+ # with pytest.raises(IndexError):
+ # idx[v]
+ continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 59e4b1432b8bc..0b3bd0b03bccf 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -7,7 +7,7 @@
import pandas.util.testing as tm
from pandas import (timedelta_range, date_range, Series, Timedelta,
DatetimeIndex, TimedeltaIndex, Index, DataFrame,
- Int64Index, _np_version_under1p8)
+ Int64Index)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_index_equal)
@@ -379,11 +379,7 @@ def test_comparisons_nat(self):
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
- if _np_version_under1p8:
- # cannot test array because np.datetime('nat') returns today's date
- cases = [(tdidx1, tdidx2)]
- else:
- cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
+ cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 3ab443b223f20..dfab539e9474c 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -39,7 +39,8 @@ def _ok_for_gaussian_kde(kind):
from scipy.stats import gaussian_kde # noqa
except ImportError:
return False
- return True
+
+ return plotting._compat._mpl_ge_1_5_0()
class TestPlotBase(object):
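`_ok_for_gaussian_kde` now requires both scipy's `gaussian_kde` and matplotlib >= 1.5, and the kde tests below skip on older matplotlib. A reduced, self-contained sketch of such a guard; the `_mpl_ge_1_5_0` predicate here is an assumption standing in for `plotting._compat._mpl_ge_1_5_0`, and the handling of non-kde kinds is simplified:

```python
from distutils.version import LooseVersion

def _mpl_ge_1_5_0():
    try:
        import matplotlib
    except ImportError:
        return False
    return LooseVersion(matplotlib.__version__) >= LooseVersion('1.5.0')

def _ok_for_gaussian_kde(kind):
    if kind not in ('kde', 'density'):
        return True
    try:
        from scipy.stats import gaussian_kde  # noqa: F401
    except ImportError:
        return False
    return _mpl_ge_1_5_0()
```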
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index e9c7d806fd65d..cff0c1c0b424e 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -610,6 +610,8 @@ def test_secondary_y_ts(self):
@pytest.mark.slow
def test_secondary_kde(self):
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 6d813ac76cc4e..67098529a0111 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -473,7 +473,6 @@ def test_subplots_multiple_axes(self):
# TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
- warnings.simplefilter('ignore')
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
@@ -1290,6 +1289,9 @@ def test_boxplot_subplots_return_type(self):
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
@@ -1311,6 +1313,9 @@ def test_kde_df(self):
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@@ -1835,6 +1840,8 @@ def test_hist_colors(self):
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
from matplotlib import cm
@@ -1858,6 +1865,8 @@ def test_kde_colors(self):
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
@@ -2160,71 +2169,74 @@ def test_pie_df_nan(self):
@pytest.mark.slow
def test_errorbar_plot(self):
- d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
- df = DataFrame(d)
- d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
- df_err = DataFrame(d_err)
-
- # check line plots
- ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
+ with warnings.catch_warnings():
+ d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
+ df = DataFrame(d)
+ d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
+ df_err = DataFrame(d_err)
- kinds = ['line', 'bar', 'barh']
- for kind in kinds:
- ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
+ # check line plots
+ ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
+ ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
- kind=kind)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'],
- kind=kind)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
- ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
- # _check_plot_works adds an ax so catch warning. see GH #13188
- with tm.assert_produces_warning(UserWarning):
+ ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ kinds = ['line', 'bar', 'barh']
+ for kind in kinds:
+ ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
+ kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+ ax = _check_plot_works(df.plot, yerr=df_err['x'],
+ xerr=df_err['x'],
+ kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+ ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+
+ # _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
- self._check_has_errorbars(axes, xerr=1, yerr=1)
-
- ax = _check_plot_works((df + 1).plot, yerr=df_err,
- xerr=df_err, kind='bar', log=True)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
+ self._check_has_errorbars(axes, xerr=1, yerr=1)
- # yerr is raw error values
- ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works((df + 1).plot, yerr=df_err,
+ xerr=df_err, kind='bar', log=True)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
- # yerr is iterator
- import itertools
- ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
- self._check_has_errorbars(ax, xerr=0, yerr=2)
+ # yerr is raw error values
+ ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+ ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
- # yerr is column name
- for yerr in ['yerr', u('誤差')]:
- s_df = df.copy()
- s_df[yerr] = np.ones(12) * 0.2
- ax = _check_plot_works(s_df.plot, yerr=yerr)
+ # yerr is iterator
+ import itertools
+ ax = _check_plot_works(df.plot,
+ yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- with pytest.raises(ValueError):
- df.plot(yerr=np.random.randn(11))
+ # yerr is column name
+ for yerr in ['yerr', u('誤差')]:
+ s_df = df.copy()
+ s_df[yerr] = np.ones(12) * 0.2
+ ax = _check_plot_works(s_df.plot, yerr=yerr)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
- df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
- with pytest.raises((ValueError, TypeError)):
- df.plot(yerr=df_err)
+ with pytest.raises(ValueError):
+ df.plot(yerr=np.random.randn(11))
+
+ df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
+ with pytest.raises((ValueError, TypeError)):
+ df.plot(yerr=df_err)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
@@ -2262,33 +2274,34 @@ def test_errorbar_with_partial_columns(self):
@pytest.mark.slow
def test_errorbar_timeseries(self):
- d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
- d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
+ with warnings.catch_warnings():
+ d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
+ d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
- # check time-series plots
- ix = date_range('1/1/2000', '1/1/2001', freq='M')
- tdf = DataFrame(d, index=ix)
- tdf_err = DataFrame(d_err, index=ix)
+ # check time-series plots
+ ix = date_range('1/1/2000', '1/1/2001', freq='M')
+ tdf = DataFrame(d, index=ix)
+ tdf_err = DataFrame(d_err, index=ix)
- kinds = ['line', 'bar', 'barh']
- for kind in kinds:
- ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
- kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- # _check_plot_works adds an ax so catch warning. see GH #13188
- with tm.assert_produces_warning(UserWarning):
+ kinds = ['line', 'bar', 'barh']
+ for kind in kinds:
+ ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+ ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
+ kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+ ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+ ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ # _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
- self._check_has_errorbars(axes, xerr=0, yerr=1)
+ self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 684a943fb5a69..c4795ea1e1eca 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -4,7 +4,7 @@
import pytest
-from pandas import Series, DataFrame
+from pandas import DataFrame
from pandas.compat import lmap
import pandas.util.testing as tm
@@ -13,8 +13,7 @@
from numpy.random import randn
import pandas.plotting as plotting
-from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
- _ok_for_gaussian_kde)
+from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
tm._skip_if_no_mpl()
@@ -52,46 +51,6 @@ def test_bootstrap_plot(self):
class TestDataFramePlots(TestPlotBase):
- @pytest.mark.slow
- def test_scatter_plot_legacy(self):
- tm._skip_if_no_scipy()
-
- df = DataFrame(randn(100, 2))
-
- def scat(**kwds):
- return plotting.scatter_matrix(df, **kwds)
-
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat)
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, marker='+')
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, vmin=0)
- if _ok_for_gaussian_kde('kde'):
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, diagonal='kde')
- if _ok_for_gaussian_kde('density'):
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, diagonal='density')
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, diagonal='hist')
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, range_padding=.1)
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, color='rgb')
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, c='rgb')
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat, facecolor='rgb')
-
- def scat2(x, y, by=None, ax=None, figsize=None):
- return plotting._core.scatter_plot(df, x, y, by, ax, figsize=None)
-
- _check_plot_works(scat2, x=0, y=1)
- grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
- with tm.assert_produces_warning(UserWarning):
- _check_plot_works(scat2, x=0, y=1, by=grouper)
-
def test_scatter_matrix_axis(self):
tm._skip_if_no_scipy()
scatter_matrix = plotting.scatter_matrix
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 9c9011ba1ca7b..8164ad74a190a 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -571,6 +571,9 @@ def test_plot_fails_with_dupe_color_and_style(self):
@pytest.mark.slow
def test_hist_kde(self):
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, ax=ax)
self._check_ax_scales(ax, yaxis='log')
@@ -596,6 +599,9 @@ def test_hist_kde(self):
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5,
ind=linspace(-100, 100, 20))
@@ -611,6 +617,9 @@ def test_kde_kwargs(self):
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
@@ -638,6 +647,9 @@ def test_hist_kwargs(self):
@pytest.mark.slow
def test_hist_kde_color(self):
+ if not self.mpl_ge_1_5_0:
+ pytest.skip("matplotlib >= 1.5 is required for this test")
+
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax)
self._check_ax_scales(ax, yaxis='log')
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 4888f8fe996b6..114a055de8195 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -14,8 +14,7 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, isna, bdate_range,
- NaT, date_range, timedelta_range,
- _np_version_under1p8)
+ NaT, date_range, timedelta_range)
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.nanops as nanops
@@ -687,14 +686,13 @@ def run_ops(ops, get_ser, test_ser):
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
- if not _np_version_under1p8:
- result = td1[0] + dt1
- exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
- assert_series_equal(result, exp)
+ result = td1[0] + dt1
+ exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
+ assert_series_equal(result, exp)
- result = td2[0] + dt2
- exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
- assert_series_equal(result, exp)
+ result = td2[0] + dt2
+ exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
+ assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index 21379641a78d8..cf5e3fe4f29b0 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -1,11 +1,10 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
import numpy as np
import pandas as pd
-from pandas import (Index, Series, _np_version_under1p9)
+from pandas import Index, Series
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
@@ -68,8 +67,6 @@ def test_quantile_multi(self):
[], dtype=float))
tm.assert_series_equal(result, expected)
- @pytest.mark.skipif(_np_version_under1p9,
- reason="Numpy version is under 1.9")
def test_quantile_interpolation(self):
# see gh-10174
@@ -82,8 +79,6 @@ def test_quantile_interpolation(self):
# test with and without interpolation keyword
assert q == q1
- @pytest.mark.skipif(_np_version_under1p9,
- reason="Numpy version is under 1.9")
def test_quantile_interpolation_dtype(self):
# GH #10174
@@ -96,26 +91,6 @@ def test_quantile_interpolation_dtype(self):
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
- @pytest.mark.skipif(not _np_version_under1p9,
- reason="Numpy version is greater 1.9")
- def test_quantile_interpolation_np_lt_1p9(self):
- # GH #10174
-
- # interpolation = linear (default case)
- q = self.ts.quantile(0.1, interpolation='linear')
- assert q == np.percentile(self.ts.valid(), 10)
- q1 = self.ts.quantile(0.1)
- assert q1 == np.percentile(self.ts.valid(), 10)
-
- # interpolation other than linear
- msg = "Interpolation methods other than "
- with tm.assert_raises_regex(ValueError, msg):
- self.ts.quantile(0.9, interpolation='nearest')
-
- # object dtype
- with tm.assert_raises_regex(ValueError, msg):
- Series(self.ts, dtype=object).quantile(0.7, interpolation='higher')
-
def test_quantile_nan(self):
# GH 13098
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index ff489eb7f15b1..128a4cdd845e6 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -28,8 +28,8 @@ class TestSeriesRank(TestData):
}
def test_rank(self):
- tm._skip_if_no_scipy()
- from scipy.stats import rankdata
+ pytest.importorskip('scipy.special')
+ rankdata = pytest.importorskip('scipy.stats').rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
@@ -246,10 +246,9 @@ def _check(s, expected, method='average'):
_check(series, results[method], method=method)
def test_rank_methods_series(self):
- tm.skip_if_no_package('scipy', min_version='0.13',
- app='scipy.stats.rankdata')
+ pytest.importorskip('scipy.special')
+ rankdata = pytest.importorskip('scipy.stats').rankdata
import scipy
- from scipy.stats import rankdata
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py
index 4ce03f72dbba6..b0a9182a265fe 100644
--- a/pandas/tests/sparse/test_array.py
+++ b/pandas/tests/sparse/test_array.py
@@ -8,7 +8,6 @@
from numpy import nan
import numpy as np
-from pandas import _np_version_under1p8
from pandas.core.sparse.api import SparseArray, SparseSeries
from pandas._libs.sparse import IntIndex
from pandas.util.testing import assert_almost_equal
@@ -150,10 +149,8 @@ def test_take(self):
assert np.isnan(self.arr.take(0))
assert np.isscalar(self.arr.take(2))
- # np.take in < 1.8 doesn't support scalar indexing
- if not _np_version_under1p8:
- assert self.arr.take(2) == np.take(self.arr_data, 2)
- assert self.arr.take(6) == np.take(self.arr_data, 6)
+ assert self.arr.take(2) == np.take(self.arr_data, 2)
+ assert self.arr.take(6) == np.take(self.arr_data, 6)
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 2a22fc9d32919..9305504f8d5e3 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -8,7 +8,7 @@
import numpy as np
import pandas as pd
-from pandas import Series, isna, _np_version_under1p9
+from pandas import Series, isna
from pandas.core.dtypes.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
@@ -340,15 +340,13 @@ def test_nanmean_overflow(self):
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
- # numpy < 1.9.0 is not computing this correctly
- if not _np_version_under1p9:
- for a in [2 ** 55, -2 ** 55, 20150515061816532]:
- s = Series(a, index=range(500), dtype=np.int64)
- result = s.mean()
- np_result = s.values.mean()
- assert result == a
- assert result == np_result
- assert result.dtype == np.float64
+ for a in [2 ** 55, -2 ** 55, 20150515061816532]:
+ s = Series(a, index=range(500), dtype=np.int64)
+ result = s.mean()
+ np_result = s.values.mean()
+ assert result == a
+ assert result == np_result
+ assert result.dtype == np.float64
def test_returned_dtype(self):
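The overflow test now runs on every supported numpy: for an int64 Series filled with one large constant, `Series.mean()` recovers the constant exactly and matches `ndarray.mean()`. The same assertion as a standalone snippet (this is the behaviour the test above checks, nothing more):

```python
import numpy as np
from pandas import Series

for a in [2 ** 55, -2 ** 55, 20150515061816532]:
    s = Series(a, index=range(500), dtype=np.int64)
    assert s.mean() == a                 # no int64 overflow while averaging
    assert s.mean() == s.values.mean()   # consistent with numpy
```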
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index d938d5bf9f3ab..d42e37048d87f 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -1688,7 +1688,7 @@ def test_resample_dtype_preservation(self):
def test_resample_dtype_coerceion(self):
- pytest.importorskip('scipy')
+ pytest.importorskip('scipy.interpolate')
# GH 16361
df = {"a": [1, 3, 1, 4]}
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index 664a97640387e..1d13ba93ba759 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -3,7 +3,7 @@
import numpy as np
import pandas as pd
-from pandas import to_numeric, _np_version_under1p9
+from pandas import to_numeric
from pandas.util import testing as tm
from numpy import iinfo
@@ -355,9 +355,6 @@ def test_downcast(self):
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
- # Check to make sure numpy is new enough to run this test.
- if _np_version_under1p9:
- pytest.skip("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
diff --git a/setup.py b/setup.py
index a912b25328954..04a5684c20fcd 100755
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ def is_platform_mac():
_have_setuptools = False
setuptools_kwargs = {}
-min_numpy_ver = '1.7.0'
+min_numpy_ver = '1.9.0'
if sys.version_info[0] >= 3:
setuptools_kwargs = {
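`min_numpy_ver` is the one place setup.py keeps the NumPy floor; the requirement strings handed to setuptools are then built from it. A hedged sketch of how such a constant typically feeds the packaging metadata; the surrounding keys and the dateutil/pytz pins shown here are illustrative, not copied from pandas' setup.py:

```python
min_numpy_ver = '1.9.0'

setuptools_kwargs = {
    'install_requires': [
        'python-dateutil >= 1.5',
        'pytz >= 2011k',
        'numpy >= %s' % min_numpy_ver,
    ],
    'setup_requires': ['numpy >= %s' % min_numpy_ver],
    'zip_safe': False,
}
```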