diff --git a/pandas/computation/align.py b/pandas/computation/align.py
index 9834dd1a9e7fc..b5f730378c3cf 100644
--- a/pandas/computation/align.py
+++ b/pandas/computation/align.py
@@ -101,7 +101,8 @@ def _align_core(terms):
                               'than an order of magnitude on term {1!r}, '
                               'by more than {2:.4g}; performance may '
                               'suffer'.format(axis, terms[i].name, ordm),
-                              category=pd.io.common.PerformanceWarning)
+                              category=pd.io.common.PerformanceWarning,
+                              stacklevel=6)

         if transpose:
             f = partial(ti.reindex, index=reindexer, copy=False)
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 4290be3e1abba..bc4e60f70f2b4 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -535,7 +535,7 @@ def parse_back_compat(self, w, op=None, value=None):
             w, op, value = w
             warnings.warn("passing a tuple into Expr is deprecated, "
                           "pass the where as a single string",
-                          DeprecationWarning)
+                          DeprecationWarning, stacklevel=10)

         if op is not None:
             if not isinstance(w, string_types):
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 36d31d493b10d..8f1dab4f8b511 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -125,8 +125,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
     note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex
     """
     if order is not None:
-        warn("order is deprecated."
-             "See https://github.com/pydata/pandas/issues/6926", FutureWarning)
+        msg = "order is deprecated. See https://github.com/pydata/pandas/issues/6926"
+        warn(msg, FutureWarning, stacklevel=2)

     from pandas.core.index import Index
     from pandas.core.series import Series
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 4a6a26f21b5bf..2f465ded12bd6 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -414,7 +414,7 @@ def _get_labels(self):

         Deprecated, use .codes!
         """
-        warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=3)
+        warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=2)
         return self.codes

     labels = property(fget=_get_labels, fset=_set_codes)
@@ -456,7 +456,7 @@ def _validate_categories(cls, categories, fastpath=False):
                 # NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748
                 msg = ('\nSetting NaNs in `categories` is deprecated and '
                        'will be removed in a future version of pandas.')
-                warn(msg, FutureWarning, stacklevel=5)
+                warn(msg, FutureWarning, stacklevel=3)

         # categories must be unique

@@ -491,12 +491,12 @@ def _get_categories(self):

     def _set_levels(self, levels):
         """ set new levels (deprecated, use "categories") """
-        warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
+        warn("Assigning to 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
         self.categories = levels

     def _get_levels(self):
         """ Gets the levels (deprecated, use "categories") """
-        warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=3)
+        warn("Accessing 'levels' is deprecated, use 'categories'", FutureWarning, stacklevel=2)
         return self.categories

     # TODO: Remove after deprecation period in 2017/ after 0.18
@@ -507,7 +507,7 @@ def _get_levels(self):
     def _set_ordered(self, value):
         """ Sets the ordered attribute to the boolean value """
         warn("Setting 'ordered' directly is deprecated, use 'set_ordered'", FutureWarning,
-             stacklevel=3)
+             stacklevel=2)
         self.set_ordered(value, inplace=True)

     def set_ordered(self, value, inplace=False):
@@ -1200,7 +1200,7 @@ def order(self, inplace=False, ascending=True, na_position='last'):
         Category.sort
         """
         warn("order is deprecated, use sort_values(...)",
-             FutureWarning, stacklevel=3)
+             FutureWarning, stacklevel=2)
         return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)

     def sort(self, inplace=True, ascending=True, na_position='last'):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1cf2de69b2a66..c5c0f9e82fa94 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1468,7 +1468,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,

         if colSpace is not None:  # pragma: no cover
             warnings.warn("colSpace is deprecated, use col_space",
-                          FutureWarning)
+                          FutureWarning, stacklevel=2)
             col_space = colSpace

         formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
@@ -1517,7 +1517,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,

         if colSpace is not None:  # pragma: no cover
             warnings.warn("colSpace is deprecated, use col_space",
-                          FutureWarning)
+                          FutureWarning, stacklevel=2)
             col_space = colSpace

         formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
@@ -2919,7 +2919,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
         return result

     @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
-    @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
+    @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
     def drop_duplicates(self, subset=None, keep='first', inplace=False):
         """
         Return DataFrame with duplicate rows removed, optionally only
@@ -2953,7 +2953,7 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False):
             return self[-duplicated]

     @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
-    @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
+    @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset', stacklevel=3)
     def duplicated(self, subset=None, keep='first'):
         """
         Return boolean Series denoting duplicate rows, optionally only
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9c170286006f2..d3a63f9f5d851 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -706,7 +706,7 @@ def iterkv(self, *args, **kwargs):
         "iteritems alias used to get around 2to3. Deprecated"
         warnings.warn("iterkv is deprecated and will be removed in a future "
                       "release, use ``iteritems`` instead.",
-                      FutureWarning)
+                      FutureWarning, stacklevel=2)
         return self.iteritems(*args, **kwargs)

     def __len__(self):
@@ -3376,11 +3376,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
             For frequencies that evenly subdivide 1 day, the "origin" of the
             aggregated intervals. For example, for '5min' frequency, base could
             range from 0 through 4. Defaults to 0
-
+
         Examples
         --------
-
+
         Start by creating a series with 9 one minute timestamps.

         >>> index = pd.date_range('1/1/2000', periods=9, freq='T')
@@ -3409,11 +3409,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
         Downsample the series into 3 minute bins as above, but label each
         bin using the right edge instead of the left. Please note that the
         value in the bucket used as the label is not included in the bucket,
-        which it labels. For example, in the original series the
+        which it labels. For example, in the original series the
         bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
-        value in the resampled bucket with the label``2000-01-01 00:03:00``
+        value in the resampled bucket with the label``2000-01-01 00:03:00``
         does not include 3 (if it did, the summed value would be 6, not 3).
-        To include this value close the right side of the bin interval as
+        To include this value close the right side of the bin interval as
         illustrated in the example below this one.

         >>> series.resample('3T', how='sum', label='right')
@@ -3424,7 +3424,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,

         Downsample the series into 3 minute bins as above, but close the right
         side of the bin interval.
-
+
         >>> series.resample('3T', how='sum', label='right', closed='right')
         2000-01-01 00:00:00     0
         2000-01-01 00:03:00     6
@@ -3453,7 +3453,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,
         2000-01-01 00:02:00    2
         Freq: 30S, dtype: int64

-        Upsample the series into 30 second bins and fill the
+        Upsample the series into 30 second bins and fill the
         ``NaN`` values using the ``bfill`` method.

         >>> series.resample('30S', fill_method='bfill')[0:5]
@@ -3468,7 +3468,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None,

         >>> def custom_resampler(array_like):
         ...     return np.sum(array_like)+5
-
+
         >>> series.resample('3T', how=custom_resampler)
         2000-01-01 00:00:00     8
         2000-01-01 00:03:00    17
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 4d0b395a401ac..14ba2dea0b76c 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -842,14 +842,14 @@ def to_int():
             elif is_float(key):
                 key = to_int()
                 warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
-                    type(self).__name__),FutureWarning, stacklevel=8)
+                    type(self).__name__), FutureWarning, stacklevel=5)
                 return key
             return self._invalid_indexer('label', key)

         if is_float(key):
             if not self.is_floating():
                 warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
-                    type(self).__name__),FutureWarning, stacklevel=8)
+                    type(self).__name__), FutureWarning, stacklevel=3)
             return to_int()

         return key
@@ -887,7 +887,7 @@ def f(c):
                 # warn if it's a convertible float
                 if v == int(v):
                     warnings.warn("slice indexers when using iloc should be integers "
-                                  "and not floating point",FutureWarning)
+                                  "and not floating point", FutureWarning, stacklevel=7)
                     return int(v)

                 self._invalid_indexer('slice {0} value'.format(c), v)
@@ -1415,7 +1415,7 @@ def argsort(self, *args, **kwargs):

     def __add__(self, other):
         if com.is_list_like(other):
             warnings.warn("using '+' to provide set union with Indexes is deprecated, "
-                          "use '|' or .union()", FutureWarning)
+                          "use '|' or .union()", FutureWarning, stacklevel=2)
         if isinstance(other, Index):
             return self.union(other)
         return Index(np.array(self) + other)
@@ -1423,14 +1423,14 @@ def __add__(self, other):
     def __radd__(self, other):
         if com.is_list_like(other):
             warnings.warn("using '+' to provide set union with Indexes is deprecated, "
-                          "use '|' or .union()", FutureWarning)
+                          "use '|' or .union()", FutureWarning, stacklevel=2)
         return Index(other + np.array(self))

     __iadd__ = __add__

     def __sub__(self, other):
         warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
-                      "use .difference()",FutureWarning)
+                      "use .difference()",FutureWarning, stacklevel=2)
         return self.difference(other)

     def __and__(self, other):
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 7837fb60da9d6..dddc1f4898908 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -204,7 +204,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):

         if regex.groups > 0:
             warnings.warn("This pattern has match groups. To actually get the"
-                          " groups, use str.extract.", UserWarning)
+                          " groups, use str.extract.", UserWarning, stacklevel=3)

         f = lambda x: bool(regex.search(x))
     else:
@@ -377,11 +377,12 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
         # Do this first, to make sure it happens even if the re.compile
         # raises below.
         warnings.warn("In future versions of pandas, match will change to"
-                      " always return a bool indexer.", UserWarning)
+                      " always return a bool indexer.", FutureWarning,
+                      stacklevel=3)

     if as_indexer and regex.groups > 0:
         warnings.warn("This pattern has match groups. To actually get the"
-                      " groups, use str.extract.", UserWarning)
+                      " groups, use str.extract.", UserWarning, stacklevel=3)

     # If not as_indexer and regex.groups == 0, this returns empty lists
     # and is basically useless, so we will not warn.
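The library-side changes above all follow one pattern: `stacklevel` tells `warnings.warn` how many frames to walk up when attributing the warning, so a deprecation raised inside pandas is reported at the user's call site rather than somewhere inside the library. A minimal, self-contained sketch of that mechanism (the function names are illustrative, not part of this patch):

    import warnings

    def old_api():
        # stacklevel=2 attributes the warning to the caller of old_api(),
        # not to this line inside the library module.
        warnings.warn("old_api is deprecated, use new_api instead",
                      FutureWarning, stacklevel=2)
        return new_api()

    def new_api():
        return 42

    warnings.simplefilter("always")
    old_api()  # the FutureWarning is reported for this line

With the default `stacklevel=1` the warning would point into `old_api` itself; the values chosen in the patch (2, 3, 5, ...) vary because some warn calls sit several helper frames below the public entry point.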
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 829ff4f28ca1b..1a4c45628a256 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -608,7 +608,7 @@ def __init__(self, symbol, data_source=None):
         self.symbol = symbol.upper()
         if data_source is None:
             warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
-                          " data_source) instead", FutureWarning)
+                          " data_source) instead", FutureWarning, stacklevel=2)
             data_source = "yahoo"
         if data_source != "yahoo":
             raise NotImplementedError("currently only yahoo supported")
@@ -1072,7 +1072,8 @@ def get_forward_data(self, months, call=True, put=False, near=False,
         Note: Format of returned data frame is dependent on Yahoo and may change.

         """
-        warnings.warn("get_forward_data() is deprecated", FutureWarning)
+        warnings.warn("get_forward_data() is deprecated", FutureWarning,
+                      stacklevel=2)
         end_date = dt.date.today() + MonthEnd(months)
         dates = (date for date in self.expiry_dates if date <= end_date.date())
         data = self._get_data_in_date_range(dates, call=call, put=put)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6801e8935e079..f0c994ba17e27 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -647,7 +647,7 @@ def _clean_options(self, options, engine):
             warnings.warn(("Falling back to the 'python' engine because"
                            " {0}; you can avoid this warning by specifying"
                            " engine='python'.").format(fallback_reason),
-                          ParserWarning)
+                          ParserWarning, stacklevel=5)

         index_col = options['index_col']
         names = options['names']
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ea0a59ce2ab31..b5a3577b36d4c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1596,7 +1596,7 @@ def update_info(self, info):
                 # frequency/name just warn
                 if key in ['freq', 'index_name']:
                     ws = attribute_conflict_doc % (key, existing_value, value)
-                    warnings.warn(ws, AttributeConflictWarning)
+                    warnings.warn(ws, AttributeConflictWarning, stacklevel=6)

                     # reset
                     idx[key] = None
@@ -2581,7 +2581,7 @@ def write_array(self, key, value, items=None):
             except:
                 pass
             ws = performance_doc % (inferred_type, key, items)
-            warnings.warn(ws, PerformanceWarning)
+            warnings.warn(ws, PerformanceWarning, stacklevel=7)

             vlarr = self._handle.create_vlarray(self.group, key,
                                                 _tables().ObjectAtom())
@@ -3716,7 +3716,7 @@ def read(self, where=None, columns=None, **kwargs):
                 objs.append(obj)

         else:
-            warnings.warn(duplicate_doc, DuplicateWarning)
+            warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)

             # reconstruct
             long_index = MultiIndex.from_arrays(
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index b587ec128c016..c0b69e435f494 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -200,7 +200,7 @@ def tquery(sql, con=None, cur=None, retry=True):
     warnings.warn(
         "tquery is deprecated, and will be removed in future versions. "
         "You can use ``execute(...).fetchall()`` instead.",
-        FutureWarning)
+        FutureWarning, stacklevel=2)

     cur = execute(sql, con, cur=cur)
     result = _safe_fetch(cur)
@@ -255,7 +255,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
     warnings.warn(
         "uquery is deprecated, and will be removed in future versions. "
         "You can use ``execute(...).rowcount`` instead.",
-        FutureWarning)
+        FutureWarning, stacklevel=2)

     cur = execute(sql, con, cur=cur, params=params)

@@ -328,7 +328,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
     read_sql

     """
-
+
     con = _engine_builder(con)
     if not _is_sqlalchemy_connectable(con):
         raise NotImplementedError("read_sql_table only supported for "
@@ -364,7 +364,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
     ----------
     sql : string
         SQL query to be executed
-    con : SQLAlchemy connectable(engine/connection) or database string URI
+    con : SQLAlchemy connectable(engine/connection) or database string URI
         or sqlite3 DBAPI2 connection
         Using SQLAlchemy makes it possible to use any DB supported by that
         library.
@@ -618,7 +618,7 @@ def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
         return SQLDatabase(con, schema=schema, meta=meta)
     else:
         if flavor == 'mysql':
-            warnings.warn(_MYSQL_WARNING, FutureWarning)
+            warnings.warn(_MYSQL_WARNING, FutureWarning, stacklevel=3)
         return SQLiteDatabase(con, flavor, is_cursor=is_cursor)


@@ -957,7 +957,7 @@ def _sqlalchemy_type(self, col):
         if col_type == 'timedelta64':
             warnings.warn("the 'timedelta' type is not supported, and will be "
                           "written as integer values (ns frequency) to the "
-                          "database.", UserWarning)
+                          "database.", UserWarning, stacklevel=8)
             return BigInteger
         elif col_type == 'floating':
             if col.dtype == 'float32':
@@ -1409,7 +1409,7 @@ def _create_table_setup(self):
         pat = re.compile('\s+')
         column_names = [col_name for col_name, _, _ in column_names_and_types]
         if any(map(pat.search, column_names)):
-            warnings.warn(_SAFE_NAMES_WARNING)
+            warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
         flv = self.pd_sql.flavor
         escape = _SQL_GET_IDENTIFIER[flv]

@@ -1450,7 +1450,7 @@ def _sql_type_name(self, col):
         if col_type == 'timedelta64':
             warnings.warn("the 'timedelta' type is not supported, and will be "
                           "written as integer values (ns frequency) to the "
-                          "database.", UserWarning)
+                          "database.", UserWarning, stacklevel=8)
             col_type = "integer"

         elif col_type == "datetime64":
@@ -1672,7 +1672,8 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None, dtype=None):

 def read_frame(*args, **kwargs):
     """DEPRECATED - use read_sql
     """
-    warnings.warn("read_frame is deprecated, use read_sql", FutureWarning)
+    warnings.warn("read_frame is deprecated, use read_sql", FutureWarning,
+                  stacklevel=2)
     return read_sql(*args, **kwargs)


@@ -1680,7 +1681,8 @@ def read_frame(*args, **kwargs):
 def frame_query(*args, **kwargs):
     """DEPRECATED - use read_sql
     """
-    warnings.warn("frame_query is deprecated, use read_sql", FutureWarning)
+    warnings.warn("frame_query is deprecated, use read_sql", FutureWarning,
+                  stacklevel=2)
     return read_sql(*args, **kwargs)


@@ -1718,7 +1720,8 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
     pandas.DataFrame.to_sql

     """
-    warnings.warn("write_frame is deprecated, use to_sql", FutureWarning)
+    warnings.warn("write_frame is deprecated, use to_sql", FutureWarning,
+                  stacklevel=2)

     # for backwards compatibility, set index=False when not specified
     index = kwargs.pop('index', False)
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index b4f1e6a429198..0fb3237152db0 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -122,7 +122,7 @@ def _maybe_remove(store, key):
         pass


-def compat_assert_produces_warning(w,f):
+def compat_assert_produces_warning(w, f):
     """ don't produce a warning under PY3 """
     if compat.PY3:
         f()
@@ -2516,7 +2516,8 @@ def test_terms(self):
                 [ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
             ]
             for t in terms:
-                with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+                with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                                check_stacklevel=False):
                     Term(t)

             # valid terms
@@ -2609,7 +2610,8 @@ def test_backwards_compat_without_term_object(self):
                        major_axis=date_range('1/1/2000', periods=5),
                        minor_axis=['A', 'B', 'C', 'D'])
             store.append('wp',wp)
-            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                            check_stacklevel=not compat.PY3):
                 result = store.select('wp', [('major_axis>20000102'),
                                              ('minor_axis', '=', ['A','B']) ])
             expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
@@ -2628,20 +2630,24 @@ def test_backwards_compat_without_term_object(self):
             store.append('wp',wp)

             # stringified datetimes
-            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                            check_stacklevel=not compat.PY3):
                 result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
             expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
             assert_panel_equal(result, expected)
-            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                            check_stacklevel=not compat.PY3):
                 result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
             expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
             assert_panel_equal(result, expected)
-            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                            check_stacklevel=not compat.PY3):
                 result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
                                                                 datetime.datetime(2000,1,3,0,0)])])
             expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
             assert_panel_equal(result, expected)
-            with tm.assert_produces_warning(expected_warning=DeprecationWarning):
+            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
+                                            check_stacklevel=not compat.PY3):
                 result = store.select('wp', [('minor_axis','=',['A','B'])])
             expected = wp.loc[:,:,['A','B']]
             assert_panel_equal(result, expected)
@@ -4528,7 +4534,7 @@ def f():
             s = Series(np.random.randn(len(unicode_values)), unicode_values)
             self._check_roundtrip(s, tm.assert_series_equal)

-        compat_assert_produces_warning(PerformanceWarning,f)
+        compat_assert_produces_warning(PerformanceWarning, f)

     def test_store_datetime_mixed(self):
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 5baef2e4f0225..c2916f2c0cfb8 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -1838,7 +1838,7 @@ def _concatenate_chunks(list chunks):
            warning_message = " ".join(["Columns (%s) have mixed types." % warning_names,
                                        "Specify dtype option on import or set low_memory=False."
                                      ])
-            warnings.warn(warning_message, DtypeWarning)
+            warnings.warn(warning_message, DtypeWarning, stacklevel=8)
     return result

#----------------------------------------------------------------------
diff --git a/pandas/rpy/__init__.py b/pandas/rpy/__init__.py
index bad7ebc580ce2..8c92ce5842e15 100644
--- a/pandas/rpy/__init__.py
+++ b/pandas/rpy/__init__.py
@@ -8,7 +8,7 @@
               "like rpy2. "
               "\nSee here for a guide on how to port your code to rpy2: "
               "http://pandas.pydata.org/pandas-docs/stable/r_interface.html",
-              FutureWarning)
+              FutureWarning, stacklevel=2)

try:
    from .common import importr, r, load_data
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 586d507b27493..2e13082ee5366 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -122,7 +122,7 @@
 When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
 relative positions. For example, the weights of x and y used in calculating
-the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
+the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
 True), and 1-alpha and alpha (if adjust is False).
 """

@@ -344,7 +344,8 @@ def dataframe_from_int_dict(data, frame_template):
 def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None,
                           freq=None, center=False):
     import warnings
-    warnings.warn("rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)", FutureWarning)
+    msg = "rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)"
+    warnings.warn(msg, FutureWarning, stacklevel=2)
     return rolling_corr(df1, df2, window=window, min_periods=min_periods,
                         freq=freq, center=center, pairwise=True)

@@ -399,7 +400,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,

     if center:
         result = _center_window(result, window, axis)
-
+
     return return_hook(result)


@@ -998,7 +999,8 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
 @Appender(_doc_template)
 def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
     import warnings
-    warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning)
+    msg = "expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)"
+    warnings.warn(msg, FutureWarning, stacklevel=2)
     return expanding_corr(df1, df2, min_periods=min_periods,
                           freq=freq, pairwise=True)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index d847638ff105e..f687ecbef35cb 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1590,7 +1590,7 @@ def test_nan_handling(self):

         # Changing categories should also make the replaced category np.nan
         s3 = Series(Categorical(["a","b","c","a"]))
-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             s3.cat.categories = ["a","b",np.nan]
             self.assert_numpy_array_equal(s3.cat.categories,
                                           np.array(["a","b",np.nan], dtype=np.object_))
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 19fd45cdf6ad2..3bd76dfb9da61 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -382,32 +382,32 @@ def test_bool_ops_warn_on_arithmetic(self):
             fe = getattr(operator, sub_funcs[subs[op]])

             with tm.use_numexpr(True, min_elements=5):
-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(df, df)
                     e = fe(df, df)
                     tm.assert_frame_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(df.a, df.b)
                     e = fe(df.a, df.b)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(df.a, True)
                     e = fe(df.a, True)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(False, df.a)
                     e = fe(False, df.a)
                     tm.assert_series_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(False, df)
                     e = fe(False, df)
                     tm.assert_frame_equal(r, e)

-                with tm.assert_produces_warning():
+                with tm.assert_produces_warning(check_stacklevel=False):
                     r = f(df, True)
                     e = fe(df, True)
                     tm.assert_frame_equal(r, e)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index 7877ee3c5a6cc..6c92bb7095d8b 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2734,7 +2734,7 @@ def test_round_dataframe(self):
         non_int_round_dict = {'col1': 1, 'col2': 0.5}
         if sys.version < LooseVersion('2.7'):
             # np.round([1.123, 2.123], 0.5) is only a warning in Python 2.6
-            with self.assert_produces_warning(DeprecationWarning):
+            with self.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
                 df.round(non_int_round_dict)
         else:
             with self.assertRaises(TypeError):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 659a5925be6f1..aea165b907c05 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -1440,7 +1440,8 @@ def test_getitem_setitem_float_labels(self):
         df = DataFrame(np.random.randn(5, 5), index=index)

         # positional slicing only via iloc!
-        with tm.assert_produces_warning(FutureWarning):
+        # check_stacklevel=False -> needed stacklevel depends on index type
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             result = df.iloc[1.0:5]

         expected = df.reindex([2.5, 3.5, 4.5, 5.0])
diff --git a/pandas/tests/test_graphics_others.py b/pandas/tests/test_graphics_others.py
index f461a8ab624dc..641180c8010c0 100644
--- a/pandas/tests/test_graphics_others.py
+++ b/pandas/tests/test_graphics_others.py
@@ -677,7 +677,7 @@ def test_grouped_box_return_type(self):
                                     expected_keys=['height', 'weight', 'category'])

         # now for groupby
-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             result = df.groupby('gender').boxplot()
         self._check_box_return_type(result, 'dict',
                                     expected_keys=['Male', 'Female'])
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 30a5716831087..9b2c1bf1a09ee 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1884,7 +1884,7 @@ def test_contains(self):
         self.assertFalse(0 in ci)
         self.assertFalse(1 in ci)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             ci = CategoricalIndex(list('aabbca'),
                                   categories=list('cabdef') + [np.nan])
         self.assertFalse(np.nan in ci)
@@ -2101,7 +2101,7 @@ def test_equals(self):
         # tests
         # make sure that we are testing for category inclusion properly
         self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b']).equals(list('aabca')))
-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b',np.nan]).equals(list('aabca')))

         self.assertFalse(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b']).equals(list('aabca')))
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 6a9d4096ad4b3..c48807365913c 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -4180,11 +4180,16 @@ def test_slice_indexer(self):

         def check_iloc_compat(s):
             # invalid type for iloc (but works with a warning)
-            with self.assert_produces_warning(FutureWarning):
+            # check_stacklevel=False -> impossible to get it right for all
+            # index types
+            with self.assert_produces_warning(
+                    FutureWarning, check_stacklevel=False):
                 s.iloc[6.0:8]
-            with self.assert_produces_warning(FutureWarning):
+            with self.assert_produces_warning(
+                    FutureWarning, check_stacklevel=False):
                 s.iloc[6.0:8.0]
-            with self.assert_produces_warning(FutureWarning):
+            with self.assert_produces_warning(
+                    FutureWarning, check_stacklevel=False):
                 s.iloc[6:8.0]

         def check_slicing_positional(index):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 7886a63c6df46..31623d5c277c4 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1896,10 +1896,8 @@ def test_match_findall_flags(self):

         pat = pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'

-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always')
+        with tm.assert_produces_warning(FutureWarning):
             result = data.str.match(pat, flags=re.IGNORECASE)
-            assert issubclass(w[-1].category, UserWarning)
         self.assertEqual(result[0], ('dave', 'google', 'com'))

         result = data.str.findall(pat, flags=re.IGNORECASE)
@@ -1908,10 +1906,8 @@ def test_match_findall_flags(self):
         result = data.str.count(pat, flags=re.IGNORECASE)
         self.assertEqual(result[0], 1)

-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always')
+        with tm.assert_produces_warning(UserWarning):
             result = data.str.contains(pat, flags=re.IGNORECASE)
-            assert issubclass(w[-1].category, UserWarning)
         self.assertEqual(result[0], True)

     def test_encode_decode(self):
diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py
index 466ad3f220020..2b5443e6ff0d2 100644
--- a/pandas/tests/test_testing.py
+++ b/pandas/tests/test_testing.py
@@ -597,22 +597,22 @@ class TestDeprecatedTests(tm.TestCase):

     def test_warning(self):

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assertEquals(1, 1)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assertNotEquals(1, 2)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assert_(True)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assertAlmostEquals(1.0, 1.0000000001)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             self.assertNotAlmostEquals(1, 2)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             tm.assert_isinstance(Series([1, 2]), Series, msg='xxx')
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 041c747286c51..e0d13287fcf3b 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -592,7 +592,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
     return fig

 @deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
-@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
+@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
 def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
                          use_columns=False, xticks=None, colormap=None,
                          axvlines=True, **kwds):
@@ -2624,7 +2624,7 @@ def plot_group(keys, values, ax):
                "now, set return_type='axes'.\n To keep the previous "
                "behavior and silence this warning, set "
                "return_type='dict'.")
-        warnings.warn(msg, FutureWarning)
+        warnings.warn(msg, FutureWarning, stacklevel=3)
         return_type = 'dict'
     if ax is None:
         ax = _gca()
@@ -2972,7 +2972,7 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
     if figsize == 'default':
         # allowed to specify mpl default with 'default'
         warnings.warn("figsize='default' is deprecated. Specify figure"
-                      "size by tuple instead", FutureWarning)
+                      "size by tuple instead", FutureWarning, stacklevel=4)
         figsize = None

     grouped = data.groupby(by)
diff --git a/pandas/tools/rplot.py b/pandas/tools/rplot.py
index 5996fceff8877..bc834689ffce8 100644
--- a/pandas/tools/rplot.py
+++ b/pandas/tools/rplot.py
@@ -17,7 +17,7 @@
               "like seaborn for similar but more refined functionality. \n\n"
               "See our docs http://pandas.pydata.org/pandas-docs/stable/visualization.html#rplot "
               "for some example how to convert your existing code to these "
-              "packages.", FutureWarning)
+              "packages.", FutureWarning, stacklevel=2)


class Scale:
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 912a0c3f88405..a6b289b76af11 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -398,7 +398,7 @@ def __add__(self, other):
             raise TypeError("cannot add TimedeltaIndex and {typ}".format(typ=type(other)))
         elif isinstance(other, Index):
             warnings.warn("using '+' to provide set union with datetimelike Indexes is deprecated, "
-                          "use .union()",FutureWarning)
+                          "use .union()",FutureWarning, stacklevel=2)
             return self.union(other)
         elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
             return self._add_delta(other)
@@ -423,7 +423,7 @@ def __sub__(self, other):
             return self._add_delta(-other)
         elif isinstance(other, Index):
             warnings.warn("using '-' to provide set differences with datetimelike Indexes is deprecated, "
-                          "use .difference()",FutureWarning)
+                          "use .difference()",FutureWarning, stacklevel=2)
             return self.difference(other)
         elif isinstance(other, (DateOffset, timedelta, np.timedelta64, tslib.Timedelta)):
             return self._add_delta(-other)
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 9349e440eb9e9..e471e66616711 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -525,12 +525,12 @@ def get_offset(name):
         if name in _rule_aliases:
             new = _rule_aliases[name]
             warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
-                          FutureWarning)
+                          FutureWarning, stacklevel=2)
             name = new
         elif name.lower() in _rule_aliases:
             new = _rule_aliases[name.lower()]
             warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
-                          FutureWarning)
+                          FutureWarning, stacklevel=2)
             name = new

     name = _lite_rule_alias.get(name, name)
@@ -540,7 +540,7 @@ def get_offset(name):
         if name in _rule_aliases:
             new = _rule_aliases[name]
             warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
-                          FutureWarning)
+                          FutureWarning, stacklevel=2)
             name = new

     name = _lite_rule_alias.get(name, name)
@@ -784,7 +784,7 @@ def _period_str_to_code(freqstr):
     if freqstr in _rule_aliases:
         new = _rule_aliases[freqstr]
         warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new),
-                      FutureWarning)
+                      FutureWarning, stacklevel=6)
         freqstr = new
     freqstr = _lite_rule_alias.get(freqstr, freqstr)

@@ -793,7 +793,7 @@ def _period_str_to_code(freqstr):
         if lower in _rule_aliases:
             new = _rule_aliases[lower]
             warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new),
-                          FutureWarning)
+                          FutureWarning, stacklevel=6)
             freqstr = new
         freqstr = _lite_rule_alias.get(lower, freqstr)

@@ -805,7 +805,7 @@ def _period_str_to_code(freqstr):
     try:
         alias = _period_alias_dict[freqstr]
         warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, alias),
-                      FutureWarning)
+                      FutureWarning, stacklevel=3)
     except KeyError:
         raise ValueError("Unknown freqstr: %s" % freqstr)
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index b783459cbfe95..a642c12786940 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -589,7 +589,7 @@ def test_series(self):
             s = Series(period_range('2013',periods=10,freq=freq))
             self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
         for freq in ['Y']:
-            with tm.assert_produces_warning(FutureWarning):
+            with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
                 s = Series(period_range('2013',periods=10,freq=freq))
             self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))

@@ -610,7 +610,7 @@ def test_legacy_offset_warnings(self):
             exp = frequencies.get_offset(v)
             self.assertEqual(result, exp)

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             idx = date_range('2011-01-01', periods=5, freq=k)
         exp = date_range('2011-01-01', periods=5, freq=v)
         self.assert_index_equal(idx, exp)
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index b3ec88f4d0988..fada4a966c10b 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -3670,14 +3670,14 @@ def test_get_standard_freq():
     assert fstr == get_standard_freq('1w')
     assert fstr == get_standard_freq(('W', 1))

-    with tm.assert_produces_warning(FutureWarning):
+    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
         result = get_standard_freq('WeEk')
     assert fstr == result

     fstr = get_standard_freq('5Q')
     assert fstr == get_standard_freq('5q')

-    with tm.assert_produces_warning(FutureWarning):
+    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
         result = get_standard_freq('5QuarTer')
     assert fstr == result
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index c828d6d7effb6..4b5d5dfedeee7 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -2715,7 +2715,7 @@ def test_to_period_monthish(self):
             prng = rng.to_period()
             self.assertEqual(prng.freq, 'M')

-        with tm.assert_produces_warning(FutureWarning):
+        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
             rng = date_range('01-Jan-2012', periods=8, freq='EOM')
         prng = rng.to_period()
         self.assertEqual(prng.freq, 'M')
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
index 4544c3cdb8919..49806491ed1c6 100644
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -10,12 +10,12 @@ def deprecate(name, alternative, alt_name=None):
     def wrapper(*args, **kwargs):
         warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name),
-                      FutureWarning)
+                      FutureWarning, stacklevel=2)
         return alternative(*args, **kwargs)
     return wrapper


-def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):
+def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
     """Decorator to deprecate a keyword argument of a function

     Parameters
@@ -79,7 +79,7 @@ def wrapper(*args, **kwargs):
             msg = "the '%s' keyword is deprecated, " \
                   "use '%s' instead" % (old_arg_name, new_arg_name)
-            warnings.warn(msg, FutureWarning)
+            warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
             if kwargs.get(new_arg_name, None) is not None:
                 msg = "Can only specify '%s' or '%s', not both" % \
                       (old_arg_name, new_arg_name)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index aaa83da036c2f..a195455c116fb 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1918,7 +1918,8 @@ def handle_success(self, exc_type, exc_value, traceback):


 @contextmanager
-def assert_produces_warning(expected_warning=Warning, filter_level="always", clear=None):
+def assert_produces_warning(expected_warning=Warning, filter_level="always",
+                            clear=None, check_stacklevel=True):
     """
     Context manager for running code that expects to raise (or not raise)
     warnings.  Checks that code raises the expected warning and only the
@@ -1966,6 +1967,16 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", cle
             if (expected_warning and issubclass(actual_warning.category,
                                                 expected_warning)):
                 saw_warning = True
+
+                if check_stacklevel and issubclass(actual_warning.category,
+                                                   (FutureWarning, DeprecationWarning)):
+                    from inspect import getframeinfo, stack
+                    caller = getframeinfo(stack()[2][0])
+                    msg = ("Warning not set with correct stacklevel. File where warning"
+                           " is raised: {0} != {1}. Warning message: {2}".format(
+                               actual_warning.filename, caller.filename,
+                               actual_warning.message))
+                    assert actual_warning.filename == caller.filename, msg
             else:
                 extra_warnings.append(actual_warning.category.__name__)
         if expected_warning: