DEPR: Series.to_csv signature change #29809

Merged · 5 commits · Nov 25, 2019
95 changes: 0 additions & 95 deletions pandas/core/series.py
@@ -4417,101 +4417,6 @@ def between(self, left, right, inclusive=True):

return lmask & rmask

@Appender(generic.NDFrame.to_csv.__doc__)
def to_csv(self, *args, **kwargs):

names = [
"path_or_buf",
"sep",
"na_rep",
"float_format",
"columns",
"header",
"index",
"index_label",
"mode",
"encoding",
"compression",
"quoting",
"quotechar",
"line_terminator",
"chunksize",
"date_format",
"doublequote",
"escapechar",
"decimal",
]

old_names = [
"path_or_buf",
"index",
"sep",
"na_rep",
"float_format",
"header",
"index_label",
"mode",
"encoding",
"compression",
"date_format",
"decimal",
]

if "path" in kwargs:
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'path' will be renamed to 'path_or_buf'.",
FutureWarning,
stacklevel=2,
)
kwargs["path_or_buf"] = kwargs.pop("path")

if len(args) > 1:
# Either "index" (old signature) or "sep" (new signature) is being
# passed as second argument (while the first is the same)
maybe_sep = args[1]

if not (isinstance(maybe_sep, str) and len(maybe_sep) == 1):
# old signature
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`. Note that the "
"order of arguments changed, and the new one "
"has 'sep' in first place, for which \"{}\" is "
"not a valid value. The old order will cease to "
"be supported in a future version. Please refer "
"to the documentation for `DataFrame.to_csv` "
"when updating your function "
"calls.".format(maybe_sep),
FutureWarning,
stacklevel=2,
)
names = old_names

pos_args = dict(zip(names[: len(args)], args))

for key in pos_args:
if key in kwargs:
raise ValueError(
"Argument given by name ('{}') and position "
"({})".format(key, names.index(key))
)
kwargs[key] = pos_args[key]

if kwargs.get("header", None) is None:
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'header' will change its default value from False "
"to True: please pass an explicit value to suppress "
"this warning.",
FutureWarning,
stacklevel=2,
)
kwargs["header"] = False # Backwards compatibility.
return self.to_frame().to_csv(**kwargs)

@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
def isna(self):
return super().isna()
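For context, a minimal sketch of the two calling conventions the removed shim bridged (the file name here is hypothetical): the old Series-specific positional order, where the second positional argument was `index`, versus the DataFrame-aligned signature that the deprecation warnings pointed users to. With the shim gone, `Series.to_csv` simply uses the shared `DataFrame.to_csv` signature, so `header` defaults to True as well.

```python
import pandas as pd

s = pd.Series([1, 2, 3], name="x")

# Old Series-specific order: the second positional argument was `index`.
# The removed shim detected this and translated it with a FutureWarning.
# s.to_csv("out.csv", False)  # no longer supported

# DataFrame-aligned signature: `sep` is the second positional parameter,
# so everything else is best passed by keyword.
s.to_csv("out.csv", index=False, header=True)
```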
42 changes: 13 additions & 29 deletions pandas/tests/io/test_compression.py
@@ -1,9 +1,7 @@
import contextlib
import os
import subprocess
import sys
import textwrap
import warnings

import pytest

@@ -13,17 +11,6 @@
import pandas.io.common as icom


@contextlib.contextmanager
def catch_to_csv_depr():
# Catching warnings because Series.to_csv has
# been deprecated. Remove this context when
# Series.to_csv has been aligned.

with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
yield


@pytest.mark.parametrize(
"obj",
[
@@ -37,12 +24,11 @@ def catch_to_csv_depr():
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as path:
with catch_to_csv_depr():
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size


@pytest.mark.parametrize(
@@ -59,18 +45,16 @@ def test_compression_size(obj, method, compression_only):
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
f, handles = icom._get_handle(path, "w", compression=compression_only)
with catch_to_csv_depr():
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
compressed_size = os.path.getsize(path)
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
f, handles = icom._get_handle(path, "w", compression=None)
with catch_to_csv_depr():
with f:
getattr(obj, method)(f)
assert not f.closed
with f:
getattr(obj, method)(f)
assert not f.closed
assert f.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
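Since `Series.to_csv` no longer emits the signature FutureWarning, the `catch_to_csv_depr` suppression helper is unnecessary. A standalone sketch of the size check these tests perform, assuming gzip compression and hypothetical file names:

```python
import os

import pandas as pd

s = pd.Series(range(1000), name="x")

# Write the same Series once uncompressed and once gzip-compressed.
s.to_csv("plain.csv", compression=None)
s.to_csv("packed.csv.gz", compression="gzip")

# The compressed file should be smaller than the plain one.
assert os.path.getsize("packed.csv.gz") < os.path.getsize("plain.csv")
```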
18 changes: 0 additions & 18 deletions pandas/tests/series/test_io.py
@@ -25,24 +25,6 @@ def read_csv(self, path, **kwargs):

return out

@pytest.mark.parametrize("arg", ["path", "header", "both"])
def test_to_csv_deprecation(self, arg, datetime_series):
# see gh-19715
with tm.ensure_clean() as path:
if arg == "path":
kwargs = dict(path=path, header=False)
elif arg == "header":
kwargs = dict(path_or_buf=path)
else: # Both discrepancies match.
kwargs = dict(path=path)

with tm.assert_produces_warning(FutureWarning):
datetime_series.to_csv(**kwargs)

# Make sure roundtrip still works.
ts = self.read_csv(path)
tm.assert_series_equal(datetime_series, ts, check_names=False)

def test_from_csv(self, datetime_series, string_series):

with tm.ensure_clean() as path:
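With the deprecation test removed, the remaining tests only need the plain write/read roundtrip. A standalone sketch of such a roundtrip using `pd.read_csv` directly (file and column names are hypothetical, not the class's `read_csv` helper):

```python
import pandas as pd

s = pd.Series([1.5, 2.5, 3.5], name="x")
s.to_csv("roundtrip.csv", header=True)

# Read the single data column back as a Series and compare values and name.
back = pd.read_csv("roundtrip.csv", index_col=0)["x"]
pd.testing.assert_series_equal(s, back, check_index_type=False)
```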