From b3ed7f9873f7f4689aa8266bf0ed77912d6d1b9b Mon Sep 17 00:00:00 2001 From: UrielMaD <57372467+UrielMaD@users.noreply.github.com> Date: Wed, 9 Dec 2020 03:15:04 -0600 Subject: [PATCH] CLN: C408 Unnecessary dict call - rewrite as a literal #38138 (#38383) * last dict fixings * last dict fixings * last dict fixings * last dict fixings --- .../tests/io/generate_legacy_storage_files.py | 118 +++++++++--------- pandas/tests/io/parser/test_comment.py | 2 +- pandas/tests/io/parser/test_dialect.py | 30 ++--- pandas/tests/io/parser/test_encoding.py | 12 +- pandas/tests/io/parser/test_header.py | 68 +++++----- pandas/tests/io/parser/test_index_col.py | 40 +++--- pandas/tests/io/parser/test_na_values.py | 14 +-- pandas/tests/io/parser/test_parse_dates.py | 18 +-- .../io/parser/test_python_parser_only.py | 10 +- pandas/tests/io/parser/test_skiprows.py | 10 +- 10 files changed, 163 insertions(+), 159 deletions(-) diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 61e1fc019faac..be6cdf1696882 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -142,16 +142,16 @@ def create_data(): "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0], } - scalars = dict(timestamp=Timestamp("20130101"), period=Period("2012", "M")) - - index = dict( - int=Index(np.arange(10)), - date=date_range("20130101", periods=10), - period=period_range("2013-01-01", freq="M", periods=10), - float=Index(np.arange(10, dtype=np.float64)), - uint=Index(np.arange(10, dtype=np.uint64)), - timedelta=timedelta_range("00:00:00", freq="30T", periods=10), - ) + scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")} + + index = { + "int": Index(np.arange(10)), + "date": date_range("20130101", periods=10), + "period": period_range("2013-01-01", freq="M", periods=10), + "float": Index(np.arange(10, dtype=np.float64)), + "uint": Index(np.arange(10, dtype=np.uint64)), + "timedelta": timedelta_range("00:00:00", freq="30T", periods=10), + } index["range"] = RangeIndex(10) @@ -160,8 +160,8 @@ def create_data(): index["interval"] = interval_range(0, periods=10) - mi = dict( - reg2=MultiIndex.from_tuples( + mi = { + "reg2": MultiIndex.from_tuples( tuple( zip( *[ @@ -172,35 +172,35 @@ def create_data(): ), names=["first", "second"], ) - ) + } - series = dict( - float=Series(data["A"]), - int=Series(data["B"]), - mixed=Series(data["E"]), - ts=Series( + series = { + "float": Series(data["A"]), + "int": Series(data["B"]), + "mixed": Series(data["E"]), + "ts": Series( np.arange(10).astype(np.int64), index=date_range("20130101", periods=10) ), - mi=Series( + "mi": Series( np.arange(5).astype(np.float64), index=MultiIndex.from_tuples( tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"] ), ), - dup=Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]), - cat=Series(Categorical(["foo", "bar", "baz"])), - dt=Series(date_range("20130101", periods=5)), - dt_tz=Series(date_range("20130101", periods=5, tz="US/Eastern")), - period=Series([Period("2000Q1")] * 5), - ) + "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]), + "cat": Series(Categorical(["foo", "bar", "baz"])), + "dt": Series(date_range("20130101", periods=5)), + "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")), + "period": Series([Period("2000Q1")] * 5), + } mixed_dup_df = DataFrame(data) mixed_dup_df.columns = list("ABCDA") - frame = dict( - float=DataFrame({"A": 
series["float"], "B": series["float"] + 1}), - int=DataFrame({"A": series["int"], "B": series["int"] + 1}), - mixed=DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}), - mi=DataFrame( + frame = { + "float": DataFrame({"A": series["float"], "B": series["float"] + 1}), + "int": DataFrame({"A": series["int"], "B": series["int"] + 1}), + "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}), + "mi": DataFrame( {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)}, index=MultiIndex.from_tuples( tuple( @@ -214,25 +214,25 @@ def create_data(): names=["first", "second"], ), ), - dup=DataFrame( + "dup": DataFrame( np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"] ), - cat_onecol=DataFrame({"A": Categorical(["foo", "bar"])}), - cat_and_float=DataFrame( + "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}), + "cat_and_float": DataFrame( { "A": Categorical(["foo", "bar", "baz"]), "B": np.arange(3).astype(np.int64), } ), - mixed_dup=mixed_dup_df, - dt_mixed_tzs=DataFrame( + "mixed_dup": mixed_dup_df, + "dt_mixed_tzs": DataFrame( { "A": Timestamp("20130102", tz="US/Eastern"), "B": Timestamp("20130603", tz="CET"), }, index=range(5), ), - dt_mixed2_tzs=DataFrame( + "dt_mixed2_tzs": DataFrame( { "A": Timestamp("20130102", tz="US/Eastern"), "B": Timestamp("20130603", tz="CET"), @@ -240,19 +240,19 @@ def create_data(): }, index=range(5), ), - ) + } - cat = dict( - int8=Categorical(list("abcdefg")), - int16=Categorical(np.arange(1000)), - int32=Categorical(np.arange(10000)), - ) + cat = { + "int8": Categorical(list("abcdefg")), + "int16": Categorical(np.arange(1000)), + "int32": Categorical(np.arange(10000)), + } - timestamp = dict( - normal=Timestamp("2011-01-01"), - nat=NaT, - tz=Timestamp("2011-01-01", tz="US/Eastern"), - ) + timestamp = { + "normal": Timestamp("2011-01-01"), + "nat": NaT, + "tz": Timestamp("2011-01-01", tz="US/Eastern"), + } timestamp["freq"] = Timestamp("2011-01-01", freq="D") timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M") @@ -282,18 +282,18 @@ def create_data(): "Minute": Minute(1), } - return dict( - series=series, - frame=frame, - index=index, - scalars=scalars, - mi=mi, - sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()), - sp_frame=dict(float=_create_sp_frame()), - cat=cat, - timestamp=timestamp, - offsets=off, - ) + return { + "series": series, + "frame": frame, + "index": index, + "scalars": scalars, + "mi": mi, + "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()}, + "sp_frame": {"float": _create_sp_frame()}, + "cat": cat, + "timestamp": timestamp, + "offsets": off, + } def create_pickle_data(): diff --git a/pandas/tests/io/parser/test_comment.py b/pandas/tests/io/parser/test_comment.py index 60e32d7c27200..bddccb0334cc8 100644 --- a/pandas/tests/io/parser/test_comment.py +++ b/pandas/tests/io/parser/test_comment.py @@ -26,7 +26,7 @@ def test_comment(all_parsers, na_values): @pytest.mark.parametrize( - "read_kwargs", [dict(), dict(lineterminator="*"), dict(delim_whitespace=True)] + "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}] ) def test_line_comment(all_parsers, read_kwargs): parser = all_parsers diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py index cc65def0fd096..c12eb5ec873b2 100644 --- a/pandas/tests/io/parser/test_dialect.py +++ b/pandas/tests/io/parser/test_dialect.py @@ -17,14 +17,14 @@ @pytest.fixture def custom_dialect(): dialect_name = "weird" - dialect_kwargs = dict( - 
doublequote=False, - escapechar="~", - delimiter=":", - skipinitialspace=False, - quotechar="~", - quoting=3, - ) + dialect_kwargs = { + "doublequote": False, + "escapechar": "~", + "delimiter": ":", + "skipinitialspace": False, + "quotechar": "~", + "quoting": 3, + } return dialect_name, dialect_kwargs @@ -91,7 +91,7 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val data = "a:b\n1:2" warning_klass = None - kwds = dict() + kwds = {} # arg=None tests when we pass in the dialect without any other arguments. if arg is not None: @@ -114,12 +114,12 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val @pytest.mark.parametrize( "kwargs,warning_klass", [ - (dict(sep=","), None), # sep is default --> sep_override=True - (dict(sep="."), ParserWarning), # sep isn't default --> sep_override=False - (dict(delimiter=":"), None), # No conflict - (dict(delimiter=None), None), # Default arguments --> sep_override=True - (dict(delimiter=","), ParserWarning), # Conflict - (dict(delimiter="."), ParserWarning), # Conflict + ({"sep": ","}, None), # sep is default --> sep_override=True + ({"sep": "."}, ParserWarning), # sep isn't default --> sep_override=False + ({"delimiter": ":"}, None), # No conflict + ({"delimiter": None}, None), # Default arguments --> sep_override=True + ({"delimiter": ","}, ParserWarning), # Conflict + ({"delimiter": "."}, ParserWarning), # Conflict ], ids=[ "sep-override-true", diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index e74265da3e966..10386cf87b9c2 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -47,7 +47,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding): ",", sep ) path = f"__{tm.rands(10)}__.csv" - kwargs = dict(sep=sep, skiprows=2) + kwargs = {"sep": sep, "skiprows": 2} utf8 = "utf-8" with tm.ensure_clean(path) as path: @@ -91,17 +91,17 @@ def test_unicode_encoding(all_parsers, csv_dir_path): "data,kwargs,expected", [ # Basic test - ("a\n1", dict(), DataFrame({"a": [1]})), + ("a\n1", {}, DataFrame({"a": [1]})), # "Regular" quoting - ('"a"\n1', dict(quotechar='"'), DataFrame({"a": [1]})), + ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})), # Test in a data row instead of header - ("b\n1", dict(names=["a"]), DataFrame({"a": ["b", "1"]})), + ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})), # Test in empty data row with skipping - ("\n1", dict(names=["a"], skip_blank_lines=True), DataFrame({"a": [1]})), + ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})), # Test in empty data row without skipping ( "\n1", - dict(names=["a"], skip_blank_lines=False), + {"names": ["a"], "skip_blank_lines": False}, DataFrame({"a": [np.nan, 1]}), ), ], diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index 4cd110136d7b0..ae2808f494118 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -144,7 +144,7 @@ def test_header_multi_index(all_parsers): "kwargs,msg", [ ( - dict(index_col=["foo", "bar"]), + {"index_col": ["foo", "bar"]}, ( "index_col must only contain " "row numbers when specifying " @@ -152,11 +152,11 @@ def test_header_multi_index(all_parsers): ), ), ( - dict(index_col=[0, 1], names=["foo", "bar"]), + {"index_col": [0, 1], "names": ["foo", "bar"]}, ("cannot specify names when specifying a multi-index header"), ), ( - dict(index_col=[0, 1], usecols=["foo", "bar"]), + {"index_col": [0, 1], 
"usecols": ["foo", "bar"]}, ("cannot specify usecols when specifying a multi-index header"), ), ], @@ -187,10 +187,10 @@ def test_header_multi_index_invalid(all_parsers, kwargs, msg): @pytest.mark.parametrize( "kwargs", [ - dict(header=[0, 1]), - dict( - skiprows=3, - names=[ + {"header": [0, 1]}, + { + "skiprows": 3, + "names": [ ("a", "q"), ("a", "r"), ("a", "s"), @@ -198,10 +198,10 @@ def test_header_multi_index_invalid(all_parsers, kwargs, msg): ("c", "u"), ("c", "v"), ], - ), - dict( - skiprows=3, - names=[ + }, + { + "skiprows": 3, + "names": [ _TestTuple("a", "q"), _TestTuple("a", "r"), _TestTuple("a", "s"), @@ -209,7 +209,7 @@ def test_header_multi_index_invalid(all_parsers, kwargs, msg): _TestTuple("c", "u"), _TestTuple("c", "v"), ], - ), + }, ], ) def test_header_multi_index_common_format1(all_parsers, kwargs): @@ -234,10 +234,10 @@ def test_header_multi_index_common_format1(all_parsers, kwargs): @pytest.mark.parametrize( "kwargs", [ - dict(header=[0, 1]), - dict( - skiprows=2, - names=[ + {"header": [0, 1]}, + { + "skiprows": 2, + "names": [ ("a", "q"), ("a", "r"), ("a", "s"), @@ -245,10 +245,10 @@ def test_header_multi_index_common_format1(all_parsers, kwargs): ("c", "u"), ("c", "v"), ], - ), - dict( - skiprows=2, - names=[ + }, + { + "skiprows": 2, + "names": [ _TestTuple("a", "q"), _TestTuple("a", "r"), _TestTuple("a", "s"), @@ -256,7 +256,7 @@ def test_header_multi_index_common_format1(all_parsers, kwargs): _TestTuple("c", "u"), _TestTuple("c", "v"), ], - ), + }, ], ) def test_header_multi_index_common_format2(all_parsers, kwargs): @@ -280,10 +280,10 @@ def test_header_multi_index_common_format2(all_parsers, kwargs): @pytest.mark.parametrize( "kwargs", [ - dict(header=[0, 1]), - dict( - skiprows=2, - names=[ + {"header": [0, 1]}, + { + "skiprows": 2, + "names": [ ("a", "q"), ("a", "r"), ("a", "s"), @@ -291,10 +291,10 @@ def test_header_multi_index_common_format2(all_parsers, kwargs): ("c", "u"), ("c", "v"), ], - ), - dict( - skiprows=2, - names=[ + }, + { + "skiprows": 2, + "names": [ _TestTuple("a", "q"), _TestTuple("a", "r"), _TestTuple("a", "s"), @@ -302,7 +302,7 @@ def test_header_multi_index_common_format2(all_parsers, kwargs): _TestTuple("c", "u"), _TestTuple("c", "v"), ], - ), + }, ], ) def test_header_multi_index_common_format3(all_parsers, kwargs): @@ -397,7 +397,7 @@ def test_header_names_backward_compat(all_parsers, data, header): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=False)]) +@pytest.mark.parametrize("kwargs", [{}, {"index_col": False}]) def test_read_only_header_no_rows(all_parsers, kwargs): # See gh-7773 parser = all_parsers @@ -410,10 +410,10 @@ def test_read_only_header_no_rows(all_parsers, kwargs): @pytest.mark.parametrize( "kwargs,names", [ - (dict(), [0, 1, 2, 3, 4]), - (dict(prefix="X"), ["X0", "X1", "X2", "X3", "X4"]), + ({}, [0, 1, 2, 3, 4]), + ({"prefix": "X"}, ["X0", "X1", "X2", "X3", "X4"]), ( - dict(names=["foo", "bar", "baz", "quux", "panda"]), + {"names": ["foo", "bar", "baz", "quux", "panda"]}, ["foo", "bar", "baz", "quux", "panda"], ), ], diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py index 9c6cad4b41949..f3191d5195308 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -86,35 +86,39 @@ def test_infer_index_col(all_parsers): @pytest.mark.parametrize( "index_col,kwargs", [ - (None, dict(columns=["x", "y", "z"])), - (False, dict(columns=["x", "y", "z"])), - (0, dict(columns=["y", "z"], 
index=Index([], name="x"))), - (1, dict(columns=["x", "z"], index=Index([], name="y"))), - ("x", dict(columns=["y", "z"], index=Index([], name="x"))), - ("y", dict(columns=["x", "z"], index=Index([], name="y"))), + (None, {"columns": ["x", "y", "z"]}), + (False, {"columns": ["x", "y", "z"]}), + (0, {"columns": ["y", "z"], "index": Index([], name="x")}), + (1, {"columns": ["x", "z"], "index": Index([], name="y")}), + ("x", {"columns": ["y", "z"], "index": Index([], name="x")}), + ("y", {"columns": ["x", "z"], "index": Index([], name="y")}), ( [0, 1], - dict( - columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) - ), + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]), + }, ), ( ["x", "y"], - dict( - columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"]) - ), + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]), + }, ), ( [1, 0], - dict( - columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) - ), + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]), + }, ), ( ["y", "x"], - dict( - columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"]) - ), + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]), + }, ), ], ) diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index 9f86bbd65640e..4237a774261ca 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -214,7 +214,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): "kwargs,expected", [ ( - dict(), + {}, DataFrame( { "A": ["a", "b", np.nan, "d", "e", np.nan, "g"], @@ -224,7 +224,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): ), ), ( - dict(na_values={"A": [], "C": []}, keep_default_na=False), + {"na_values": {"A": [], "C": []}, "keep_default_na": False}, DataFrame( { "A": ["a", "b", "", "d", "e", "nan", "g"], @@ -234,7 +234,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): ), ), ( - dict(na_values=["a"], keep_default_na=False), + {"na_values": ["a"], "keep_default_na": False}, DataFrame( { "A": [np.nan, "b", "", "d", "e", "nan", "g"], @@ -244,7 +244,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): ), ), ( - dict(na_values={"A": [], "C": []}), + {"na_values": {"A": [], "C": []}}, DataFrame( { "A": ["a", "b", np.nan, "d", "e", np.nan, "g"], @@ -445,11 +445,11 @@ def test_na_values_dict_col_index(all_parsers): [ ( str(2 ** 63) + "\n" + str(2 ** 63 + 1), - dict(na_values=[2 ** 63]), + {"na_values": [2 ** 63]}, DataFrame([str(2 ** 63), str(2 ** 63 + 1)]), ), - (str(2 ** 63) + ",1" + "\n,2", dict(), DataFrame([[str(2 ** 63), 1], ["", 2]])), - (str(2 ** 63) + "\n1", dict(na_values=[2 ** 63]), DataFrame([np.nan, 1])), + (str(2 ** 63) + ",1" + "\n,2", {}, DataFrame([[str(2 ** 63), 1], ["", 2]])), + (str(2 ** 63) + "\n1", {"na_values": [2 ** 63]}, DataFrame([np.nan, 1])), ], ) def test_na_values_uint64(all_parsers, data, kwargs, expected): diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index a20ca508ebbfe..119b4090dd4c7 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -573,7 +573,7 @@ def test_multiple_date_cols_with_header(all_parsers): ID,date,nominalTime KORD,19990127, 19:00:00 KORD,19990127, 20:00:00""", - dict(ID=[1, 2]), + {"ID": [1, 2]}, "Date 
column ID already in dict", ), ], @@ -784,7 +784,7 @@ def test_multi_index_parse_dates(all_parsers, index_col): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("kwargs", [dict(dayfirst=True), dict(day_first=True)]) +@pytest.mark.parametrize("kwargs", [{"dayfirst": True}, {"day_first": True}]) def test_parse_dates_custom_euro_format(all_parsers, kwargs): parser = all_parsers data = """foo,bar,baz @@ -1076,7 +1076,7 @@ def test_multiple_date_col_multiple_index_compat(all_parsers): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("kwargs", [dict(), dict(index_col="C")]) +@pytest.mark.parametrize("kwargs", [{}, {"index_col": "C"}]) def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs): # see gh-5636 parser = all_parsers @@ -1140,24 +1140,24 @@ def test_parse_dates_empty_string(all_parsers): [ ( "a\n04.15.2016", - dict(parse_dates=["a"]), + {"parse_dates": ["a"]}, DataFrame([datetime(2016, 4, 15)], columns=["a"]), ), ( "a\n04.15.2016", - dict(parse_dates=True, index_col=0), + {"parse_dates": True, "index_col": 0}, DataFrame(index=DatetimeIndex(["2016-04-15"], name="a")), ), ( "a,b\n04.15.2016,09.16.2013", - dict(parse_dates=["a", "b"]), + {"parse_dates": ["a", "b"]}, DataFrame( [[datetime(2016, 4, 15), datetime(2013, 9, 16)]], columns=["a", "b"] ), ), ( "a,b\n04.15.2016,09.16.2013", - dict(parse_dates=True, index_col=[0, 1]), + {"parse_dates": True, "index_col": [0, 1]}, DataFrame( index=MultiIndex.from_tuples( [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"] @@ -1215,7 +1215,7 @@ def test_parse_date_time_multi_level_column_name(all_parsers, date_parser, warni 2001-01-05, 10:00:00, 0.0, 10. 2001-01-05, 00:00:00, 1., 11. """, - dict(header=0, parse_dates={"date_time": [0, 1]}), + {"header": 0, "parse_dates": {"date_time": [0, 1]}}, DataFrame( [ [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10], @@ -1233,7 +1233,7 @@ def test_parse_date_time_multi_level_column_name(all_parsers, date_parser, warni "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n" "KORD,19990127, 23:00:00, 22:56:00, -0.5900" ), - dict(header=None, parse_dates={"actual": [1, 2], "nominal": [1, 3]}), + {"header": None, "parse_dates": {"actual": [1, 2], "nominal": [1, 3]}}, DataFrame( [ [ diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index 4d933fa02d36f..016fae4f4a6f5 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -49,7 +49,7 @@ def test_invalid_skipfooter_negative(python_parser_only): parser.read_csv(StringIO(data), skipfooter=-1) -@pytest.mark.parametrize("kwargs", [dict(sep=None), dict(delimiter="|")]) +@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}]) def test_sniff_delimiter(python_parser_only, kwargs): data = """index|A|B|C foo|1|2|3 @@ -122,7 +122,7 @@ def test_single_line(python_parser_only): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("kwargs", [dict(skipfooter=2), dict(nrows=3)]) +@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}]) def test_skipfooter(python_parser_only, kwargs): # see gh-6607 data = """A,B,C @@ -213,10 +213,10 @@ def test_skipfooter_with_decimal(python_parser_only, add_footer): if add_footer: # The stray footer line should not mess with the # casting of the first two lines if we skip it. 
- kwargs = dict(skipfooter=1) + kwargs = {"skipfooter": 1} data += "\nFooter" else: - kwargs = dict() + kwargs = {} result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs) tm.assert_frame_equal(result, expected) @@ -245,7 +245,7 @@ def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding): @pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE]) def test_multi_char_sep_quotes(python_parser_only, quoting): # see gh-13374 - kwargs = dict(sep=",,") + kwargs = {"sep": ",,"} parser = python_parser_only data = 'a,,b\n1,,a\n2,,"2,,b"' diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py index fdccef1127c7e..35b155705ccee 100644 --- a/pandas/tests/io/parser/test_skiprows.py +++ b/pandas/tests/io/parser/test_skiprows.py @@ -93,7 +93,7 @@ def test_skip_rows_blank(all_parsers): 2,"line 21 line 22",2 3,"line 31",1""", - dict(skiprows=[1]), + {"skiprows": [1]}, DataFrame( [[2, "line 21\nline 22", 2], [3, "line 31", 1]], columns=["id", "text", "num_lines"], @@ -101,7 +101,7 @@ def test_skip_rows_blank(all_parsers): ), ( "a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~", - dict(quotechar="~", skiprows=[2]), + {"quotechar": "~", "skiprows": [2]}, DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]), ), ( @@ -111,7 +111,7 @@ def test_skip_rows_blank(all_parsers): "example\n sentence\n two~,url2\n~" "example\n sentence\n three~,url3" ), - dict(quotechar="~", skiprows=[1, 3]), + {"quotechar": "~", "skiprows": [1, 3]}, DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]), ), ], @@ -222,8 +222,8 @@ def test_skiprows_infield_quote(all_parsers): @pytest.mark.parametrize( "kwargs,expected", [ - (dict(), DataFrame({"1": [3, 5]})), - (dict(header=0, names=["foo"]), DataFrame({"foo": [3, 5]})), + ({}, DataFrame({"1": [3, 5]})), + ({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]})), ], ) def test_skip_rows_callable(all_parsers, kwargs, expected):
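
For reference, a minimal sketch of the flake8-comprehensions C408 rule this patch applies ("Unnecessary dict call - rewrite as a literal"). The snippet below is illustrative only; the variable names are not taken from the pandas test suite. A keyword-style dict() call builds the same mapping as a dict literal, but the literal skips the name lookup and function call, and it can also express keys that are not valid Python identifiers.

# Illustration of the C408 rewrite performed throughout this patch
# (hypothetical example, not code from pandas/tests).

# Flagged by flake8-comprehensions C408: unnecessary dict call
kwargs_call = dict(sep=",", skiprows=2)

# Preferred literal form, as used in the rewritten tests
kwargs_literal = {"sep": ",", "skiprows": 2}

assert kwargs_call == kwargs_literal  # both build the same mapping

# The same rule covers the empty case used in parametrize lists:
empty_call = dict()   # flagged
empty_literal = {}    # preferred
assert empty_call == empty_literal

# Literals also allow keys that dict(...) keyword syntax cannot express,
# e.g. strings containing spaces:
na_map = {"column one": [], "C": []}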