Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
aminalaee committed Aug 15, 2023
1 parent b42b39a commit 670cff9
Show file tree
Hide file tree
Showing 34 changed files with 117 additions and 112 deletions.
4 changes: 3 additions & 1 deletion py-polars/polars/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -724,4 +724,6 @@ def from_pandas(
include_index=include_index,
)
else:
raise ValueError(f"expected pandas DataFrame or Series, got {type(data).__name__!r}")
raise ValueError(
f"expected pandas DataFrame or Series, got {type(data).__name__!r}"
)
38 changes: 19 additions & 19 deletions py-polars/polars/dataframe/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,7 @@ def __init__(
)
else:
raise ValueError(
f"dataFrame constructor called with unsupported type; got {type(data).__name__!r}"
f"DataFrame constructor called with unsupported type; got {type(data).__name__!r}"
)

@classmethod
Expand Down Expand Up @@ -723,7 +723,7 @@ def _read_csv(
if dtype_slice is not None:
raise ValueError(
"cannot use glob patterns and unnamed dtypes as `dtypes` argument;"
"\n\nUse dtypes: Mapping[str, Type[DataType]"
" Use dtypes: Mapping[str, Type[DataType]"
)
from polars import scan_csv

Expand Down Expand Up @@ -755,7 +755,7 @@ def _read_csv(
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns`"
"\n\nargument; Use columns: List[str]"
" argument; Use columns: List[str]"
)

projection, columns = handle_projection_columns(columns)
Expand Down Expand Up @@ -844,7 +844,7 @@ def _read_parquet(
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns`"
"\n\nargument; Use columns: List[str]"
" argument; Use columns: List[str]"
)

projection, columns = handle_projection_columns(columns)
Expand Down Expand Up @@ -957,7 +957,7 @@ def _read_ipc(
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns`"
"\n\nargument; Use columns: List[str]"
" argument; Use columns: List[str]"
)
return cls._from_pydf(df._df)

Expand Down Expand Up @@ -1247,7 +1247,7 @@ def __dataframe__(
if nan_as_null:
raise NotImplementedError(
"functionality for `nan_as_null` has not been implemented and the"
"\n\nparameter will be removed in a future version."
" parameter will be removed in a future version."
"\n\nUse the default `nan_as_null=False`"
)

Expand Down Expand Up @@ -1546,7 +1546,7 @@ def __getitem__(
if len(col_selection) != self.width:
raise ValueError(
f"expected {self.width} values when selecting columns by"
f"\n\nboolean mask. Got {len(col_selection)}"
f" boolean mask. Got {len(col_selection)}"
)
series_list = []
for i, val in enumerate(col_selection):
Expand Down Expand Up @@ -1641,8 +1641,8 @@ def __getitem__(

# if no data has been returned, the operation is not supported
raise ValueError(
f"cannot __getitem__ on DataFrame with item: '{item!r}'"
f"\n\nof type: '{type(item).__name__!r}'"
f"cannot __getitem__ on DataFrame with item: {item!r}"
f" of type: {type(item).__name__!r}"
)

def __setitem__(
Expand All @@ -1654,7 +1654,7 @@ def __setitem__(
if isinstance(key, str):
raise TypeError(
"'DataFrame' object does not support 'Series' assignment by index."
"\n\nUse 'DataFrame.with_columns'"
" Use 'DataFrame.with_columns'"
)

# df[["C", "D"]]
Expand All @@ -1666,7 +1666,7 @@ def __setitem__(
if value.shape[1] != len(key):
raise ValueError(
"matrix columns should be equal to list use to determine column"
"\n\nnames"
" names"
)

# todo! we can parallelize this by calling from_numpy
Expand All @@ -1684,7 +1684,7 @@ def __setitem__(
) or is_bool_sequence(row_selection):
raise ValueError(
"not allowed to set 'DataFrame' by boolean mask in the"
"\n\nrow position. Consider using 'DataFrame.with_columns'"
" row position. Consider using 'DataFrame.with_columns'"
)

# get series column selection
Expand All @@ -1707,9 +1707,9 @@ def __setitem__(
self.replace(col_selection, s)
else:
raise ValueError(
f"cannot __setitem__ on DataFrame with key: '{key!r}'"
f"\n\nof type: '{type(key).__name__!r}' and value: '{value!r}'"
f"\n\nof type: '{type(value).__name__!r}'."
f"cannot __setitem__ on DataFrame with key: {key!r}"
f" of type: {type(key).__name__!r} and value: {value!r}"
f" of type: {type(value).__name__!r}."
)

def __len__(self) -> int:
Expand Down Expand Up @@ -1787,7 +1787,7 @@ def item(self, row: int | None = None, column: int | str | None = None) -> Any:
if self.shape != (1, 1):
raise ValueError(
f"can only call '.item()' if the dataframe is of shape (1,1), or if"
f"\n\nexplicit row/col values are provided; frame has shape {self.shape!r}"
f" explicit row/col values are provided; frame has shape {self.shape!r}"
)
return self._df.select_at_idx(0).get_idx(0)

Expand Down Expand Up @@ -3288,7 +3288,7 @@ def write_database(
else:
raise ValueError(
f"value for 'if_exists'={if_exists} was unexpected."
f"\n\nChoose one of: {'fail', 'replace', 'append'}"
f" Choose one of: {{'fail', 'replace', 'append'}}"
)
with _open_adbc_connection(connection) as conn, conn.cursor() as cursor:
cursor.adbc_ingest(table_name, self.to_arrow(), mode)
Expand Down Expand Up @@ -7713,7 +7713,7 @@ def n_chunks(self, strategy: str = "first") -> int | list[int]:
else:
raise ValueError(
f"strategy: '{strategy}' not understood."
f"\n\nChoose one of {{'first', 'all'}}"
f" Choose one of {{'first', 'all'}}"
)

@overload
Expand Down Expand Up @@ -8666,7 +8666,7 @@ def row(
if not isinstance(by_predicate, pl.Expr):
raise TypeError(
f"expected 'by_predicate' to be an expression;"
f"\n\nfound {type(by_predicate).__name__!r}"
f" found {type(by_predicate).__name__!r}"
)
rows = self.filter(by_predicate).rows()
n_rows = len(rows)
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/datatypes/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -431,7 +431,7 @@ def py_type_to_dtype(
if not raise_unmatched:
return None
raise ValueError(
f"cannot infer dtype from '{data_type!r}' (type: {type(data_type).__name__!r})"
f"cannot infer dtype from {data_type!r} (type: {type(data_type).__name__!r})"
) from None


Expand Down
4 changes: 2 additions & 2 deletions py-polars/polars/expr/expr.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ def __bool__(self) -> NoReturn:
raise ValueError(
"since Expr are lazy, the truthiness of an Expr is ambiguous."
"\n\nHint: use '&' or '|' to logically combine Expr, not 'and'/'or', and"
"\n\nuse 'x.is_in([y,z])' instead of 'x in [y,z]' to check membership"
" use 'x.is_in([y,z])' instead of 'x in [y,z]' to check membership"
)

def __abs__(self) -> Self:
Expand Down Expand Up @@ -4932,7 +4932,7 @@ def is_between(
else:
raise ValueError(
"closed must be one of {'left', 'right', 'both', 'none'},"
f"\n\ngot {closed!r}"
f" got {closed!r}"
)

def hash(
Expand Down
8 changes: 5 additions & 3 deletions py-polars/polars/functions/eager.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ def concat(
else:
raise ValueError(
f"`how` must be one of {{'vertical','vertical_relaxed','diagonal','horizontal','align'}},"
f"\n\ngot {how!r}"
f" got {how!r}"
)
elif isinstance(first, pl.LazyFrame):
if how == "vertical":
Expand All @@ -194,12 +194,14 @@ def concat(
if how == "vertical":
out = wrap_s(plr.concat_series(elems))
else:
raise ValueError("'Series' only allows 'vertical' concat strategy")
raise ValueError("'Series' only allows {'vertical'} concat strategy")

elif isinstance(first, pl.Expr):
return wrap_expr(plr.concat_expr([e._pyexpr for e in elems], rechunk))
else:
raise ValueError(f"did not expect type: {type(first).__name__!r} in 'pl.concat'")
raise ValueError(
f"did not expect type: {type(first).__name__!r} in 'pl.concat'"
)

if rechunk:
return out.rechunk()
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/functions/lazy.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def col(
else:
raise TypeError(
"invalid input for `col`. Expected iterable of type `str` or `DataType`,"
f"\n\ngot iterable of type {type(item).__name__!r}"
f" got iterable of type {type(item).__name__!r}"
)
else:
raise TypeError(
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/functions/lit.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def lit(
and dtype.time_zone != str(value.tzinfo) # type: ignore[union-attr]
):
raise TypeError(
f"time zone of dtype ({dtype.time_zone!r}) differs from time zone of value ({value.tzinfo!r})." # type: ignore[union-attr]
f"time zone of dtype ({dtype.time_zone!r}) differs from time zone of value ({value.tzinfo!r})" # type: ignore[union-attr]
)
e = lit(_datetime_to_pl_timestamp(value, time_unit)).cast(Datetime(time_unit))
if time_zone is not None:
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/interchange/column.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def __init__(self, column: Series, *, allow_copy: bool = True):
if not allow_copy:
raise RuntimeError(
f"column {column.name!r} must be converted to a local categorical,"
"\n\nwhich is not zero-copy"
" which is not zero-copy"
)
column = column.cat.to_local()

Expand Down
6 changes: 3 additions & 3 deletions py-polars/polars/interchange/from_dataframe.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def _df_to_pyarrow_table(df: Any, *, allow_copy: bool = False) -> pa.Table:
if not _PYARROW_AVAILABLE or parse_version(pa.__version__) < parse_version("11"):
raise ImportError(
"pyarrow>=11.0.0 is required for converting a dataframe interchange object"
"\n\nto a Polars dataframe"
" to a Polars dataframe"
)

import pyarrow.interchange # noqa: F401
Expand All @@ -90,8 +90,8 @@ def _df_to_pyarrow_table_zero_copy(df: Any) -> pa.Table:
if _dfi_contains_categorical_data(dfi):
raise TypeError(
"Polars can not currently guarantee zero-copy conversion from Arrow for"
"\n\ncategorical columns. Set `allow_copy=True` or cast categorical columns to"
"\n\nstring first"
" categorical columns. Set `allow_copy=True` or cast categorical columns to"
" string first"
)

if isinstance(df, pa.Table):
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/io/csv/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def _check_arg_is_1byte(
if arg_byte_length > 1:
raise ValueError(
f'{arg_name}="{arg}" should be a single byte character or empty,'
f"\n\nbut is {arg_byte_length} bytes long"
f" but is {arg_byte_length} bytes long"
)
elif arg_byte_length != 1:
raise ValueError(
Expand Down
14 changes: 7 additions & 7 deletions py-polars/polars/io/csv/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def read_csv(
if not column.startswith("column_"):
raise ValueError(
'specified column names do not start with "column_",'
"\n\nbut autogenerated header names were requested"
" but autogenerated header names were requested"
)

if (
Expand Down Expand Up @@ -302,8 +302,8 @@ def read_csv(
if columns:
if len(columns) < len(new_columns):
raise ValueError(
"More new column names are specified than there are selected"
"\n\ncolumns"
"more new column names are specified than there are selected"
" columns"
)

# Get column names of requested columns.
Expand All @@ -315,7 +315,7 @@ def read_csv(
if columns and len(columns) < len(new_columns):
raise ValueError(
"more new column names are specified than there are selected"
"\n\ncolumns"
" columns"
)
# Convert column indices from projection to 'column_1', 'column_2', ...
# column names.
Expand Down Expand Up @@ -567,7 +567,7 @@ def read_csv_batched(
if not column.startswith("column_"):
raise ValueError(
'specified column names do not start with "column_",'
"\n\nbut autogenerated header names were requested."
" but autogenerated header names were requested."
)

if projection and dtypes and isinstance(dtypes, list):
Expand Down Expand Up @@ -607,7 +607,7 @@ def read_csv_batched(
if len(columns) < len(new_columns):
raise ValueError(
"more new column names are specified than there are selected"
"\n\ncolumns"
" columns"
)

# Get column names of requested columns.
Expand All @@ -619,7 +619,7 @@ def read_csv_batched(
if columns and len(columns) < len(new_columns):
raise ValueError(
"more new column names are specified than there are selected"
"\n\ncolumns"
" columns"
)
# Convert column indices from projection to 'column_1', 'column_2', ...
# column names.
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/io/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def _open_adbc_connection(connection_uri: str) -> Any:
except ImportError:
raise ImportError(
f"ADBC {driver_name} driver not detected; if ADBC supports this database,"
f"\n\nplease run `pip install adbc-driver-{driver_name} pyarrow`"
f" please run `pip install adbc-driver-{driver_name} pyarrow`"
) from None

# some backends require the driver name to be stripped from the URI
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/io/ipc/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def read_ipc(
if use_pyarrow and n_rows and not memory_map:
raise ValueError(
"``n_rows`` cannot be used with ``use_pyarrow=True``"
"\n\nand memory_map=False`"
" and ``memory_map=False``"
)

storage_options = storage_options or {}
Expand Down
6 changes: 4 additions & 2 deletions py-polars/polars/lazyframe/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -744,11 +744,13 @@ def width(self) -> int:
def __bool__(self) -> NoReturn:
raise ValueError(
"the truth value of a LazyFrame is ambiguous; consequently it"
"\n\ncannot be used in boolean context with and/or/not operators"
" cannot be used in boolean context with and/or/not operators"
)

def _comparison_error(self, operator: str) -> NoReturn:
raise TypeError(f'"{operator!r}" comparison not supported for LazyFrame objects')
raise TypeError(
f'"{operator!r}" comparison not supported for LazyFrame objects'
)

def __eq__(self, other: Any) -> NoReturn:
self._comparison_error("==")
Expand Down
2 changes: 1 addition & 1 deletion py-polars/polars/lazyframe/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ def agg(
raise ValueError(
"specifying aggregations as a dictionary is not supported."
"\n\nTry unpacking the dictionary to take advantage of the keyword syntax"
"\n\nof the `agg` method"
" of the `agg` method"
)

if "aggs" in named_aggs:
Expand Down
Loading

0 comments on commit 670cff9

Please sign in to comment.