DOC: Fix validation error type RT01 and check in CI (#25356) #26234

Merged (21 commits, May 8, 2019)
21 commits
89e5b22
DOC: Fix validation error RT01 in pandas/core (#25356)
ihsansecer Apr 27, 2019
4ef2b93
DOC: Fix validation error RT01 in pandas/tseries (#25356)
ihsansecer Apr 27, 2019
648d156
DOC: Fix validation error RT01 in pandas/core/arrays (#25356)
ihsansecer Apr 27, 2019
9498481
DOC: Fix validation error RT01 in pandas/core/dtypes (#25356)
ihsansecer Apr 27, 2019
7967414
DOC: Fix validation error RT01 in pandas/plotting (#25356)
ihsansecer Apr 27, 2019
ac4e339
DOC: Fix validation error RT01 in pandas/util (#25356)
ihsansecer Apr 27, 2019
e1ae56c
DOC: Fix validation error RT01 in pandas/io (#25356)
ihsansecer Apr 27, 2019
dd105a6
DOC: Fix remaining validation errors RT01 (#25356)
ihsansecer Apr 28, 2019
a107468
DOC: Fix validation error RT01 for itertuples function (#25356)
ihsansecer Apr 28, 2019
c7124e7
DOC: Update the code_check.sh script to take into account the RT01 ty…
ihsansecer Apr 28, 2019
26a3dc5
DOC: Remove repeating return description (#25356)
ihsansecer Apr 28, 2019
5920ad2
DOC: Remove whitespaces (#25356)
ihsansecer Apr 28, 2019
484b69b
DOC: Fix introduced docstring validation errors (#25356)
ihsansecer Apr 28, 2019
dc209be
DOC: Add period to the end of first line where missing (#26234)
ihsansecer Apr 29, 2019
a011fa4
DOC: Change boolean to bool (#26234)
ihsansecer Apr 29, 2019
85a82c4
DOC: Remove returned variable names from docstring (#26234)
ihsansecer Apr 29, 2019
bd21eea
DOC: Fix typo #26234
datapythonista May 1, 2019
d5f80f7
DOC: Make minor style changes (#26234)
ihsansecer May 1, 2019
d32306c
DOC: Remove return section from None returning functions (#26234)
ihsansecer May 7, 2019
dc42367
Merge remote-tracking branch 'upstream/master' into rt01-validation-e…
ihsansecer May 7, 2019
331a5df
DOC: Resolve conflicts (#26234)
ihsansecer May 7, 2019
4 changes: 2 additions & 2 deletions ci/code_checks.sh
@@ -261,8 +261,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then

MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05
MSG='Validate docstrings (GL03, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG
$BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05
RET=$(($RET + $?)) ; echo $MSG "DONE"

fi
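RT01 is the docstring-validation error for a missing Returns section, so adding it to the --errors list above makes CI fail on any public docstring that lacks one. A minimal sketch of the kind of fix the commits apply (add_one is an invented function, not part of pandas):

def add_one(x):
    """
    Add one to the input.

    Parameters
    ----------
    x : int
        Value to increment.

    Returns
    -------
    int
        The input incremented by one.
    """
    return x + 1

Without the Returns block, scripts/validate_docstrings.py run with --errors=RT01 would report this docstring.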
5 changes: 5 additions & 0 deletions pandas/core/accessor.py
@@ -205,6 +205,11 @@ def decorator(accessor):
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.

Returns
-------
callable
A class decorator.

See Also
--------
%(others)s
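The new Returns section documents that register_dataframe_accessor and its siblings hand back a class decorator. A rough usage sketch following the standard extension pattern (the "greet" accessor name and GreetAccessor class are made up for illustration):

import pandas as pd

@pd.api.extensions.register_dataframe_accessor("greet")
class GreetAccessor:
    def __init__(self, pandas_obj):
        self._obj = pandas_obj

    def hello(self):
        # a trivial method exposed as df.greet.hello()
        return "hello, {} rows".format(len(self._obj))

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.greet.hello())  # "hello, 3 rows"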
14 changes: 14 additions & 0 deletions pandas/core/arrays/categorical.py
@@ -619,6 +619,10 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.

Returns
-------
Categorical

Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
@@ -756,6 +760,11 @@ def as_ordered(self, inplace=False):
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to True.

Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
@@ -769,6 +778,11 @@ def as_unordered(self, inplace=False):
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.

Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
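With inplace left at its default of False, as_ordered and as_unordered return a new Categorical, which is what the added Returns sections now state. A small sketch:

import pandas as pd

cat = pd.Categorical(["b", "a", "b"], categories=["a", "b"])
ordered_cat = cat.as_ordered()             # new Categorical with ordered=True
print(ordered_cat.ordered)                 # True
print(ordered_cat.as_unordered().ordered)  # False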
8 changes: 8 additions & 0 deletions pandas/core/arrays/interval.py
@@ -239,6 +239,10 @@ def _from_factorized(cls, values, original):

.. versionadded:: 0.23.0

Returns
-------
%(klass)s

See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
@@ -383,6 +387,10 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None):

..versionadded:: 0.23.0

Returns
-------
%(klass)s

See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
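The %(klass)s placeholder in these shared docstrings expands to IntervalArray or IntervalIndex depending on the caller. A sketch of the IntervalIndex case of from_arrays:

import pandas as pd

# left and right endpoint arrays must have the same length
idx = pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], closed="right")
print(idx)  # IntervalIndex([(0, 1], (1, 2], (2, 3]], ...)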
18 changes: 18 additions & 0 deletions pandas/core/base.py
@@ -656,6 +656,10 @@ class IndexOpsMixin:
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.

Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
@@ -696,6 +700,11 @@ def ndim(self):
def item(self):
"""
Return the first element of the underlying data as a python scalar.

Returns
-------
scalar
The first element of %(klass)s.
"""
return self.values.item()

@@ -1022,6 +1031,11 @@ def argmax(self, axis=None, skipna=True):
Dummy argument for consistency with Series
skipna : bool, default True

Returns
-------
numpy.ndarray
Indices of the maximum values.

See Also
--------
numpy.ndarray.argmax
@@ -1122,6 +1136,10 @@ def __iter__(self):
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)

Returns
-------
iterator
"""
# We are explicity making element iterators.
if is_datetimelike(self._values):
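The item and __iter__ additions describe scalar access on one-element data and element-wise iteration. A brief sketch with a Series (which mixes in IndexOpsMixin):

import pandas as pd

s = pd.Series([10])
print(s.item())       # 10, a plain Python scalar
print(list(iter(s)))  # [10], iteration yields scalar elements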
5 changes: 5 additions & 0 deletions pandas/core/dtypes/dtypes.py
@@ -27,6 +27,11 @@ def register_extension_dtype(cls):
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.

Returns
-------
callable
A class decorator.

Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
4 changes: 4 additions & 0 deletions pandas/core/dtypes/inference.py
@@ -435,6 +435,10 @@ def is_hashable(obj):
Distinguish between these and other types by trying the call to hash() and
seeing if they raise TypeError.

Returns
-------
bool

Examples
--------
>>> a = ([],)
19 changes: 14 additions & 5 deletions pandas/core/frame.py
@@ -839,12 +839,12 @@ def itertuples(self, index=True, name="Pandas"):
The name of the returned namedtuples or None to return regular
tuples.

Yields
Returns
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.

See Also
--------
@@ -3651,6 +3651,10 @@ def lookup(self, row_labels, col_labels):
col_labels : sequence
The column labels to use for lookup

Returns
-------
numpy.ndarray

Notes
-----
Akin to::
@@ -6053,6 +6057,11 @@ def unstack(self, level=-1, fill_value=None):
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.

Returns
-------
DataFrame
Unpivoted DataFrame.

See Also
--------
%(other)s
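The reworded itertuples docstring describes the return value as an iterator of namedtuples rather than using a Yields section. A quick sketch of what that iterator produces:

import pandas as pd

df = pd.DataFrame({"x": [1, 2]}, index=["a", "b"])
for row in df.itertuples():
    # each row is a namedtuple; the first field is the index label
    print(row.Index, row.x)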
48 changes: 47 additions & 1 deletion pandas/core/generic.py
@@ -1518,6 +1518,11 @@ def bool(self):
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean

Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
@@ -1845,14 +1850,26 @@ def __hash__(self):
' hashed'.format(self.__class__.__name__))

def __iter__(self):
"""Iterate over info axis"""
"""
Iterate over info axis.

Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)

# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)

This is index for Series, columns for DataFrame.

Returns
-------
Index
Info axis.
"""
return self._info_axis

@@ -1946,6 +1963,11 @@ def __array_wrap__(self, result, context=None):
def to_dense(self):
"""
Return dense representation of NDFrame (as opposed to sparse).

Returns
-------
%(klass)s
Dense %(klass)s.
"""
# compat
return self
@@ -2238,6 +2260,12 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,

.. versionadded:: 0.23.0

Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.

See Also
--------
read_json
@@ -2418,6 +2446,12 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)

Returns
-------
None or str
If path_or_buf is None, returns the resulting msgpack format as a
string. Otherwise returns None.
"""

from pandas.io import packers
@@ -6167,13 +6201,23 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.

Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)

def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
@@ -9313,6 +9357,8 @@ def tz_convert(self, tz, axis=0, level=None, copy=True):

Returns
-------
%(klass)s
Object with time zone converted axis.

Raises
------
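Two of the generic.py additions are easy to see interactively: to_json returns the JSON string only when no path is given (and None otherwise), and ffill/bfill return an object with missing values filled. A short sketch (the out.json filename is arbitrary):

import pandas as pd

df = pd.DataFrame({"a": [1.0, None, 3.0]})
print(df.to_json())     # no path_or_buf, so the JSON string is returned
df.to_json("out.json")  # writing to a path returns None

print(df.ffill())       # missing value forward-filled: 1.0, 1.0, 3.0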
23 changes: 21 additions & 2 deletions pandas/core/groupby/generic.py
@@ -999,6 +999,11 @@ def true_and_notna(x, *args, **kwargs):
def nunique(self, dropna=True):
"""
Return number of unique elements in the group.

Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info

@@ -1181,7 +1186,14 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
return Series(out, index=mi, name=self._selection_name)

def count(self):
""" Compute count of group, excluding missing values """
"""
Compute count of group, excluding missing values.

Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()

@@ -1479,7 +1491,14 @@ def _fill(self, direction, limit=None):
return concat((self._wrap_transformed_output(output), res), axis=1)

def count(self):
""" Compute count of group, excluding missing values """
"""
Compute count of group, excluding missing values.

Returns
-------
DataFrame
Count of values within each group.
"""
from pandas.core.dtypes.missing import _isna_ndarraylike as _isna

data, _ = self._get_data_to_aggregate()
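The groupby count and nunique docstrings now name their return types (Series for a SeriesGroupBy, DataFrame for a DataFrameGroupBy). A small sketch:

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, None, 1.0]})
print(df.groupby("key")["val"].nunique())  # Series: unique non-NA values per group
print(df.groupby("key").count())           # DataFrame: non-missing counts per group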