From 21705e61503fb49f000186c0d556e5623bd5ac82 Mon Sep 17 00:00:00 2001 From: crusaderky Date: Tue, 1 Oct 2019 19:13:55 +0100 Subject: [PATCH] Revisit # noqa annotations (#3359) --- asv_bench/benchmarks/__init__.py | 2 +- asv_bench/benchmarks/dataarray_missing.py | 2 +- doc/examples/_code/weather_data_setup.py | 2 +- doc/gallery/plot_cartopy_facetgrid.py | 2 +- setup.py | 2 +- xarray/backends/api.py | 73 ++++++++++----------- xarray/backends/locks.py | 4 +- xarray/conventions.py | 2 +- xarray/core/alignment.py | 16 ++--- xarray/core/common.py | 10 +-- xarray/core/dataarray.py | 2 +- xarray/core/dataset.py | 8 +-- xarray/core/indexing.py | 4 +- xarray/core/merge.py | 24 +++---- xarray/core/rolling_exp.py | 2 +- xarray/testing.py | 12 ++-- xarray/tests/__init__.py | 6 +- xarray/tests/test_accessor_str.py | 4 +- xarray/tests/test_backends.py | 8 +-- xarray/tests/test_coding_times.py | 4 +- xarray/tests/test_dataarray.py | 44 ++++++------- xarray/tests/test_dataset.py | 4 +- xarray/tests/test_distributed.py | 13 ++-- xarray/tests/test_indexing.py | 80 +++++++++++------------ xarray/tests/test_sparse.py | 10 +-- xarray/tests/test_ufuncs.py | 2 +- xarray/tests/test_variable.py | 2 +- 27 files changed, 167 insertions(+), 177 deletions(-) diff --git a/asv_bench/benchmarks/__init__.py b/asv_bench/benchmarks/__init__.py index ef647c5a74a..1ffd3afa4ae 100644 --- a/asv_bench/benchmarks/__init__.py +++ b/asv_bench/benchmarks/__init__.py @@ -16,7 +16,7 @@ def decorator(func): def requires_dask(): try: - import dask # noqa + import dask # noqa: F401 except ImportError: raise NotImplementedError diff --git a/asv_bench/benchmarks/dataarray_missing.py b/asv_bench/benchmarks/dataarray_missing.py index 9711e2bbcd0..d79d2558b35 100644 --- a/asv_bench/benchmarks/dataarray_missing.py +++ b/asv_bench/benchmarks/dataarray_missing.py @@ -5,7 +5,7 @@ from . import randn, requires_dask try: - import dask # noqa + import dask # noqa: F401 except ImportError: pass diff --git a/doc/examples/_code/weather_data_setup.py b/doc/examples/_code/weather_data_setup.py index 385f5366ef7..4e4e2ab176e 100644 --- a/doc/examples/_code/weather_data_setup.py +++ b/doc/examples/_code/weather_data_setup.py @@ -1,6 +1,6 @@ import numpy as np import pandas as pd -import seaborn as sns # noqa, pandas aware plotting library +import seaborn as sns import xarray as xr diff --git a/doc/gallery/plot_cartopy_facetgrid.py b/doc/gallery/plot_cartopy_facetgrid.py index af04ad6856a..11db9b800b5 100644 --- a/doc/gallery/plot_cartopy_facetgrid.py +++ b/doc/gallery/plot_cartopy_facetgrid.py @@ -12,7 +12,7 @@ For more details see `this discussion`_ on github. .. 
_this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567 -""" # noqa +""" import cartopy.crs as ccrs diff --git a/setup.py b/setup.py index b829f6e1f98..5cfa4d9f9df 100644 --- a/setup.py +++ b/setup.py @@ -86,7 +86,7 @@ - Issue tracker: http://github.com/pydata/xarray/issues - Source code: http://github.com/pydata/xarray - SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk -""" # noqa +""" setup( diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 0d6dedac57e..458a2d0cc42 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -42,12 +42,12 @@ def _get_default_engine_remote_uri(): try: - import netCDF4 # noqa + import netCDF4 # noqa: F401 engine = "netcdf4" except ImportError: # pragma: no cover try: - import pydap # noqa + import pydap # noqa: F401 engine = "pydap" except ImportError: @@ -61,13 +61,13 @@ def _get_default_engine_remote_uri(): def _get_default_engine_grib(): msgs = [] try: - import Nio # noqa + import Nio # noqa: F401 msgs += ["set engine='pynio' to access GRIB files with PyNIO"] except ImportError: # pragma: no cover pass try: - import cfgrib # noqa + import cfgrib # noqa: F401 msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"] except ImportError: # pragma: no cover @@ -80,7 +80,7 @@ def _get_default_engine_grib(): def _get_default_engine_gz(): try: - import scipy # noqa + import scipy # noqa: F401 engine = "scipy" except ImportError: # pragma: no cover @@ -90,12 +90,12 @@ def _get_default_engine_gz(): def _get_default_engine_netcdf(): try: - import netCDF4 # noqa + import netCDF4 # noqa: F401 engine = "netcdf4" except ImportError: # pragma: no cover try: - import scipy.io.netcdf # noqa + import scipy.io.netcdf # noqa: F401 engine = "scipy" except ImportError: @@ -722,44 +722,41 @@ def open_mfdataset( ): """Open multiple files as a single dataset. - If combine='by_coords' then the function ``combine_by_coords`` is used to - combine the datasets into one before returning the result, and if - combine='nested' then ``combine_nested`` is used. The filepaths must be - structured according to which combining function is used, the details of - which are given in the documentation for ``combine_by_coords`` and - ``combine_nested``. By default the old (now deprecated) ``auto_combine`` - will be used, please specify either ``combine='by_coords'`` or - ``combine='nested'`` in future. Requires dask to be installed. See - documentation for details on dask [1]. Attributes from the first dataset - file are used for the combined dataset. + If combine='by_coords' then the function ``combine_by_coords`` is used to combine + the datasets into one before returning the result, and if combine='nested' then + ``combine_nested`` is used. The filepaths must be structured according to which + combining function is used, the details of which are given in the documentation for + ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated) + ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or + ``combine='nested'`` in future. Requires dask to be installed. See documentation for + details on dask [1]. Attributes from the first dataset file are used for the + combined dataset. Parameters ---------- paths : str or sequence - Either a string glob in the form "path/to/my/files/*.nc" or an explicit - list of files to open. Paths can be given as strings or as pathlib - Paths. 
If concatenation along more than one dimension is desired, then - ``paths`` must be a nested list-of-lists (see ``manual_combine`` for - details). (A string glob will be expanded to a 1-dimensional list.) + Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of + files to open. Paths can be given as strings or as pathlib Paths. If + concatenation along more than one dimension is desired, then ``paths`` must be a + nested list-of-lists (see ``manual_combine`` for details). (A string glob will + be expanded to a 1-dimensional list.) chunks : int or dict, optional - Dictionary with keys given by dimension names and values given by chunk - sizes. In general, these should divide the dimensions of each dataset. - If int, chunk each dimension by ``chunks``. - By default, chunks will be chosen to load entire input files into - memory at once. This has a major impact on performance: please see the - full documentation for more details [2]. + Dictionary with keys given by dimension names and values given by chunk sizes. + In general, these should divide the dimensions of each dataset. If int, chunk + each dimension by ``chunks``. By default, chunks will be chosen to load entire + input files into memory at once. This has a major impact on performance: please + see the full documentation for more details [2]. concat_dim : str, or list of str, DataArray, Index or None, optional - Dimensions to concatenate files along. You only - need to provide this argument if any of the dimensions along which you - want to concatenate is not a dimension in the original datasets, e.g., - if you want to stack a collection of 2D arrays along a third dimension. - Set ``concat_dim=[..., None, ...]`` explicitly to + Dimensions to concatenate files along. You only need to provide this argument + if any of the dimensions along which you want to concatenate is not a dimension + in the original datasets, e.g., if you want to stack a collection of 2D arrays + along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a particular dimension. combine : {'by_coords', 'nested'}, optional - Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is - used to combine all the data. If this argument is not provided, - `xarray.auto_combine` is used, but in the future this behavior will - switch to use `xarray.combine_by_coords` by default. + Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to + combine all the data. If this argument is not provided, `xarray.auto_combine` is + used, but in the future this behavior will switch to use + `xarray.combine_by_coords` by default. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional String indicating how to compare variables of the same name for @@ -854,7 +851,7 @@ def open_mfdataset( .. [1] http://xarray.pydata.org/en/stable/dask.html .. 
[2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance - """ # noqa + """ if isinstance(paths, str): if is_remote_uri(paths): raise ValueError( diff --git a/xarray/backends/locks.py b/xarray/backends/locks.py index 1c5edc215fc..865ce1ddccd 100644 --- a/xarray/backends/locks.py +++ b/xarray/backends/locks.py @@ -21,9 +21,7 @@ NETCDFC_LOCK = SerializableLock() -_FILE_LOCKS = ( - weakref.WeakValueDictionary() -) # type: MutableMapping[Any, threading.Lock] # noqa +_FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] def _get_threaded_lock(key): diff --git a/xarray/conventions.py b/xarray/conventions.py index 1e40d254e96..a3424db66ee 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -753,7 +753,7 @@ def cf_encoder(variables, attributes): for var in new_vars.values(): bounds = var.attrs["bounds"] if "bounds" in var.attrs else None if bounds and bounds in new_vars: - # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries # noqa + # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries for attr in [ "units", "standard_name", diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index c26b879d839..4529fa509d9 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -13,8 +13,8 @@ from .variable import IndexVariable, Variable if TYPE_CHECKING: - from .dataarray import DataArray # noqa: F401 - from .dataset import Dataset # noqa: F401 + from .dataarray import DataArray + from .dataset import Dataset def _get_joiner(join): @@ -350,8 +350,8 @@ def deep_align( This function is not public API. """ - from .dataarray import DataArray # noqa: F811 - from .dataset import Dataset # noqa: F811 + from .dataarray import DataArray + from .dataset import Dataset if indexes is None: indexes = {} @@ -411,7 +411,7 @@ def is_alignable(obj): def reindex_like_indexers( - target: Union["DataArray", "Dataset"], other: Union["DataArray", "Dataset"] + target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]" ) -> Dict[Hashable, pd.Index]: """Extract indexers to align target with other. @@ -503,7 +503,7 @@ def reindex_variables( new_indexes : OrderedDict Dict of indexes associated with the reindexed variables. """ - from .dataarray import DataArray # noqa: F811 + from .dataarray import DataArray # create variables for the new dataset reindexed = OrderedDict() # type: OrderedDict[Any, Variable] @@ -600,8 +600,8 @@ def _get_broadcast_dims_map_common_coords(args, exclude): def _broadcast_helper(arg, exclude, dims_map, common_coords): - from .dataarray import DataArray # noqa: F811 - from .dataset import Dataset # noqa: F811 + from .dataarray import DataArray + from .dataset import Dataset def _set_dims(var): # Add excluded dims to a copy of dims_map diff --git a/xarray/core/common.py b/xarray/core/common.py index 5b166890575..a8fac245c02 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -293,7 +293,7 @@ def _ipython_key_completions_(self) -> List[str]: """Provide method for the key-autocompletions in IPython. See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion For the details. 
- """ # noqa + """ item_lists = [ item for sublist in self._item_sources @@ -669,7 +669,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None): -------- core.groupby.DataArrayGroupBy core.groupby.DatasetGroupBy - """ # noqa + """ return self._groupby_cls( self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims ) @@ -732,7 +732,7 @@ def groupby_bins( References ---------- .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html - """ # noqa + """ return self._groupby_cls( self, group, @@ -808,7 +808,7 @@ def rolling( -------- core.rolling.DataArrayRolling core.rolling.DatasetRolling - """ # noqa + """ dim = either_dict_or_kwargs(dim, window_kwargs, "rolling") return self._rolling_cls(self, dim, min_periods=min_periods, center=center) @@ -1005,7 +1005,7 @@ def resample( ---------- .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases - """ # noqa + """ # TODO support non-string indexer after removing the old API. from .dataarray import DataArray diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 3becce7e432..68bfe301bfc 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -3054,7 +3054,7 @@ def integrate( return self._from_temp_dataset(ds) # this needs to be at the end, or mypy will confuse with `str` - # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names # noqa + # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names str = property(StringAccessor) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 5fa96216ba0..9a1339cf528 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1063,7 +1063,7 @@ def copy(self, deep: bool = False, data: Mapping = None) -> "Dataset": See Also -------- pandas.DataFrame.copy - """ # noqa + """ if data is None: variables = OrderedDict( (k, v.copy(deep=deep)) for k, v in self._variables.items() @@ -1714,7 +1714,7 @@ def chunk( from dask.base import tokenize except ImportError: # raise the usual error if dask is entirely missing - import dask # noqa + import dask # noqa: F401 raise ImportError("xarray requires dask version 0.9 or newer") @@ -4178,7 +4178,7 @@ def apply( Data variables: foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948 bar (x) float64 1.0 2.0 - """ # noqa + """ variables = OrderedDict( (k, maybe_wrap_array(v, func(v, *args, **kwargs))) for k, v in self.data_vars.items() @@ -5381,7 +5381,7 @@ def filter_by_attrs(self, **kwargs): temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ... precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ... - """ # noqa + """ selection = [] for var_name, variable in self.variables.items(): has_value_flag = False diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py index c6a8f6f35e4..6d42c254438 100644 --- a/xarray/core/indexing.py +++ b/xarray/core/indexing.py @@ -331,7 +331,7 @@ class ExplicitIndexer: __slots__ = ("_key",) def __init__(self, key): - if type(self) is ExplicitIndexer: # noqa + if type(self) is ExplicitIndexer: raise TypeError("cannot instantiate base ExplicitIndexer objects") self._key = tuple(key) @@ -1261,7 +1261,7 @@ def _indexing_array_and_key(self, key): array = self.array # We want 0d slices rather than scalars. This is achieved by # appending an ellipsis (see - # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes). 
# noqa + # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes). key = key.tuple + (Ellipsis,) else: raise TypeError("unexpected key type: {}".format(type(key))) diff --git a/xarray/core/merge.py b/xarray/core/merge.py index ca753c588d4..ceeb7db09f1 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -94,7 +94,7 @@ def unique_variable(name, variables, compat="broadcast_equals", equals=None): Raises ------ MergeError: if any of the variables are not equal. - """ # noqa + """ out = variables[0] if len(variables) == 1 or compat == "override": @@ -171,7 +171,7 @@ def merge_variables( OrderedDict with keys taken by the union of keys on list_of_variable_dicts, and Variable values corresponding to those that should be found on the merged result. - """ # noqa + """ if priority_vars is None: priority_vars = {} @@ -231,7 +231,7 @@ def expand_variable_dicts( an input's values. The values of each ordered dictionary are all xarray.Variable objects. """ - from .dataarray import DataArray # noqa: F811 + from .dataarray import DataArray from .dataset import Dataset var_dicts = [] @@ -278,7 +278,7 @@ def determine_coords( All variable found in the input should appear in either the set of coordinate or non-coordinate names. """ - from .dataarray import DataArray # noqa: F811 + from .dataarray import DataArray from .dataset import Dataset coord_names = set() # type: set @@ -313,7 +313,7 @@ def coerce_pandas_values(objects: Iterable["DatasetLike"]) -> List["DatasetLike" List of Dataset or OrderedDict objects. Any inputs or values in the inputs that were pandas objects have been converted into native xarray objects. """ - from .dataarray import DataArray # noqa: F811 + from .dataarray import DataArray from .dataset import Dataset out = [] @@ -363,7 +363,7 @@ def _get_priority_vars(objects, priority_arg, compat="equals"): ------- None, if priority_arg is None, or an OrderedDict with Variable objects as values indicating priority variables. - """ # noqa + """ if priority_arg is None: priority_vars = {} else: @@ -485,7 +485,7 @@ def merge_core( Raises ------ MergeError if the merge cannot be done successfully. - """ # noqa + """ from .dataset import calculate_dimensions _assert_compat_valid(compat) @@ -592,7 +592,7 @@ def merge(objects, compat="no_conflicts", join="outer", fill_value=dtypes.NA): Coordinates: * lat (lat) float64 35.0 40.0 * lon (lon) float64 100.0 120.0 - + >>> y array([[5., 6.], @@ -632,7 +632,7 @@ def merge(objects, compat="no_conflicts", join="outer", fill_value=dtypes.NA): var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0 - + >>> xr.merge([x, y, z], compat='equals') Dimensions: (lat: 3, lon: 3, time: 2) @@ -718,8 +718,8 @@ def merge(objects, compat="no_conflicts", join="outer", fill_value=dtypes.NA): See also -------- concat - """ # noqa - from .dataarray import DataArray # noqa: F811 + """ + from .dataarray import DataArray from .dataset import Dataset dict_like_objects = list() @@ -793,7 +793,7 @@ def dataset_update_method( `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068, GH2180). 
""" - from .dataarray import DataArray # noqa: F811 + from .dataarray import DataArray from .dataset import Dataset if not isinstance(other, Dataset): diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index 057884fef85..2139d246f46 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -81,7 +81,7 @@ class RollingExp: Returns ------- RollingExp : type of input argument - """ # noqa + """ def __init__(self, obj, windows, window_type="span"): self.obj = obj diff --git a/xarray/testing.py b/xarray/testing.py index 9fa58b64001..787ec1aadb0 100644 --- a/xarray/testing.py +++ b/xarray/testing.py @@ -50,8 +50,8 @@ def assert_equal(a, b): assert_identical, assert_allclose, Dataset.equals, DataArray.equals, numpy.testing.assert_array_equal """ - __tracebackhide__ = True # noqa: F841 - assert type(a) == type(b) # noqa + __tracebackhide__ = True + assert type(a) == type(b) if isinstance(a, (Variable, DataArray)): assert a.equals(b), formatting.diff_array_repr(a, b, "equals") elif isinstance(a, Dataset): @@ -77,8 +77,8 @@ def assert_identical(a, b): -------- assert_equal, assert_allclose, Dataset.equals, DataArray.equals """ - __tracebackhide__ = True # noqa: F841 - assert type(a) == type(b) # noqa + __tracebackhide__ = True + assert type(a) == type(b) if isinstance(a, Variable): assert a.identical(b), formatting.diff_array_repr(a, b, "identical") elif isinstance(a, DataArray): @@ -115,8 +115,8 @@ def assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True): -------- assert_identical, assert_equal, numpy.testing.assert_allclose """ - __tracebackhide__ = True # noqa: F841 - assert type(a) == type(b) # noqa + __tracebackhide__ = True + assert type(a) == type(b) kwargs = dict(rtol=rtol, atol=atol, decode_bytes=decode_bytes) if isinstance(a, Variable): assert a.dims == b.dims diff --git a/xarray/tests/__init__.py b/xarray/tests/__init__.py index ab1d2714b9d..4f5a3e37888 100644 --- a/xarray/tests/__init__.py +++ b/xarray/tests/__init__.py @@ -4,7 +4,7 @@ import warnings from contextlib import contextmanager from distutils import version -from unittest import mock # noqa +from unittest import mock # noqa: F401 import numpy as np import pytest @@ -12,7 +12,7 @@ import xarray.testing from xarray.core import utils -from xarray.core.duck_array_ops import allclose_or_equiv # noqa +from xarray.core.duck_array_ops import allclose_or_equiv # noqa: F401 from xarray.core.indexing import ExplicitlyIndexed from xarray.core.options import set_options from xarray.plot.utils import import_seaborn @@ -127,7 +127,7 @@ def LooseVersion(vstring): @contextmanager def raises_regex(error, pattern): - __tracebackhide__ = True # noqa: F841 + __tracebackhide__ = True with pytest.raises(error) as excinfo: yield message = str(excinfo.value) diff --git a/xarray/tests/test_accessor_str.py b/xarray/tests/test_accessor_str.py index 360653b229b..56bf6dbb3a2 100644 --- a/xarray/tests/test_accessor_str.py +++ b/xarray/tests/test_accessor_str.py @@ -142,14 +142,14 @@ def test_replace(dtype): def test_replace_callable(): values = xr.DataArray(["fooBAD__barBAD"]) # test with callable - repl = lambda m: m.group(0).swapcase() # noqa + repl = lambda m: m.group(0).swapcase() result = values.str.replace("[a-z][A-Z]{2}", repl, n=2) exp = xr.DataArray(["foObaD__baRbaD"]) assert_equal(result, exp) # test regex named groups values = xr.DataArray(["Foo Bar Baz"]) pat = r"(?P\w+) (?P\w+) (?P\w+)" - repl = lambda m: m.group("middle").swapcase() # noqa + repl = lambda m: m.group("middle").swapcase() result = 
values.str.replace(pat, repl) exp = xr.DataArray(["bAR"]) assert_equal(result, exp) diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py index 04801a64c7f..87958824c7b 100644 --- a/xarray/tests/test_backends.py +++ b/xarray/tests/test_backends.py @@ -2861,11 +2861,9 @@ def test_encoding_mfdataset(self): ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset([tmp1, tmp2], combine="nested") as actual: - assert ( - actual.t.encoding["units"] == original.t.encoding["units"] - ) # noqa - assert actual.t.encoding["units"] == ds1.t.encoding["units"] # noqa - assert actual.t.encoding["units"] != ds2.t.encoding["units"] # noqa + assert actual.t.encoding["units"] == original.t.encoding["units"] + assert actual.t.encoding["units"] == ds1.t.encoding["units"] + assert actual.t.encoding["units"] != ds2.t.encoding["units"] def test_preprocess_mfdataset(self): original = Dataset({"foo": ("x", np.random.randn(10))}) diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py index 615a7e00172..a778ff8147f 100644 --- a/xarray/tests/test_coding_times.py +++ b/xarray/tests/test_coding_times.py @@ -738,9 +738,7 @@ def test_encode_time_bounds(): with pytest.raises(AssertionError): assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs - assert ( - encoded["time_bounds"].attrs["units"] == ds.time_bounds.encoding["units"] - ) # noqa + assert encoded["time_bounds"].attrs["units"] == ds.time_bounds.encoding["units"] ds.time.encoding = {} with pytest.warns(UserWarning): diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 9ba3eecc5a0..717025afb23 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -486,32 +486,32 @@ def test_getitem(self): assert_identical(self.ds["x"], x) assert_identical(self.ds["y"], y) - I = ReturnItem() # noqa + arr = ReturnItem() for i in [ - I[:], - I[...], - I[x.values], - I[x.variable], - I[x], - I[x, y], - I[x.values > -1], - I[x.variable > -1], - I[x > -1], - I[x > -1, y > -1], + arr[:], + arr[...], + arr[x.values], + arr[x.variable], + arr[x], + arr[x, y], + arr[x.values > -1], + arr[x.variable > -1], + arr[x > -1], + arr[x > -1, y > -1], ]: assert_equal(self.dv, self.dv[i]) for i in [ - I[0], - I[:, 0], - I[:3, :2], - I[x.values[:3]], - I[x.variable[:3]], - I[x[:3]], - I[x[:3], y[:4]], - I[x.values > 3], - I[x.variable > 3], - I[x > 3], - I[x > 3, y > 3], + arr[0], + arr[:, 0], + arr[:3, :2], + arr[x.values[:3]], + arr[x.variable[:3]], + arr[x[:3]], + arr[x[:3], y[:4]], + arr[x.values > 3], + arr[x.variable > 3], + arr[x > 3], + arr[x > 3, y > 3], ]: assert_array_equal(self.v[i], self.dv[i]) diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index f02990a1be9..5d856c9f323 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -557,7 +557,7 @@ def test_properties(self): # change them inadvertently: assert isinstance(ds.dims, utils.Frozen) assert isinstance(ds.dims.mapping, utils.SortedKeysDict) - assert type(ds.dims.mapping.mapping) is dict # noqa + assert type(ds.dims.mapping.mapping) is dict assert list(ds) == list(ds.data_vars) assert list(ds.keys()) == list(ds.data_vars) @@ -2359,7 +2359,7 @@ def test_rename(self): renamed[k].variable.to_base_variable(), ) assert v.encoding == renamed[k].encoding - assert type(v) == type(renamed.variables[k]) # noqa: E721 + assert type(v) is type(renamed.variables[k]) # noqa: E721 assert "var1" not in renamed assert "dim2" not in 
renamed diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py index 0929efc56f2..a3bea6db85f 100644 --- a/xarray/tests/test_distributed.py +++ b/xarray/tests/test_distributed.py @@ -1,5 +1,4 @@ """ isort:skip_file """ -# flake8: noqa: E402 - ignore linters re order of imports import pickle import pytest @@ -9,7 +8,7 @@ from dask.distributed import Client, Lock from distributed.utils_test import cluster, gen_cluster -from distributed.utils_test import loop # noqa +from distributed.utils_test import loop from distributed.client import futures_of import xarray as xr @@ -74,7 +73,7 @@ def tmp_netcdf_filename(tmpdir): ] -@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) # noqa +@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) def test_dask_distributed_netcdf_roundtrip( loop, tmp_netcdf_filename, engine, nc_format ): @@ -106,7 +105,7 @@ def test_dask_distributed_netcdf_roundtrip( assert_allclose(original, computed) -@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) # noqa +@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) def test_dask_distributed_read_netcdf_integration_test( loop, tmp_netcdf_filename, engine, nc_format ): @@ -130,7 +129,7 @@ def test_dask_distributed_read_netcdf_integration_test( assert_allclose(original, computed) -@requires_zarr # noqar +@requires_zarr @pytest.mark.parametrize("consolidated", [True, False]) @pytest.mark.parametrize("compute", [True, False]) def test_dask_distributed_zarr_integration_test(loop, consolidated, compute): @@ -158,7 +157,7 @@ def test_dask_distributed_zarr_integration_test(loop, consolidated, compute): assert_allclose(original, computed) -@requires_rasterio # noqa +@requires_rasterio def test_dask_distributed_rasterio_integration_test(loop): with create_tmp_geotiff() as (tmp_file, expected): with cluster() as (s, [a, b]): @@ -169,7 +168,7 @@ def test_dask_distributed_rasterio_integration_test(loop): assert_allclose(actual, expected) -@requires_cfgrib # noqa +@requires_cfgrib def test_dask_distributed_cfgrib_integration_test(loop): with cluster() as (s, [a, b]): with Client(s["address"], loop=loop): diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py index 82ee9b63f9d..ba108b2dbaf 100644 --- a/xarray/tests/test_indexing.py +++ b/xarray/tests/test_indexing.py @@ -21,24 +21,24 @@ def set_to_zero(self, x, i): def test_expanded_indexer(self): x = np.random.randn(10, 11, 12, 13, 14) y = np.arange(5) - I = ReturnItem() # noqa + arr = ReturnItem() for i in [ - I[:], - I[...], - I[0, :, 10], - I[..., 10], - I[:5, ..., 0], - I[..., 0, :], - I[y], - I[y, y], - I[..., y, y], - I[..., 0, 1, 2, 3, 4], + arr[:], + arr[...], + arr[0, :, 10], + arr[..., 10], + arr[:5, ..., 0], + arr[..., 0, :], + arr[y], + arr[y, y], + arr[..., y, y], + arr[..., 0, 1, 2, 3, 4], ]: j = indexing.expanded_indexer(i, x.ndim) assert_array_equal(x[i], x[j]) assert_array_equal(self.set_to_zero(x, i), self.set_to_zero(x, j)) with raises_regex(IndexError, "too many indices"): - indexing.expanded_indexer(I[1, 2, 3], 2) + indexing.expanded_indexer(arr[1, 2, 3], 2) def test_asarray_tuplesafe(self): res = indexing._asarray_tuplesafe(("a", 1)) @@ -184,27 +184,27 @@ def test_read_only_view(self): class TestLazyArray: def test_slice_slice(self): - I = ReturnItem() # noqa: E741 # allow ambiguous name + arr = ReturnItem() for size in [100, 99]: # We test even/odd size cases x = np.arange(size) slices = [ - I[:3], - I[:4], - I[2:4], - I[:1], - I[:-1], - I[5:-1], - I[-5:-1], - I[::-1], - 
I[5::-1], - I[:3:-1], - I[:30:-1], - I[10:4:], - I[::4], - I[4:4:4], - I[:4:-4], - I[::-2], + arr[:3], + arr[:4], + arr[2:4], + arr[:1], + arr[:-1], + arr[5:-1], + arr[-5:-1], + arr[::-1], + arr[5::-1], + arr[:3:-1], + arr[:30:-1], + arr[10:4:], + arr[::4], + arr[4:4:4], + arr[:4:-4], + arr[::-2], ] for i in slices: for j in slices: @@ -219,9 +219,9 @@ def test_lazily_indexed_array(self): v = Variable(["i", "j", "k"], original) lazy = indexing.LazilyOuterIndexedArray(x) v_lazy = Variable(["i", "j", "k"], lazy) - I = ReturnItem() # noqa: E741 # allow ambiguous name + arr = ReturnItem() # test orthogonally applied indexers - indexers = [I[:], 0, -2, I[:3], [0, 1, 2, 3], [0], np.arange(10) < 5] + indexers = [arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0], np.arange(10) < 5] for i in indexers: for j in indexers: for k in indexers: @@ -252,12 +252,12 @@ def test_lazily_indexed_array(self): # test sequentially applied indexers indexers = [ (3, 2), - (I[:], 0), - (I[:2], -1), - (I[:4], [0]), + (arr[:], 0), + (arr[:2], -1), + (arr[:4], [0]), ([4, 5], 0), ([0, 1, 2], [0, 1]), - ([0, 3, 5], I[:2]), + ([0, 3, 5], arr[:2]), ] for i, j in indexers: expected = v[i][j] @@ -288,7 +288,7 @@ def test_vectorized_lazily_indexed_array(self): v_eager = Variable(["i", "j", "k"], x) lazy = indexing.LazilyOuterIndexedArray(x) v_lazy = Variable(["i", "j", "k"], lazy) - I = ReturnItem() # noqa: E741 # allow ambiguous name + arr = ReturnItem() def check_indexing(v_eager, v_lazy, indexers): for indexer in indexers: @@ -307,7 +307,7 @@ def check_indexing(v_eager, v_lazy, indexers): v_lazy = actual # test orthogonal indexing - indexers = [(I[:], 0, 1), (Variable("i", [0, 1]),)] + indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]),)] check_indexing(v_eager, v_lazy, indexers) # vectorized indexing @@ -637,13 +637,13 @@ def nonzero(x): original = np.random.rand(10, 20, 30) v = Variable(["i", "j", "k"], original) - I = ReturnItem() # noqa: E741 # allow ambiguous name + arr = ReturnItem() # test orthogonally applied indexers indexers = [ - I[:], + arr[:], 0, -2, - I[:3], + arr[:3], np.array([0, 1, 2, 3]), np.array([0]), np.arange(10) < 5, diff --git a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py index 9393d073cb7..4a0c6c58619 100644 --- a/xarray/tests/test_sparse.py +++ b/xarray/tests/test_sparse.py @@ -176,7 +176,7 @@ def test_variable_property(prop): ), param( do("pad_with_fill_value", pad_widths={"x": (1, 1)}, fill_value=5), - True, # noqa + True, marks=xfail(reason="Missing implementation for np.pad"), ), (do("prod"), False), @@ -430,7 +430,7 @@ def test_dataarray_property(prop): ), True, marks=xfail(reason="Indexing COO with more than one iterable index"), - ), # noqa + ), param(do("interpolate_na", "x"), True, marks=xfail(reason="Coercion to dense")), param( do("isin", [1, 2, 3]), @@ -477,13 +477,13 @@ def test_dataarray_property(prop): ), True, marks=xfail(reason="Indexing COO with more than one iterable index"), - ), # noqa + ), (do("roll", x=2, roll_coords=True), True), param( do("sel", x=[0, 1, 2], y=[2, 3]), True, marks=xfail(reason="Indexing COO with more than one iterable index"), - ), # noqa + ), param( do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd") ), @@ -495,7 +495,7 @@ def test_dataarray_property(prop): do("where", make_xrarray({"x": 10, "y": 5}) > 0.5), False, marks=xfail(reason="Conversion of dense to sparse when using sparse mask"), - ), # noqa + ), ], ids=repr, ) diff --git a/xarray/tests/test_ufuncs.py b/xarray/tests/test_ufuncs.py index dc8ba22f57c..1095cc360dd 
100644 --- a/xarray/tests/test_ufuncs.py +++ b/xarray/tests/test_ufuncs.py @@ -12,7 +12,7 @@ def assert_identical(a, b): - assert type(a) is type(b) or (float(a) == float(b)) # noqa + assert type(a) is type(b) or float(a) == float(b) if isinstance(a, (xr.DataArray, xr.Dataset, xr.Variable)): assert_identical_(a, b) else: diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py index 43551d62265..7f9538c9ea9 100644 --- a/xarray/tests/test_variable.py +++ b/xarray/tests/test_variable.py @@ -101,7 +101,7 @@ def test_getitem_1d_fancy(self): ind = Variable(("a", "b"), [[0, 1], [0, 1]]) v_new = v[ind] assert v_new.dims == ("a", "b") - expected = np.array(v._data)[([0, 1], [0, 1]),] # noqa + expected = np.array(v._data)[([0, 1], [0, 1]), ...] assert_array_equal(v_new, expected) # boolean indexing
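
Notes for reviewers
-------------------

Most of the churn above narrows bare "# noqa" comments down to the specific error code being suppressed. The most common case is the optional-dependency probe (asv_bench, backends/api.py, dataset.py), where an import is unused by design and only pyflakes' F401 ("imported but unused") should be silenced; a bare "# noqa" also swallows any other warning that later lands on the same line. A minimal sketch of the convention, with HAS_DASK as an illustrative name rather than something this patch adds:

    try:
        # probing for an optional dependency: the import is unused on purpose,
        # so only F401 needs suppressing, not every check on the line
        import dask  # noqa: F401
    except ImportError:
        HAS_DASK = False
    else:
        HAS_DASK = True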
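The test-suite renames in test_dataarray.py and test_indexing.py replace "I = ReturnItem()" with "arr = ReturnItem()" instead of keeping suppressions: flake8's E741 flags the single-character names "l", "O" and "I" as visually ambiguous, and renaming fixes the cause rather than silencing the check. For context, ReturnItem (defined in xarray/tests/__init__.py) behaves roughly like the sketch below: indexing it hands back the key itself, which makes it easy to build slice/Ellipsis indexer objects in tests.

    class ReturnItem:
        def __getitem__(self, key):
            return key

    arr = ReturnItem()  # formerly: I = ReturnItem()  # noqa
    assert arr[1:3, ..., 0] == (slice(1, 3), Ellipsis, 0)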
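The "# noqa: F841" markers dropped in xarray/testing.py and xarray/tests/__init__.py all sat on "__tracebackhide__ = True". That assignment looks dead to pyflakes (F841, "local variable assigned but never used") because its only consumer is pytest, which reads the name out of the helper's frame at report time and hides that frame from failure tracebacks; the suppressions go away here on the assumption that the project's lint setup no longer flags them. A standalone illustration of the mechanism, with hypothetical test names, meant to be run under pytest:

    def _assert_positive(x):
        __tracebackhide__ = True  # pytest: hide this helper frame on failure
        assert x > 0, "{!r} is not positive".format(x)

    def test_example():
        _assert_positive(-1)  # the failure is reported at this line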
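Finally, alignment.py and merge.py drop both the "# noqa: F401" on imports inside "if TYPE_CHECKING:" blocks and the matching "# noqa: F811" on the deferred function-local re-imports, presumably because the pyflakes versions this branch targets understand the idiom (a name used only in a string annotation counts as a use, and re-importing a TYPE_CHECKING name at runtime is not a redefinition). Schematically, as the layout would sit inside a package module such as xarray/core/alignment.py, with is_dataarray made up for illustration:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # evaluated only by static type checkers, never at runtime,
        # which sidesteps the circular import between core modules
        from .dataarray import DataArray

    def is_dataarray(obj) -> bool:
        from .dataarray import DataArray  # deferred runtime import

        return isinstance(obj, DataArray)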