Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove py2 compat #2645

Merged
merged 13 commits (source and target branch names were lost in page extraction)
Jan 25, 2019
30 changes: 15 additions & 15 deletions xarray/backends/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,13 @@
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path

import numpy as np

from .. import Dataset, backends, conventions
from ..core import indexing
from ..core.combine import _auto_combine, _infer_concat_order_from_positions
from ..core.pycompat import basestring, path_type
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import ArrayWriter
from .locks import _get_scheduler
Expand Down Expand Up @@ -98,7 +98,7 @@ def _normalize_path(path):
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, basestring):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for DataArray or Dataset key: '
'string must be length 1 or greater for '
Expand All @@ -116,7 +116,7 @@ def _validate_attrs(dataset):
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, basestring):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for attr: string must be '
'length 1 or greater for serialization to '
Expand All @@ -125,7 +125,7 @@ def check_attr(name, value):
raise TypeError("Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name))

if not isinstance(value, (basestring, Number, np.ndarray, np.number,
if not isinstance(value, (str, Number, np.ndarray, np.number,
list, tuple)):
raise TypeError('Invalid value for attr: {} must be a number, '
'a string, an ndarray or a list/tuple of '
Expand Down Expand Up @@ -278,7 +278,7 @@ def maybe_decode_store(store, lock=False):
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if (isinstance(filename_or_obj, basestring) and
if (isinstance(filename_or_obj, str) and
not is_remote_uri(filename_or_obj)):
mtime = os.path.getmtime(filename_or_obj)
else:
Expand All @@ -294,13 +294,13 @@ def maybe_decode_store(store, lock=False):

return ds2

if isinstance(filename_or_obj, path_type):
if isinstance(filename_or_obj, Path):
filename_or_obj = str(filename_or_obj)

if isinstance(filename_or_obj, backends.AbstractDataStore):
store = filename_or_obj
ds = maybe_decode_store(store)
elif isinstance(filename_or_obj, basestring):
elif isinstance(filename_or_obj, str):

if (isinstance(filename_or_obj, bytes) and
filename_or_obj.startswith(b'\x89HDF')):
Expand All @@ -309,7 +309,7 @@ def maybe_decode_store(store, lock=False):
filename_or_obj.startswith(b'CDF')):
# netCDF3 file images are handled by scipy
pass
elif isinstance(filename_or_obj, basestring):
elif isinstance(filename_or_obj, str):
filename_or_obj = _normalize_path(filename_or_obj)

if engine is None:
Expand Down Expand Up @@ -351,7 +351,7 @@ def maybe_decode_store(store, lock=False):

# Ensure source filename always stored in dataset object (GH issue #2550)
if 'source' not in ds.encoding:
if isinstance(filename_or_obj, basestring):
if isinstance(filename_or_obj, str):
ds.encoding['source'] = filename_or_obj

return ds
Expand Down Expand Up @@ -590,15 +590,15 @@ def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
""" # noqa
if isinstance(paths, basestring):
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
'cannot do wild-card matching for paths that are remote URLs: '
'{!r}. Instead, supply paths as an explicit list of strings.'
.format(paths))
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, path_type) else p for p in paths]
paths = [str(p) if isinstance(p, Path) else p for p in paths]

if not paths:
raise IOError('no files to open')
Expand Down Expand Up @@ -683,7 +683,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,

The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, path_type):
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)

if encoding is None:
Expand All @@ -700,7 +700,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
raise NotImplementedError(
'to_netcdf() with compute=False is not yet implemented when '
'returning bytes')
elif isinstance(path_or_file, basestring):
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
Expand Down Expand Up @@ -735,7 +735,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,

if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, basestring):
if isinstance(unlimited_dims, str):
unlimited_dims = [unlimited_dims]

writer = ArrayWriter()
Expand Down Expand Up @@ -898,7 +898,7 @@ def to_zarr(dataset, store=None, mode='w-', synchronizer=None, group=None,

See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, path_type):
if isinstance(store, Path):
store = str(store)
if encoding is None:
encoding = {}
Expand Down
10 changes: 5 additions & 5 deletions xarray/backends/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import dask_array_type, iteritems
from ..core.pycompat import dask_array_type
from ..core.utils import FrozenOrderedDict, NdimSizeLenMixin

# Create a logger object, but don't add any handlers. Leave that to user code.
Expand Down Expand Up @@ -109,9 +109,9 @@ class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
for k, v in variables.items()}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
for k, v in attributes.items()}
return variables, attributes

This function will be called anytime variables or attributes
Expand Down Expand Up @@ -275,7 +275,7 @@ def set_attributes(self, attributes):
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in iteritems(attributes):
for k, v in attributes.items():
self.set_attribute(k, v)

def set_variables(self, variables, check_encoding_set, writer,
Expand All @@ -297,7 +297,7 @@ def set_variables(self, variables, check_encoding_set, writer,
dimensions.
"""

for vn, v in iteritems(variables):
for vn, v in variables.items():
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
Expand Down
12 changes: 6 additions & 6 deletions xarray/backends/h5netcdf_.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from __future__ import absolute_import, division, print_function

import functools
from collections import OrderedDict

import numpy as np

from .. import Variable
from ..core import indexing
from ..core.pycompat import OrderedDict, bytes_type, iteritems, unicode_type
from ..core.utils import FrozenOrderedDict, close_on_error
from .common import WritableCFDataStore
from .file_manager import CachingFileManager
Expand All @@ -32,7 +32,7 @@ def _getitem(self, key):


def maybe_decode_bytes(txt):
    """Decode *txt* from UTF-8 if it is a ``bytes`` object, else return it unchanged.

    h5netcdf attributes may come back as raw bytes; normalize them to ``str``
    so downstream code can treat all attribute text uniformly.
    """
    # NOTE: the scraped diff showed both the old (bytes_type) and new (bytes)
    # condition lines; this is the post-py2-removal form using builtin `bytes`.
    if isinstance(txt, bytes):
        return txt.decode('utf-8')
    else:
        return txt
Expand Down Expand Up @@ -124,7 +124,7 @@ def open_store_variable(self, name, var):
encoding['original_shape'] = var.shape

vlen_dtype = h5py.check_dtype(vlen=var.dtype)
if vlen_dtype is unicode_type:
if vlen_dtype is str:
encoding['dtype'] = str
elif vlen_dtype is not None: # pragma: no cover
# xarray doesn't support writing arbitrary vlen dtypes yet.
Expand All @@ -136,7 +136,7 @@ def open_store_variable(self, name, var):

def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
for k, v in self.ds.variables.items())

def get_attrs(self):
return FrozenOrderedDict(_read_attributes(self.ds))
Expand Down Expand Up @@ -182,7 +182,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
'NC_CHAR type.' % name)

if dtype is str:
dtype = h5py.special_dtype(vlen=unicode_type)
dtype = h5py.special_dtype(vlen=str)

encoding = _extract_h5nc_encoding(variable,
raise_on_invalid=check_encoding)
Expand Down Expand Up @@ -221,7 +221,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
else:
nc4_var = self.ds[name]

for k, v in iteritems(attrs):
for k, v in attrs.items():
nc4_var.attrs[k] = v

target = H5NetCDFArrayWrapper(name, self)
Expand Down
4 changes: 1 addition & 3 deletions xarray/backends/lru_cache.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
import collections
import threading

from ..core.pycompat import move_to_end


class LRUCache(collections.MutableMapping):
"""Thread-safe LRUCache based on an OrderedDict.
Expand Down Expand Up @@ -41,7 +39,7 @@ def __getitem__(self, key):
# record recent use of the key by moving it to the front of the list
with self._lock:
value = self._cache[key]
move_to_end(self._cache, key)
self._cache.move_to_end(key)
return value

def _enforce_size_limit(self, capacity):
Expand Down
2 changes: 1 addition & 1 deletion xarray/backends/memory.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
from __future__ import absolute_import, division, print_function

import copy
from collections import OrderedDict

import numpy as np

from ..core.pycompat import OrderedDict
from ..core.variable import Variable
from .common import AbstractWritableDataStore

Expand Down
14 changes: 6 additions & 8 deletions xarray/backends/netCDF4_.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,15 @@
import functools
import operator
import warnings
from collections import OrderedDict
from contextlib import suppress
from distutils.version import LooseVersion

import numpy as np

from .. import Variable, coding
from ..coding.variables import pop_to
from ..core import indexing
from ..core.pycompat import PY3, OrderedDict, basestring, iteritems, suppress
from ..core.utils import FrozenOrderedDict, close_on_error, is_remote_uri
from .common import (
BackendArray, WritableCFDataStore, find_root, robust_getitem)
Expand Down Expand Up @@ -81,9 +82,6 @@ def _getitem(self, key):
msg = ('The indexing operation you are attempting to perform '
'is not valid on netCDF4.Variable object. Try loading '
'your data into memory first by calling .load().')
if not PY3:
import traceback
msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
raise IndexError(msg)
return array

Expand Down Expand Up @@ -141,7 +139,7 @@ def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
if not isinstance(group, str):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
Expand Down Expand Up @@ -392,7 +390,7 @@ def open_store_variable(self, name, var):
def get_variables(self):
dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in
iteritems(self.ds.variables))
self.ds.variables.items())
return dsvars

def get_attrs(self):
Expand All @@ -402,7 +400,7 @@ def get_attrs(self):

def get_dimensions(self):
dims = FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
for k, v in self.ds.dimensions.items())
return dims

def get_encoding(self):
Expand Down Expand Up @@ -467,7 +465,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
fill_value=fill_value)
_disable_auto_decode_variable(nc4_var)

for k, v in iteritems(attrs):
for k, v in attrs.items():
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
_set_nc_attribute(nc4_var, k, v)
Expand Down
8 changes: 4 additions & 4 deletions xarray/backends/netcdf3.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
from __future__ import absolute_import, division, print_function

import unicodedata
from collections import OrderedDict

import numpy as np

from .. import Variable, coding
from ..core.pycompat import OrderedDict, basestring, unicode_type

# Special characters that are permitted in netCDF names except in the
# 0th position of the string
Expand Down Expand Up @@ -50,7 +50,7 @@ def coerce_nc3_dtype(arr):
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
elif isinstance(value, str):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
Expand Down Expand Up @@ -99,9 +99,9 @@ def is_valid_nc3_name(s):
names. Names that have trailing space characters are also not
permitted.
"""
if not isinstance(s, basestring):
if not isinstance(s, str):
return False
if not isinstance(s, unicode_type):
if not isinstance(s, str):
s = s.decode('utf-8')
num_bytes = len(s.encode('utf-8'))
return ((unicodedata.normalize('NFC', s) == s) and
Expand Down
3 changes: 2 additions & 1 deletion xarray/backends/pseudonetcdf_.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
from __future__ import absolute_import, division, print_function

from collections import OrderedDict

import numpy as np

from .. import Variable
from ..core import indexing
from ..core.pycompat import OrderedDict
from ..core.utils import Frozen, FrozenOrderedDict
from .common import AbstractDataStore, BackendArray
from .file_manager import CachingFileManager
Expand Down
1 change: 1 addition & 0 deletions xarray/backends/rasterio_.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion

import numpy as np

from .. import DataArray
Expand Down
Loading