From 9ac1e0784a995de541d796ea60deac0da2602125 Mon Sep 17 00:00:00 2001 From: dcherian Date: Mon, 5 Jul 2021 07:40:39 -0600 Subject: [PATCH 01/13] Faster unstacking to sparse --- asv_bench/asv.conf.json | 1 + asv_bench/benchmarks/__init__.py | 7 ++++ asv_bench/benchmarks/unstacking.py | 31 +++++++++++++++++- doc/whats-new.rst | 5 +++ xarray/core/dataset.py | 10 +++--- xarray/core/variable.py | 51 ++++++++++++++++++++++-------- xarray/tests/test_dataset.py | 20 +++++++++++- 7 files changed, 105 insertions(+), 20 deletions(-) diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 83a2aa9f010..621d98c7077 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -65,6 +65,7 @@ "bottleneck": ["", null], "dask": [""], "distributed": [""], + "sparse": [""] }, diff --git a/asv_bench/benchmarks/__init__.py b/asv_bench/benchmarks/__init__.py index b0adb2feafd..cc26a6e0c48 100644 --- a/asv_bench/benchmarks/__init__.py +++ b/asv_bench/benchmarks/__init__.py @@ -21,6 +21,13 @@ def requires_dask(): raise NotImplementedError() +def requires_sparse(): + try: + import sparse # noqa: F401 + except ImportError: + raise NotImplementedError() + + def randn(shape, frac_nan=None, chunks=None, seed=0): rng = np.random.RandomState(seed) if chunks is None: diff --git a/asv_bench/benchmarks/unstacking.py b/asv_bench/benchmarks/unstacking.py index 8d0c3932870..897f63267d1 100644 --- a/asv_bench/benchmarks/unstacking.py +++ b/asv_bench/benchmarks/unstacking.py @@ -1,8 +1,9 @@ import numpy as np +import pandas as pd import xarray as xr -from . import requires_dask +from . import requires_dask, requires_sparse class Unstacking: @@ -27,3 +28,31 @@ def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.da_full = self.da_full.chunk({"flat_dim": 50}) + + +class UnstackingSparse(Unstacking): + def setup(self, *args, **kwargs): + requires_sparse() + + import sparse + + data = sparse.random((500, 1000), random_state=0, fill_value=0) + self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) + self.da_missing = self.da_full[:-1] + + mindex = pd.MultiIndex.from_arrays([np.arange(500), np.arange(500)]) + self.da_eye_2d = xr.DataArray(np.ones((500,)), dims="z", coords={"z": mindex}) + self.da_eye_3d = xr.DataArray( + np.ones((500, 50)), + dims=("z", "foo"), + coords={"z": mindex, "foo": np.arange(50)}, + ) + + def time_unstack_to_sparse_2d(self): + self.da_eye_2d.unstack(sparse=True) + + def time_unstack_to_sparse_3d(self): + self.da_eye_3d.unstack(sparse=True) + + def time_unstack_pandas_slow(self): + pass diff --git a/doc/whats-new.rst b/doc/whats-new.rst index cd65e0dbe35..fe15e153703 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -66,6 +66,11 @@ Breaking changes Deprecations ~~~~~~~~~~~~ +Performance +~~~~~~~~~~~ + +- Significantly faster unstacking to a ``sparse`` array. + By `Deepak Cherian `_. 
Bug fixes ~~~~~~~~~ diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 13da8cfad03..0d6103ceb70 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -3995,7 +3995,9 @@ def ensure_stackable(val): return data_array - def _unstack_once(self, dim: Hashable, fill_value) -> "Dataset": + def _unstack_once( + self, dim: Hashable, fill_value, sparse: bool = False + ) -> "Dataset": index = self.get_index(dim) index = remove_unused_levels_categories(index) @@ -4011,7 +4013,7 @@ def _unstack_once(self, dim: Hashable, fill_value) -> "Dataset": fill_value_ = fill_value variables[name] = var._unstack_once( - index=index, dim=dim, fill_value=fill_value_ + index=index, dim=dim, fill_value=fill_value_, sparse=sparse ) else: variables[name] = var @@ -4138,7 +4140,7 @@ def unstack( isinstance(v.data, sparse_array_type) for v in self.variables.values() ) - or sparse + # or sparse # Until https://github.com/pydata/xarray/pull/4751 is resolved, # we check explicitly whether it's a numpy array. Once that is # resolved, explicitly exclude pint arrays. @@ -4154,7 +4156,7 @@ def unstack( ): result = result._unstack_full_reindex(dim, fill_value, sparse) else: - result = result._unstack_once(dim, fill_value) + result = result._unstack_once(dim, fill_value, sparse) return result def update(self, other: "CoercibleMapping") -> "Dataset": diff --git a/xarray/core/variable.py b/xarray/core/variable.py index ace09c6f482..80b42a8a310 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1565,6 +1565,7 @@ def _unstack_once( index: pd.MultiIndex, dim: Hashable, fill_value=dtypes.NA, + sparse=False, ) -> "Variable": """ Unstacks this variable given an index to unstack and the name of the @@ -1572,14 +1573,14 @@ def _unstack_once( """ reordered = self.transpose(..., dim) - + shape = reordered.shape new_dim_sizes = [lev.size for lev in index.levels] new_dim_names = index.names indexer = index.codes # Potentially we could replace `len(other_dims)` with just `-1` other_dims = [d for d in self.dims if d != dim] - new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes) + new_shape = tuple(list(shape[: len(other_dims)]) + new_dim_sizes) new_dims = reordered.dims[: len(other_dims)] + new_dim_names if fill_value is dtypes.NA: @@ -1592,19 +1593,41 @@ def _unstack_once( else: dtype = self.dtype - data = np.full_like( - self.data, - fill_value=fill_value, - shape=new_shape, - dtype=dtype, - ) + if sparse: + # TODO: how do we allow different sparse array types + from sparse import COO + + codes = zip(*index.codes) + if not shape[:-1]: + indexes = codes + else: + sizes = itertools.product(range(*shape[:-1])) + tuple_indexes = itertools.product(sizes, codes) + indexes = map(lambda x: list(itertools.chain(*x)), tuple_indexes) # type: ignore + + data = COO( + coords=np.array(list(indexes)).T, + data=self.data.astype(dtype).ravel(), + fill_value=fill_value, + shape=new_shape, + has_duplicates=False, + sorted=True, + ) + + else: + data = np.full_like( + self.data, + fill_value=fill_value, + shape=new_shape, + dtype=dtype, + ) - # Indexer is a list of lists of locations. Each list is the locations - # on the new dimension. This is robust to the data being sparse; in that - # case the destinations will be NaN / zero. - # sparse doesn't support item assigment, - # https://github.com/pydata/sparse/issues/114 - data[(..., *indexer)] = reordered + # Indexer is a list of lists of locations. Each list is the locations + # on the new dimension. 
This is robust to the data being sparse; in that + # case the destinations will be NaN / zero. + # sparse doesn't support item assigment, + # https://github.com/pydata/sparse/issues/114 + data[(..., *indexer)] = reordered return self._replace(dims=new_dims, data=data) diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index b08ce9ea730..bfce7530ba0 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -29,7 +29,7 @@ from xarray.core import dtypes, indexing, utils from xarray.core.common import duck_array_ops, full_like from xarray.core.indexes import Index -from xarray.core.pycompat import integer_types +from xarray.core.pycompat import integer_types, sparse_array_type from xarray.core.utils import is_scalar from . import ( @@ -3084,14 +3084,32 @@ def test_unstack_sparse(self): # test fill_value actual = ds.unstack("index", sparse=True) expected = ds.unstack("index") + assert isinstance(actual["var"].data, sparse_array_type) assert actual["var"].variable._to_dense().equals(expected["var"].variable) assert actual["var"].data.density < 1.0 actual = ds["var"].unstack("index", sparse=True) expected = ds["var"].unstack("index") + assert isinstance(actual.data, sparse_array_type) assert actual.variable._to_dense().equals(expected.variable) assert actual.data.density < 1.0 + mindex = pd.MultiIndex.from_arrays( + [np.arange(3), np.arange(3)], names=["a", "b"] + ) + ds_eye = Dataset( + {"var": (("z", "foo"), np.ones((3, 4)))}, + coords={"z": mindex, "foo": np.arange(4)}, + ) + actual = ds_eye.unstack(sparse=True, fill_value=0) + assert isinstance(actual["var"].data, sparse_array_type) + expected = xr.Dataset( + {"var": (("foo", "a", "b"), np.broadcast_to(np.eye(3, 3), (4, 3, 3)))}, + coords={"foo": np.arange(4), "a": np.arange(3), "b": np.arange(3)}, + ) + actual["var"].data = actual["var"].data.todense() + assert_equal(expected, actual) + def test_stack_unstack_fast(self): ds = Dataset( { From 6bd0fe7a8ce6d8490d1a2f283201abad70a469bc Mon Sep 17 00:00:00 2001 From: Deepak Cherian Date: Mon, 5 Jul 2021 11:27:10 -0600 Subject: [PATCH 02/13] Update xarray/core/variable.py --- xarray/core/variable.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 80b42a8a310..73cdc76a211 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1610,8 +1610,6 @@ def _unstack_once( data=self.data.astype(dtype).ravel(), fill_value=fill_value, shape=new_shape, - has_duplicates=False, - sorted=True, ) else: From e976ada9464126e278366fc0fba917d067ff6b8c Mon Sep 17 00:00:00 2001 From: dcherian Date: Mon, 5 Jul 2021 11:51:47 -0600 Subject: [PATCH 03/13] [skip-ci] Add memory benchmarks --- asv_bench/benchmarks/unstacking.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/asv_bench/benchmarks/unstacking.py b/asv_bench/benchmarks/unstacking.py index 897f63267d1..b7e98dac0fd 100644 --- a/asv_bench/benchmarks/unstacking.py +++ b/asv_bench/benchmarks/unstacking.py @@ -54,5 +54,11 @@ def time_unstack_to_sparse_2d(self): def time_unstack_to_sparse_3d(self): self.da_eye_3d.unstack(sparse=True) + def peakmem_unstack_to_sparse_2d(self): + self.da_eye_2d.unstack(sparse=True) + + def peakmem_unstack_to_sparse_3d(self): + self.da_eye_3d.unstack(sparse=True) + def time_unstack_pandas_slow(self): pass From e4a6ec2af94ce8c21d482f13ad54ea0057c26f48 Mon Sep 17 00:00:00 2001 From: dcherian Date: Mon, 5 Jul 2021 14:42:27 -0600 Subject: [PATCH 04/13] cleanups + add comments --- xarray/core/dataset.py | 18 
++++++++---------- xarray/core/variable.py | 13 ++++++++----- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 0d6103ceb70..9f201557a60 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -4133,23 +4133,21 @@ def unstack( # function requires. # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125 any(is_duck_dask_array(v.data) for v in self.variables.values()) - # Sparse doesn't currently support (though we could special-case - # it) - # https://github.com/pydata/sparse/issues/422 + # Sparse doesn't currently support advanced indexing + # https://github.com/pydata/sparse/issues/114 or any( isinstance(v.data, sparse_array_type) for v in self.variables.values() ) - # or sparse # Until https://github.com/pydata/xarray/pull/4751 is resolved, # we check explicitly whether it's a numpy array. Once that is # resolved, explicitly exclude pint arrays. - # # pint doesn't implement `np.full_like` in a way that's - # # currently compatible. - # # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173 - # # or any( - # # isinstance(v.data, pint_array_type) for v in self.variables.values() - # # ) + # pint doesn't implement `np.full_like` in a way that's + # currently compatible. + # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173 + # or any( + # isinstance(v.data, pint_array_type) for v in self.variables.values() + # ) or any( not isinstance(v.data, np.ndarray) for v in self.variables.values() ) diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 73cdc76a211..4b6a250c66a 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1565,7 +1565,7 @@ def _unstack_once( index: pd.MultiIndex, dim: Hashable, fill_value=dtypes.NA, - sparse=False, + sparse: bool = False, ) -> "Variable": """ Unstacks this variable given an index to unstack and the name of the @@ -1573,14 +1573,14 @@ def _unstack_once( """ reordered = self.transpose(..., dim) - shape = reordered.shape + new_dim_sizes = [lev.size for lev in index.levels] new_dim_names = index.names indexer = index.codes # Potentially we could replace `len(other_dims)` with just `-1` other_dims = [d for d in self.dims if d != dim] - new_shape = tuple(list(shape[: len(other_dims)]) + new_dim_sizes) + new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes) new_dims = reordered.dims[: len(other_dims)] + new_dim_names if fill_value is dtypes.NA: @@ -1594,14 +1594,17 @@ def _unstack_once( dtype = self.dtype if sparse: + # unstacking a dense multitindexed array to a sparse array + # Use the sparse.COO constructor until sparse supports advanced indexing + # https://github.com/pydata/sparse/issues/114 # TODO: how do we allow different sparse array types from sparse import COO codes = zip(*index.codes) - if not shape[:-1]: + if reordered.ndim == 1: indexes = codes else: - sizes = itertools.product(range(*shape[:-1])) + sizes = itertools.product(range(*reordered.shape[:-1])) tuple_indexes = itertools.product(sizes, codes) indexes = map(lambda x: list(itertools.chain(*x)), tuple_indexes) # type: ignore From 0c6f22feab5acea8d7045b28ddb370d8ab929a84 Mon Sep 17 00:00:00 2001 From: dcherian Date: Mon, 5 Jul 2021 14:49:14 -0600 Subject: [PATCH 05/13] optimize. 
--- xarray/core/variable.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 4b6a250c66a..222e8dab9a2 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1613,6 +1613,8 @@ def _unstack_once( data=self.data.astype(dtype).ravel(), fill_value=fill_value, shape=new_shape, + has_duplicates=False, + sorted=index.is_monotonic_increasing, ) else: From 6e12955e7ebed12635df4569ae1a665dcabc519b Mon Sep 17 00:00:00 2001 From: dcherian Date: Tue, 6 Jul 2021 18:50:04 -0600 Subject: [PATCH 06/13] bugfix --- xarray/core/variable.py | 2 +- xarray/tests/test_dataset.py | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 222e8dab9a2..bdbd1913547 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1604,7 +1604,7 @@ def _unstack_once( if reordered.ndim == 1: indexes = codes else: - sizes = itertools.product(range(*reordered.shape[:-1])) + sizes = itertools.product(*[range(s) for s in reordered.shape[:-1]]) tuple_indexes = itertools.product(sizes, codes) indexes = map(lambda x: list(itertools.chain(*x)), tuple_indexes) # type: ignore diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index bfce7530ba0..ac7cfa4cbb9 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -3098,14 +3098,24 @@ def test_unstack_sparse(self): [np.arange(3), np.arange(3)], names=["a", "b"] ) ds_eye = Dataset( - {"var": (("z", "foo"), np.ones((3, 4)))}, - coords={"z": mindex, "foo": np.arange(4)}, + {"var": (("z", "foo", "bar"), np.ones((3, 4, 5)))}, + coords={"z": mindex, "foo": np.arange(4), "bar": np.arange(5)}, ) actual = ds_eye.unstack(sparse=True, fill_value=0) assert isinstance(actual["var"].data, sparse_array_type) expected = xr.Dataset( - {"var": (("foo", "a", "b"), np.broadcast_to(np.eye(3, 3), (4, 3, 3)))}, - coords={"foo": np.arange(4), "a": np.arange(3), "b": np.arange(3)}, + { + "var": ( + ("foo", "bar", "a", "b"), + np.broadcast_to(np.eye(3, 3), (4, 5, 3, 3)), + ) + }, + coords={ + "foo": np.arange(4), + "bar": np.arange(5), + "a": np.arange(3), + "b": np.arange(3), + }, ) actual["var"].data = actual["var"].data.todense() assert_equal(expected, actual) From 8e6c5486337dffea0b6bd7fe2cba4cf0e4ebf4d9 Mon Sep 17 00:00:00 2001 From: Deepak Cherian Date: Wed, 7 Jul 2021 09:21:50 -0600 Subject: [PATCH 07/13] [skip-ci] Update doc/whats-new.rst --- doc/whats-new.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index fe15e153703..6cbb284566f 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -69,7 +69,7 @@ Deprecations Performance ~~~~~~~~~~~ -- Significantly faster unstacking to a ``sparse`` array. +- Significantly faster unstacking to a ``sparse`` array. :pull:`5577` By `Deepak Cherian `_. Bug fixes From 637421d78faa3e0cb49951f791496d4795690479 Mon Sep 17 00:00:00 2001 From: dcherian Date: Fri, 13 Aug 2021 13:33:47 -0600 Subject: [PATCH 08/13] clean up comments --- xarray/core/dataset.py | 3 +-- xarray/core/variable.py | 5 ----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 9f201557a60..ce5c6ee720e 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -4133,8 +4133,7 @@ def unstack( # function requires. 
# https://github.com/pydata/xarray/pull/4746#issuecomment-753282125 any(is_duck_dask_array(v.data) for v in self.variables.values()) - # Sparse doesn't currently support advanced indexing - # https://github.com/pydata/sparse/issues/114 + # sparse.COO doesn't currently support assignment or any( isinstance(v.data, sparse_array_type) for v in self.variables.values() diff --git a/xarray/core/variable.py b/xarray/core/variable.py index bdbd1913547..48535f08958 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1595,9 +1595,6 @@ def _unstack_once( if sparse: # unstacking a dense multitindexed array to a sparse array - # Use the sparse.COO constructor until sparse supports advanced indexing - # https://github.com/pydata/sparse/issues/114 - # TODO: how do we allow different sparse array types from sparse import COO codes = zip(*index.codes) @@ -1628,8 +1625,6 @@ def _unstack_once( # Indexer is a list of lists of locations. Each list is the locations # on the new dimension. This is robust to the data being sparse; in that # case the destinations will be NaN / zero. - # sparse doesn't support item assigment, - # https://github.com/pydata/sparse/issues/114 data[(..., *indexer)] = reordered return self._replace(dims=new_dims, data=data) From 58aa601d07aefbde5447961b10d5df52c0453975 Mon Sep 17 00:00:00 2001 From: dcherian Date: Fri, 13 Aug 2021 13:37:15 -0600 Subject: [PATCH 09/13] FIx whats-new --- doc/whats-new.rst | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 7b46633d293..8d0ee47b8df 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -36,6 +36,13 @@ Deprecations ~~~~~~~~~~~~ +Performance +~~~~~~~~~~~ + +- Significantly faster unstacking to a ``sparse`` array. :pull:`5577` + By `Deepak Cherian `_. + + Bug fixes ~~~~~~~~~ @@ -129,11 +136,6 @@ Breaking changes Deprecations ~~~~~~~~~~~~ -Performance -~~~~~~~~~~~ - -- Significantly faster unstacking to a ``sparse`` array. :pull:`5577` - By `Deepak Cherian `_. 
- Removed the deprecated ``dim`` kwarg to :py:func:`DataArray.integrate` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.rolling` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.coarsen` (:pull:`5630`) From ea22454d3c19267e7223a26d95a4a20024d82463 Mon Sep 17 00:00:00 2001 From: dcherian Date: Tue, 23 Nov 2021 19:57:55 -0700 Subject: [PATCH 10/13] faster benchmarks --- asv_bench/benchmarks/unstacking.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/asv_bench/benchmarks/unstacking.py b/asv_bench/benchmarks/unstacking.py index 9ed6803ac69..dc8bc3307c3 100644 --- a/asv_bench/benchmarks/unstacking.py +++ b/asv_bench/benchmarks/unstacking.py @@ -40,10 +40,10 @@ def setup(self, *args, **kwargs): self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) self.da_missing = self.da_full[:-1] - mindex = pd.MultiIndex.from_arrays([np.arange(500), np.arange(500)]) - self.da_eye_2d = xr.DataArray(np.ones((500,)), dims="z", coords={"z": mindex}) + mindex = pd.MultiIndex.from_arrays([np.arange(100), np.arange(100)]) + self.da_eye_2d = xr.DataArray(np.ones((100,)), dims="z", coords={"z": mindex}) self.da_eye_3d = xr.DataArray( - np.ones((500, 50)), + np.ones((100, 50)), dims=("z", "foo"), coords={"z": mindex, "foo": np.arange(50)}, ) From 97e69151035b34bf5fc8f7558f8339d71957259a Mon Sep 17 00:00:00 2001 From: dcherian Date: Tue, 23 Nov 2021 19:59:17 -0700 Subject: [PATCH 11/13] make fewer assumptions --- xarray/core/variable.py | 1 - 1 file changed, 1 deletion(-) diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 2abcd7c314f..e2d02b41a17 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1676,7 +1676,6 @@ def _unstack_once( data=self.data.astype(dtype).ravel(), fill_value=fill_value, shape=new_shape, - has_duplicates=False, sorted=index.is_monotonic_increasing, ) From b7017afd8ffb763c711f58d9ab39ccf27a7455cf Mon Sep 17 00:00:00 2001 From: Deepak Cherian Date: Wed, 1 Dec 2021 19:15:02 -0700 Subject: [PATCH 12/13] Fix whats-new --- doc/whats-new.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 5360e32d690..77aa580598e 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -33,6 +33,12 @@ Deprecations ~~~~~~~~~~~~ +Performance +~~~~~~~~~~~ + +- Significantly faster unstacking to a ``sparse`` array. :pull:`5577` + By `Deepak Cherian `_. + Bug fixes ~~~~~~~~~ - :py:func:`xr.map_blocks` and :py:func:`xr.corr` now work when dask is not installed (:issue:`3391`, :issue:`5715`, :pull:`5731`). @@ -158,12 +164,6 @@ Deprecations passed alongside ``combine='by_coords'``. By `Tom Nicholas `_. -Performance -~~~~~~~~~~~ - -- Significantly faster unstacking to a ``sparse`` array. :pull:`5577` - By `Deepak Cherian `_. - Bug fixes ~~~~~~~~~ From 1532c5e387961b0fb53fce26d68be7ee7207bcbd Mon Sep 17 00:00:00 2001 From: Deepak Cherian Date: Wed, 1 Dec 2021 19:15:39 -0700 Subject: [PATCH 13/13] Update doc/whats-new.rst --- doc/whats-new.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 77aa580598e..9a6f6e21e5e 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -164,7 +164,6 @@ Deprecations passed alongside ``combine='by_coords'``. By `Tom Nicholas `_. - Bug fixes ~~~~~~~~~
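
For reference, a minimal sketch of what this series enables and how it works, assuming xarray with these patches applied and the ``sparse`` package installed. The standalone ``COO`` construction at the end is an illustrative simplification of the new sparse branch in ``Variable._unstack_once``, not the exact library internals; the 100-element "eye" mirrors the cases added to the benchmarks and tests above::

    import numpy as np
    import pandas as pd
    import sparse
    import xarray as xr

    # An "eye"-like stacked array: one value per (a, b) pair along the diagonal,
    # so unstacking produces a mostly-empty result that benefits from sparse.
    mindex = pd.MultiIndex.from_arrays(
        [np.arange(100), np.arange(100)], names=["a", "b"]
    )
    da = xr.DataArray(np.ones(100), dims="z", coords={"z": mindex})

    # User-facing path exercised by the new benchmarks and tests.
    unstacked = da.unstack("z", sparse=True, fill_value=0)
    print(type(unstacked.data))    # a sparse COO array
    print(unstacked.data.density)  # 0.01 for the 100x100 "eye"

    # Core idea of the patch: sparse.COO does not support item assignment, so
    # instead of filling a preallocated output array, build the COO directly --
    # the MultiIndex codes already are the coordinates of the non-fill values.
    codes = zip(*mindex.codes)        # one (a, b) integer pair per stacked element
    coords = np.array(list(codes)).T  # shape (ndim, nnz)
    expected = sparse.COO(
        coords=coords,
        data=da.values.ravel(),
        shape=(mindex.levels[0].size, mindex.levels[1].size),
        fill_value=0,
        sorted=mindex.is_monotonic_increasing,
    )
    np.testing.assert_array_equal(
        unstacked.transpose("a", "b").data.todense(), expected.todense()
    )

Constructing the COO directly both avoids allocating a dense ``np.full_like`` output and sidesteps the item assignment that ``sparse.COO`` does not support, which is where the speedup in this series comes from.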