diff --git a/doc/api.rst b/doc/api.rst index 9735eb0c708..e191304e95c 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -649,6 +649,7 @@ Plotting plot.imshow plot.line plot.pcolormesh + plot.step plot.FacetGrid Faceting diff --git a/doc/examples/_code/weather_data_setup.py b/doc/examples/_code/weather_data_setup.py deleted file mode 100644 index 4e4e2ab176e..00000000000 --- a/doc/examples/_code/weather_data_setup.py +++ /dev/null @@ -1,22 +0,0 @@ -import numpy as np -import pandas as pd -import seaborn as sns - -import xarray as xr - -np.random.seed(123) - -times = pd.date_range("2000-01-01", "2001-12-31", name="time") -annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28)) - -base = 10 + 15 * annual_cycle.reshape(-1, 1) -tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3) -tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3) - -ds = xr.Dataset( - { - "tmin": (("time", "location"), tmin_values), - "tmax": (("time", "location"), tmax_values), - }, - {"time": times, "location": ["IA", "IN", "IL"]}, -) diff --git a/doc/examples/weather-data.ipynb b/doc/examples/weather-data.ipynb new file mode 100644 index 00000000000..f582453aacf --- /dev/null +++ b/doc/examples/weather-data.ipynb @@ -0,0 +1,374 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Toy weather data\n", + "\n", + "Here is an example of how to easily manipulate a toy weather dataset using\n", + "xarray and other recommended Python libraries:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:43:36.127628Z", + "start_time": "2020-01-27T15:43:36.081733Z" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import seaborn as sns\n", + "\n", + "import xarray as xr\n", + "\n", + "np.random.seed(123)\n", + "\n", + "xr.set_options(display_style=\"html\")\n", + "\n", + "times = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\n", + "annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n", + "\n", + "base = 10 + 15 * annual_cycle.reshape(-1, 1)\n", + "tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\n", + "tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n", + "\n", + "ds = xr.Dataset(\n", + " {\n", + " \"tmin\": ((\"time\", \"location\"), tmin_values),\n", + " \"tmax\": ((\"time\", \"location\"), tmax_values),\n", + " },\n", + " {\"time\": times, \"location\": [\"IA\", \"IN\", \"IL\"]},\n", + ")\n", + "\n", + "ds" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Examine a dataset with pandas and seaborn" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Convert to a pandas DataFrame" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:47:14.160297Z", + "start_time": "2020-01-27T15:47:14.126738Z" + } + }, + "outputs": [], + "source": [ + "df = ds.to_dataframe()\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:47:32.682065Z", + "start_time": "2020-01-27T15:47:32.652629Z" + } + }, + "outputs": [], + "source": [ + "df.describe()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize using pandas" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": 
"2020-01-27T15:47:34.617042Z", + "start_time": "2020-01-27T15:47:34.282605Z" + } + }, + "outputs": [], + "source": [ + "ds.mean(dim=\"location\").to_dataframe().plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize using seaborn" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:47:37.643175Z", + "start_time": "2020-01-27T15:47:37.202479Z" + } + }, + "outputs": [], + "source": [ + "sns.pairplot(df.reset_index(), vars=ds.data_vars)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Probability of freeze by calendar month" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:48:11.241224Z", + "start_time": "2020-01-27T15:48:11.211156Z" + } + }, + "outputs": [], + "source": [ + "freeze = (ds[\"tmin\"] <= 0).groupby(\"time.month\").mean(\"time\")\n", + "freeze" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:48:13.131247Z", + "start_time": "2020-01-27T15:48:12.924985Z" + } + }, + "outputs": [], + "source": [ + "freeze.to_pandas().plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Monthly averaging" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:48:08.498259Z", + "start_time": "2020-01-27T15:48:08.210890Z" + } + }, + "outputs": [], + "source": [ + "monthly_avg = ds.resample(time=\"1MS\").mean()\n", + "monthly_avg.sel(location=\"IA\").to_dataframe().plot(style=\"s-\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that ``MS`` here refers to Month-Start; ``M`` labels Month-End (the last day of the month)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Calculate monthly anomalies" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In climatology, \"anomalies\" refer to the difference between observations and\n", + "typical weather for a particular season. Unlike observations, anomalies should\n", + "not show any seasonal cycle." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:49:34.855086Z", + "start_time": "2020-01-27T15:49:34.406439Z" + } + }, + "outputs": [], + "source": [ + "climatology = ds.groupby(\"time.month\").mean(\"time\")\n", + "anomalies = ds.groupby(\"time.month\") - climatology\n", + "anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Calculate standardized monthly anomalies" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can create standardized anomalies where the difference between the\n", + "observations and the climatological monthly mean is\n", + "divided by the climatological standard deviation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:50:09.144586Z", + "start_time": "2020-01-27T15:50:08.734682Z" + } + }, + "outputs": [], + "source": [ + "climatology_mean = ds.groupby(\"time.month\").mean(\"time\")\n", + "climatology_std = ds.groupby(\"time.month\").std(\"time\")\n", + "stand_anomalies = xr.apply_ufunc(\n", + " lambda x, m, s: (x - m) / s,\n", + " ds.groupby(\"time.month\"),\n", + " climatology_mean,\n", + " climatology_std,\n", + ")\n", + "\n", + "stand_anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fill missing values with climatology" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:50:46.192491Z", + "start_time": "2020-01-27T15:50:46.174554Z" + } + }, + "source": [ + "The ``fillna`` method on grouped objects lets you easily fill missing values by group:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:51:40.279299Z", + "start_time": "2020-01-27T15:51:40.220342Z" + } + }, + "outputs": [], + "source": [ + "# throw away the first half of every month\n", + "some_missing = ds.tmin.sel(time=ds[\"time.day\"] > 15).reindex_like(ds)\n", + "filled = some_missing.groupby(\"time.month\").fillna(climatology.tmin)\n", + "both = xr.Dataset({\"some_missing\": some_missing, \"filled\": filled})\n", + "both" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:52:11.815769Z", + "start_time": "2020-01-27T15:52:11.770825Z" + } + }, + "outputs": [], + "source": [ + "df = both.sel(time=\"2000\").mean(\"location\").reset_coords(drop=True).to_dataframe()\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2020-01-27T15:52:14.867866Z", + "start_time": "2020-01-27T15:52:14.449684Z" + } + }, + "outputs": [], + "source": [ + "df[[\"filled\", \"some_missing\"]].plot()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": true, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/doc/examples/weather-data.rst b/doc/examples/weather-data.rst deleted file mode 100644 index 5a019e637c4..00000000000 --- a/doc/examples/weather-data.rst +++ /dev/null @@ -1,138 +0,0 @@ -.. _toy weather data: - -Toy weather data -================ - -Here is an example of how to easily manipulate a toy weather dataset using -xarray and other recommended Python libraries: - -.. contents:: - :local: - :depth: 1 - -Shared setup: - -.. literalinclude:: _code/weather_data_setup.py - -.. 
ipython:: python - :suppress: - - fpath = "examples/_code/weather_data_setup.py" - with open(fpath) as f: - code = compile(f.read(), fpath, 'exec') - exec(code) - - -Examine a dataset with pandas_ and seaborn_ -------------------------------------------- - -.. _pandas: http://pandas.pydata.org -.. _seaborn: http://stanford.edu/~mwaskom/software/seaborn - -.. ipython:: python - - ds - - df = ds.to_dataframe() - - df.head() - - df.describe() - - @savefig examples_tmin_tmax_plot.png - ds.mean(dim='location').to_dataframe().plot() - - -.. ipython:: python - - @savefig examples_pairplot.png - sns.pairplot(df.reset_index(), vars=ds.data_vars) - -.. _average by month: - -Probability of freeze by calendar month ---------------------------------------- - -.. ipython:: python - - freeze = (ds['tmin'] <= 0).groupby('time.month').mean('time') - freeze - - @savefig examples_freeze_prob.png - freeze.to_pandas().plot() - -.. _monthly average: - -Monthly averaging ------------------ - -.. ipython:: python - - monthly_avg = ds.resample(time='1MS').mean() - - @savefig examples_tmin_tmax_plot_mean.png - monthly_avg.sel(location='IA').to_dataframe().plot(style='s-') - -Note that ``MS`` here refers to Month-Start; ``M`` labels Month-End (the last -day of the month). - -.. _monthly anomalies: - -Calculate monthly anomalies ---------------------------- - -In climatology, "anomalies" refer to the difference between observations and -typical weather for a particular season. Unlike observations, anomalies should -not show any seasonal cycle. - -.. ipython:: python - - climatology = ds.groupby('time.month').mean('time') - anomalies = ds.groupby('time.month') - climatology - - @savefig examples_anomalies_plot.png - anomalies.mean('location').to_dataframe()[['tmin', 'tmax']].plot() - -.. _standardized monthly anomalies: - -Calculate standardized monthly anomalies ----------------------------------------- - -You can create standardized anomalies where the difference between the -observations and the climatological monthly mean is -divided by the climatological standard deviation. - -.. ipython:: python - - climatology_mean = ds.groupby('time.month').mean('time') - climatology_std = ds.groupby('time.month').std('time') - stand_anomalies = xr.apply_ufunc( - lambda x, m, s: (x - m) / s, - ds.groupby('time.month'), - climatology_mean, climatology_std) - - @savefig examples_standardized_anomalies_plot.png - stand_anomalies.mean('location').to_dataframe()[['tmin', 'tmax']].plot() - -.. _fill with climatology: - -Fill missing values with climatology ------------------------------------- - -The :py:func:`~xarray.Dataset.fillna` method on grouped objects lets you easily -fill missing values by group: - -.. ipython:: python - :okwarning: - - # throw away the first half of every month - some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds) - filled = some_missing.groupby('time.month').fillna(climatology.tmin) - - both = xr.Dataset({'some_missing': some_missing, 'filled': filled}) - both - - df = both.sel(time='2000').mean('location').reset_coords(drop=True).to_dataframe() - - @savefig examples_filled.png - df[['filled', 'some_missing']].plot() diff --git a/doc/time-series.rst b/doc/time-series.rst index 1cb535ea886..d838dbbd4cd 100644 --- a/doc/time-series.rst +++ b/doc/time-series.rst @@ -222,4 +222,4 @@ Data that has indices outside of the given ``tolerance`` are set to ``NaN``. For more examples of using grouped operations on a time dimension, see -:ref:`toy weather data`. +:doc:`examples/weather-data`. 
diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 3ee804e32a5..f8ac959f0ad 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -19,9 +19,17 @@ What's New v0.15.0 (unreleased) -------------------- +This release brings many improvements to xarray's documentation: our examples are now binderized notebooks (`click here `_) +and we have new example notebooks from our SciPy 2019 sprint (many thanks to our contributors!). + +This release also features many API improvements such as a new +:py:class:`~core.accessor_dt.TimedeltaAccessor` and support for :py:class:`CFTimeIndex` in +:py:meth:`~DataArray.interpolate_na`, as well as many bug fixes. + Breaking changes ~~~~~~~~~~~~~~~~ - Bumped minimum tested versions for dependencies: + - numpy 1.15 - pandas 0.25 - dask 2.2 @@ -30,14 +38,13 @@ Breaking changes - Remove ``compat`` and ``encoding`` kwargs from ``DataArray``, which have been deprecated since 0.12. (:pull:`3650`). - Instead, specify the encoding when writing to disk or set - the ``encoding`` attribute directly. - By `Maximilian Roos `_ + Instead, specify the ``encoding`` kwarg when writing to disk or set + the :py:attr:`DataArray.encoding` attribute directly. + By `Maximilian Roos `_. - :py:func:`xarray.dot`, :py:meth:`DataArray.dot`, and the ``@`` operator now use ``align="inner"`` (except when ``xarray.set_options(arithmetic_join="exact")``; :issue:`3694`) by `Mathias Hauser `_. - New Features ~~~~~~~~~~~~ - :py:meth:`DataArray.sel` and :py:meth:`Dataset.sel` now support :py:class:`pandas.CategoricalIndex`. (:issue:`3669`) @@ -47,10 +54,10 @@ New Features :py:class:`~xarray.Dataset` from a h5netcdf ``File`` that has been opened using other means (:issue:`3618`). By `Kai Mühlbauer `_. -- Implement :py:func:`median` and :py:func:`nanmedian` for dask arrays. This works by rechunking +- Implement ``median`` and ``nanmedian`` for dask arrays. This works by rechunking to a single chunk along all reduction axes. (:issue:`2999`). By `Deepak Cherian `_. -- :py:func:`xarray.concat` now preserves attributes from the first Variable. +- :py:func:`~xarray.concat` now preserves attributes from the first Variable. (:issue:`2575`, :issue:`2060`, :issue:`1614`) By `Deepak Cherian `_. - :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` and ``GroupBy.quantile`` @@ -59,44 +66,48 @@ New Features - Added the ``count`` reduction method to both :py:class:`~core.rolling.DatasetCoarsen` and :py:class:`~core.rolling.DataArrayCoarsen` objects. (:pull:`3500`) By `Deepak Cherian `_ -- Add ``meta`` kwarg to :py:func:`~xarray.apply_ufunc`; this is passed on to - :py:meth:`dask.array.blockwise`. (:pull:`3660`) By `Deepak Cherian `_. -- Add `attrs_file` option in :py:func:`~xarray.open_mfdataset` to choose the +- Add ``meta`` kwarg to :py:func:`~xarray.apply_ufunc`; + this is passed on to :py:func:`dask.array.blockwise`. (:pull:`3660`) + By `Deepak Cherian `_. +- Add ``attrs_file`` option in :py:func:`~xarray.open_mfdataset` to choose the source file for global attributes in a multi-file dataset (:issue:`2382`, - :pull:`3498`) by `Julien Seguinot _`. + :pull:`3498`). By `Julien Seguinot `_. - :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` now allow swapping to dimension names that don't exist yet. (:pull:`3636`) By `Justus Magin `_.
-- Extend :py:class:`core.accessor_dt.DatetimeAccessor` properties - and support `.dt` accessor for timedelta - via :py:class:`core.accessor_dt.TimedeltaAccessor` (:pull:`3612`) +- Extend :py:class:`~core.accessor_dt.DatetimeAccessor` properties + and support ``.dt`` accessor for timedeltas + via :py:class:`~core.accessor_dt.TimedeltaAccessor` (:pull:`3612`) By `Anderson Banihirwe `_. -- Support CFTimeIndex in :py:meth:`DataArray.interpolate_na`, define 1970-01-01 - as the default offset for the interpolation index for both DatetimeIndex and - CFTimeIndex, use microseconds in the conversion from timedelta objects - to floats to avoid overflow errors (:issue:`3641`, :pull:`3631`). - By David Huard ``_. +- Improvements to interpolating along time axes (:issue:`3641`, :pull:`3631`). + By `David Huard `_. + + - Support :py:class:`CFTimeIndex` in :py:meth:`DataArray.interpolate_na` + - define 1970-01-01 as the default offset for the interpolation index for both + :py:class:`pandas.DatetimeIndex` and :py:class:`CFTimeIndex`, + - use microseconds in the conversion from timedelta objects to floats to avoid + overflow errors. Bug fixes ~~~~~~~~~ - Applying a user-defined function that adds new dimensions using :py:func:`apply_ufunc` and ``vectorize=True`` now works with ``dask > 2.0``. (:issue:`3574`, :pull:`3660`). By `Deepak Cherian `_. -- Fix :py:meth:`xarray.combine_by_coords` to allow for combining incomplete +- Fix :py:meth:`~xarray.combine_by_coords` to allow for combining incomplete hypercubes of Datasets (:issue:`3648`). By `Ian Bolliger `_. -- Fix :py:meth:`xarray.combine_by_coords` when combining cftime coordinates +- Fix :py:func:`~xarray.combine_by_coords` when combining cftime coordinates which span long time intervals (:issue:`3535`). By `Spencer Clark `_. - Fix plotting with transposed 2D non-dimensional coordinates. (:issue:`3138`, :pull:`3441`) By `Deepak Cherian `_. -- :py:meth:`~xarray.plot.FacetGrid.set_titles` can now replace existing row titles of a +- :py:meth:`plot.FacetGrid.set_titles` can now replace existing row titles of a :py:class:`~xarray.plot.FacetGrid` plot. In addition :py:class:`~xarray.plot.FacetGrid` gained two new attributes: :py:attr:`~xarray.plot.FacetGrid.col_labels` and - :py:attr:`~xarray.plot.FacetGrid.row_labels` contain matplotlib Text handles for both column and + :py:attr:`~xarray.plot.FacetGrid.row_labels` contain :py:class:`matplotlib.text.Text` handles for both column and row labels. These can be used to manually change the labels. By `Deepak Cherian `_. -- Fix issue with Dask-backed datasets raising a ``KeyError`` on some computations involving ``map_blocks`` (:pull:`3598`) +- Fix issue with Dask-backed datasets raising a ``KeyError`` on some computations involving :py:func:`map_blocks` (:pull:`3598`). By `Tom Augspurger `_. - Ensure :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` issue the correct error when ``q`` is out of bounds (:issue:`3634`) by `Mathias Hauser `_. @@ -108,36 +119,34 @@ Bug fixes By `Justus Magin `_. - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename` now check for conflicts with MultiIndex level names. -- :py:meth:`Dataset.merge` no longer fails when passed a `DataArray` instead of a `Dataset` object. +- :py:meth:`Dataset.merge` no longer fails when passed a :py:class:`DataArray` instead of a :py:class:`Dataset`. By `Tom Nicholas `_. - Fix a regression in :py:meth:`Dataset.drop`: allow passing any iterable when dropping variables (:issue:`3552`, :pull:`3693`) By `Justus Magin `_. 
- Fixed errors emitted by ``mypy --strict`` in modules that import xarray. (:issue:`3695`) by `Guido Imperiale `_. -- Fix plotting of binned coordinates on the y axis in :py:meth:`DataArray.plot` - (line) and :py:meth:`DataArray.plot.step` plots (:issue:`#3571`, - :pull:`3685`) by `Julien Seguinot _`. +- Allow plotting of binned coordinates on the y axis in :py:meth:`plot.line` + and :py:meth:`plot.step` plots (:issue:`3571`, + :pull:`3685`) by `Julien Seguinot `_. Documentation ~~~~~~~~~~~~~ -- Switch doc examples to use nbsphinx and replace sphinx_gallery with - notebook. - (:pull:`3105`, :pull:`3106`, :pull:`3121`) - By `Ryan Abernathey `_ -- Added example notebook demonstrating use of xarray with Regional Ocean - Modeling System (ROMS) ocean hydrodynamic model output. - (:pull:`3116`). - By `Robert Hetland `_ -- Added example notebook demonstrating the visualization of ERA5 GRIB - data. (:pull:`3199`) +- Switch doc examples to use `nbsphinx `_ and replace + ``sphinx_gallery`` scripts with Jupyter notebooks. (:pull:`3105`, :pull:`3106`, :pull:`3121`) + By `Ryan Abernathey `_. +- Added :doc:`example notebook ` demonstrating use of xarray with + Regional Ocean Modeling System (ROMS) ocean hydrodynamic model output. (:pull:`3116`) + By `Robert Hetland `_. +- Added :doc:`example notebook ` demonstrating the visualization of + ERA5 GRIB data. (:pull:`3199`) By `Zach Bruick `_ and - `Stephan Siemen `_ + `Stephan Siemen `_. - Added examples for :py:meth:`DataArray.quantile`, :py:meth:`Dataset.quantile` and ``GroupBy.quantile``. (:pull:`3576`) By `Justus Magin `_. -- Add new :py:func:`apply_ufunc` example notebook demonstrating vectorization of a 1D - function using dask and numba. +- Add new :doc:`example notebook ` demonstrating + vectorization of a 1D function using :py:func:`apply_ufunc`, dask and numba. By `Deepak Cherian `_. - Added example for :py:func:`~xarray.map_blocks`. (:pull:`3667`) By `Riley X. Brady `_. @@ -153,10 +162,10 @@ Internal Changes (:pull:`3533`) by `Guido Imperiale `_. - Removed internal method ``Dataset._from_vars_and_coord_names``, which was dominated by ``Dataset._construct_direct``. (:pull:`3565`) - By `Maximilian Roos `_ + By `Maximilian Roos `_. - Replaced versioneer with setuptools-scm. Moved contents of setup.py to setup.cfg. Removed pytest-runner from setup.py, as per deprecation notice on the pytest-runner - project. (:pull:`3714`) by `Guido Imperiale `_ + project. (:pull:`3714`) by `Guido Imperiale `_. - Use of isort is now enforced by CI. (:pull:`3721`) by `Guido Imperiale `_ @@ -1751,7 +1760,7 @@ Documentation - Added a new guide on :ref:`contributing` (:issue:`640`) By `Joe Hamman `_. -- Added apply_ufunc example to :ref:`toy weather data` (:issue:`1844`). +- Added apply_ufunc example to :ref:`/examples/weather-data.ipynb#Toy-weather-data` (:issue:`1844`). By `Liam Brannigan `_. - New entry `Why don’t aggregations return Python scalars?` in the :doc:`faq` (:issue:`1726`). @@ -3659,7 +3668,7 @@ Enhancements ``fillna`` works on both ``Dataset`` and ``DataArray`` objects, and uses index based alignment and broadcasting like standard binary operations. It also can be applied by group, as illustrated in - :ref:`fill with climatology`. + :ref:`/examples/weather-data.ipynb#Fill-missing-values-with-climatology`.
- New ``xray.Dataset.assign`` and ``xray.Dataset.assign_coords`` methods patterned off the new :py:meth:`DataFrame.assign ` method in pandas: diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index a658f125054..062cc6342df 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -1,3 +1,4 @@ +import datetime import functools import warnings from numbers import Number @@ -18,7 +19,6 @@ cast, ) -import datetime import numpy as np import pandas as pd diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index c72ed6cc7d6..07bea6dac19 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1,4 +1,5 @@ import copy +import datetime import functools import sys import warnings @@ -27,7 +28,6 @@ cast, ) -import datetime import numpy as np import pandas as pd diff --git a/xarray/core/missing.py b/xarray/core/missing.py index b20441e993c..40f010b3514 100644 --- a/xarray/core/missing.py +++ b/xarray/core/missing.py @@ -1,8 +1,8 @@ +import datetime as dt import warnings from functools import partial from numbers import Number from typing import Any, Callable, Dict, Hashable, Sequence, Union -import datetime as dt import numpy as np import pandas as pd diff --git a/xarray/core/nputils.py b/xarray/core/nputils.py index dba67174fc1..cf189e471cc 100644 --- a/xarray/core/nputils.py +++ b/xarray/core/nputils.py @@ -2,7 +2,6 @@ import numpy as np import pandas as pd - from numpy.core.multiarray import normalize_axis_index try: diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index b4802f6194b..98131887e28 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -339,6 +339,7 @@ def step(darray, *args, where="pre", linestyle=None, ls=None, **kwargs): ---------- where : {'pre', 'post', 'mid'}, optional, default 'pre' Define where the steps should be placed: + - 'pre': The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. @@ -346,12 +347,13 @@ def step(darray, *args, where="pre", linestyle=None, ls=None, **kwargs): every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - 'mid': Steps occur half-way between the *x* positions. + Note that this parameter is ignored if one coordinate consists of :py:func:`pandas.Interval` values, e.g. as a result of :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. 
- *args, **kwargs : optional + ``*args``, ``**kwargs`` : optional Additional arguments following :py:func:`xarray.plot.line` """ if where not in {"pre", "post", "mid"}: diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py index 96c883baa67..f4f11473e48 100644 --- a/xarray/tests/test_duck_array_ops.py +++ b/xarray/tests/test_duck_array_ops.py @@ -1,7 +1,7 @@ +import datetime as dt import warnings from textwrap import dedent -import datetime as dt import numpy as np import pandas as pd import pytest @@ -17,13 +17,13 @@ gradient, last, mean, - rolling_window, - stack, - where, - py_timedelta_to_float, np_timedelta64_to_float, pd_timedelta_to_float, + py_timedelta_to_float, + rolling_window, + stack, timedelta_to_numeric, + where, ) from xarray.core.pycompat import dask_array_type from xarray.testing import assert_allclose, assert_equal @@ -753,7 +753,7 @@ def test_pd_timedelta_to_float(td, expected): @pytest.mark.parametrize( - "td", [dt.timedelta(days=1), np.timedelta64(1, "D"), pd.Timedelta(1, "D"), "1 day"], + "td", [dt.timedelta(days=1), np.timedelta64(1, "D"), pd.Timedelta(1, "D"), "1 day"] ) def test_timedelta_to_numeric(td): # Scalar input diff --git a/xarray/tests/test_missing.py b/xarray/tests/test_missing.py index 8d70d9a0fcc..35c71c2854c 100644 --- a/xarray/tests/test_missing.py +++ b/xarray/tests/test_missing.py @@ -14,16 +14,15 @@ ) from xarray.core.pycompat import dask_array_type from xarray.tests import ( + assert_allclose, assert_array_equal, assert_equal, - assert_allclose, raises_regex, requires_bottleneck, + requires_cftime, requires_dask, requires_scipy, - requires_cftime, ) - from xarray.tests.test_cftime_offsets import _CFTIME_CALENDARS
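The updated ``plot.step`` docstring in this patch notes that the ``where`` keyword is ignored when a coordinate holds :py:class:`pandas.Interval` values (for example, the output of ``groupby_bins``), and that the actual bin boundaries are used instead. A minimal sketch of that behaviour, not part of the patch itself, assuming xarray with these changes plus matplotlib are installed and using made-up toy data:

import matplotlib.pyplot as plt
import numpy as np
import xarray as xr

np.random.seed(0)

# Toy data (hypothetical): one noisy "tmax" value per day of a 365-day year.
da = xr.DataArray(
    10 + 5 * np.random.randn(365),
    dims="time",
    coords={"time": np.arange(365)},
    name="tmax",
)

# groupby_bins yields a "time_bins" coordinate of pandas.Interval values;
# plot.step then draws each step across the actual bin boundaries and
# ignores the `where` keyword for such coordinates.
binned = da.groupby_bins("time", bins=12).mean()
binned.plot.step()
plt.show()

With the bug fix referenced above (:pull:`3685`), the binned coordinate should also be usable on the y axis, e.g. ``binned.plot.step(y="time_bins")``.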