diff --git a/.github/workflows/bleeding-edge.yaml b/.github/workflows/bleeding-edge.yaml new file mode 100644 index 00000000000..dae94349cd8 --- /dev/null +++ b/.github/workflows/bleeding-edge.yaml @@ -0,0 +1,54 @@ +name: CI (bleeding edge) +# this workflow is heavily inspired by pandas, see +# https://github.com/pandas-dev/pandas/blob/master/.github/workflows/python-dev.yml + +# goals: check stability against +# - dev version of Python, numpy, and matplotlib +# - building with future pip default options + +on: + push: + branches: + - main + schedule: + # run this every day at 3 am UTC + - cron: '0 3 * * *' + +jobs: + build: + runs-on: ubuntu-latest + name: Python3.10-dev + timeout-minutes: 60 + + concurrency: + group: ${{ github.ref }}-dev + cancel-in-progress: true + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Set up Python Dev Version + uses: actions/setup-python@v2 + with: + python-version: '3.10-dev' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools wheel + python -m pip install git+https://github.com/numpy/numpy.git + python -m pip install git+https://github.com/matplotlib/matplotlib.git + python -m pip install cython + + - name: Build yt + # --no-build-isolation is used to guarantee that build time dependencies + # are not installed by pip as specified in pyproject.toml, hence we get + # to use the dev version of numpy at build time. + run: | + python setup.py build_ext -q -j2 + python -m pip install -e .[test] --no-build-isolation + + - name: Run Tests + run: pytest -vvv diff --git a/.github/workflows/build-test-pytest.yaml b/.github/workflows/build-test-pytest.yaml index 9010f3d1a4b..aec0faf5cea 100644 --- a/.github/workflows/build-test-pytest.yaml +++ b/.github/workflows/build-test-pytest.yaml @@ -1,6 +1,13 @@ name: Build and Test -on: [push, pull_request] +on: + push: + branches: + - main + pull_request: + paths-ignore: + - "doc/**" + - README.md defaults: run: diff --git a/.github/workflows/build-test.yaml b/.github/workflows/build-test.yaml index 78dec91aab3..1df27b9795e 100644 --- a/.github/workflows/build-test.yaml +++ b/.github/workflows/build-test.yaml @@ -1,6 +1,13 @@ name: Build and Test -on: [push, pull_request] +on: + push: + branches: + - main + pull_request: + paths-ignore: + - "doc/**" + - README.md defaults: run: diff --git a/.github/workflows/rules-checks.yaml b/.github/workflows/rules-checks.yaml index 134693afc60..d7308711a62 100644 --- a/.github/workflows/rules-checks.yaml +++ b/.github/workflows/rules-checks.yaml @@ -1,3 +1,6 @@ +# FUTURE: this file can be deleted once warnings are treated +# as errors in all CI jobs covering h5py. +# In practice, this will only be possible when we're done transitioning to pytest. 
name: Auto review bad practice on: [pull_request] diff --git a/.github/workflows/wheels.yaml b/.github/workflows/wheels.yaml new file mode 100644 index 00000000000..3ec94d4bc39 --- /dev/null +++ b/.github/workflows/wheels.yaml @@ -0,0 +1,60 @@ +name: Build CI Wheels + +on: + push: + branches: + - main + - stable + tags: + - 'yt-*' + +jobs: + build_wheels: + name: Wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04, windows-latest, macos-latest] + fail-fast: false + + steps: + - uses: actions/setup-python@v2 + name: Install Python + with: + python-version: 3.9 + + - uses: s-weigand/setup-conda@v1 + if: matrix.os == 'windows-latest' + with: + update-conda: true + conda-channels: conda-forge + activate-conda: true + python-version: 3.9 + + - uses: actions/checkout@v2 + - name: Install cibuildwheel + run: python -m pip install cibuildwheel==1.9.0 + + - name: Install dependencies and yt + shell: bash + env: + dependencies: "full" + LDFLAGS: "-static-libstdc++" + run: source ./tests/ci_install.sh + + - name: Build wheels for CPython + run: | + python -m cibuildwheel --output-dir dist + env: + CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-*" + CIBW_ARCHS_LINUX: "x86_64" + CIBW_ARCHS_MACOS: "x86_64" + CIBW_ARCHS_WINDOWS: "auto" + CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 + CIBW_MANYLINUX_I686_IMAGE: manylinux2014 + CIBW_ENVIRONMENT: "LDFLAGS='-static-libstdc++'" + + - uses: actions/upload-artifact@v2 + with: + name: wheels + path: ./dist/*.whl diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff2143e5d56..aebe256ff0b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,27 +28,31 @@ repos: - id: no-commit-to-branch args: [--branch, main] - repo: https://github.com/asottile/pyupgrade - rev: v2.19.1 + rev: v2.21.0 hooks: - id: pyupgrade args: [--py36-plus] - repo: https://github.com/psf/black - rev: 21.5b2 + rev: 21.6b0 hooks: - id: black language_version: python3 - repo: https://github.com/PyCQA/isort - rev: '5.8.0' + rev: '5.9.2' hooks: - id: isort - additional_dependencies: [toml] + name: isort (python) + - id: isort + name: isort (cython) + types: [cython] - repo: https://github.com/PyCQA/flake8 rev: '3.9.2' hooks: - id: flake8 additional_dependencies: [ flake8-bugbear>=20.3.2, # GH PR 2851 - flake8-logging-format + flake8-logging-format, + flake8-2020==1.6.0, ] - repo: https://github.com/asottile/blacken-docs rev: v1.10.0 @@ -56,6 +60,6 @@ repos: - id: blacken-docs additional_dependencies: [black==20.8b1] - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.8.0 + rev: v1.9.0 hooks: - id: rst-backticks diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 855eb5a9465..27ab1bcb15f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -118,17 +118,11 @@ Feel free to `blog `_ about, `tweet Long-Term Projects ------------------ -There are some wild-eyed, out-there ideas that have been bandied about for the -future directions of yt -- some of them even written into the mission -statement. The ultimate goal is to move past simple analysis and visualization -of data and begin to approach it from the other side, of generating data, -running solvers. We also hope to increase its ability to act as an in situ -analysis code, by presenting a unified protocol. Other projects include -interfacing with ParaView and VisIt, creating a web GUI for running -simulations, creating a run-tracker that follows simulations in progress, a -federated database for simulation outputs, and so on and so forth. 
- -yt is an ambitious project. Let's be ambitious together. +There are some out-there ideas that have been bandied about for the +future directions of yt -- stuff like fun new types of visualization, remapping +of coordinates, new ways of accessing data, and even new APIs to make life easier. + +yt is an ambitious project. Let's be ambitious together! yt Community Code of Conduct ---------------------------- @@ -362,7 +356,7 @@ How To Read The Source Code If you just want to *look* at the source code, you may already have it on your computer. If you build yt using the install script, the source is available at -``$YT_DEST/src/yt-git``. See :ref:`source-installation` for more details about +``$YT_DEST/src/yt-git``. See :ref:`install-from-source` for more details about how to obtain the yt source code if you did not build yt using the install script. @@ -401,10 +395,9 @@ the following subdirectories: classes for data regions, covering grids, time series, and so on. This also includes derived fields and derived quantities. -``gui`` - This is where all GUI components go. Typically this will be some small - tool used for one or two things, which contains a launching mechanism on - the command line. +``units`` + This used to be where all the unit-handling code resided, but as of now it's + mostly just a thin wrapper around unyt. ``utilities`` All broadly useful code that doesn't clearly fit in one of the other @@ -434,33 +427,7 @@ Building yt +++++++++++ If you have made changes to any C or Cython (``.pyx``) modules, you have to -rebuild yt. If your changes have exclusively been to Python modules, you will -not need to re-build, but (see below) you may need to re-install. - -Note that you will need a functioning compilation environment to build yt. On -linux this typically means installing the package that sets up a basic build -environment (e.g. ``build-essential`` on Debian and Ubuntu). On MacOS this means -installing the XCode command line tools. On Windows this means installing the -version of the Microsoft Visual C++ compiler that is appropriate for your -version of Python. See `the Python wiki -`_ for more details. - -If you are running from a clone that is executable in-place (i.e., has been -installed via the installation script or you have run ``setup.py develop``) you -can rebuild these modules by executing: - -.. code-block:: bash - - $ python setup.py develop - -If you have previously "installed" via ``setup.py install`` you have to -re-install: - -.. code-block:: bash - - $ python setup.py install - -Only one of these two options is needed. +rebuild yt before your changes are usable. See :ref:`install-from-source`. .. _requirements-for-code-submission: @@ -585,7 +552,7 @@ Here's a more detailed flowchart of how to submit changes. this at: https://github.com/yt-project/yt/fork. #. If you have used the installation script, the source code for yt can be found in ``$YT_DEST/src/yt-git``. Alternatively see - :ref:`source-installation` for instructions on how to build yt from the + :ref:`install-from-source` for instructions on how to build yt from the git repository. (Below, in :ref:`reading-source`, we describe how to find items of interest.) 
If you have already forked the repository then you can clone your fork locally:: diff --git a/MANIFEST.in b/MANIFEST.in index e230010a1a2..212cd4b30cf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,10 +1,16 @@ include README* CREDITS COPYING.txt CITATION setupext.py CONTRIBUTING.rst include yt/visualization/mapserver/html/map.js include yt/visualization/mapserver/html/map_index.html +include yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.css +include yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.src.js include yt/utilities/tests/cosmology_answers.yml include yt/utilities/mesh_types.yaml exclude scripts/pr_backport.py -recursive-include yt *.py *.pyx *.pxd *.h *.hpp README* *.txt LICENSE* *.cu +exclude yt/utilities/lib/cykdtree/c_kdtree.cpp +prune tests +prune docker +prune answer-store +recursive-include yt *.py *.pyx *.pxi *.pxd *.h *.hpp README* *.txt LICENSE* *.cu recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex @@ -13,5 +19,8 @@ prune doc/source/reference/api/generated prune doc/build recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader include yt/sample_data_registry.json +include conftest.py prune yt/frontends/_skeleton recursive-include yt/frontends/amrvac *.par +exclude .codecov.yml .coveragerc .git-blame-ignore-revs .gitmodules .hgchurn .mailmap +exclude .pre-commit-config.yaml clean.sh nose_answer.cfg nose_unit.cfg diff --git a/README.md b/README.md index c32c9600967..4c3a4efd182 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # The yt Project -![Supported Python Version](https://img.shields.io/badge/python%20version-≥%203.6-important) +[![PyPI](https://img.shields.io/pypi/v/yt)](https://pypi.org/project/yt) +[![Supported Python Versions](https://img.shields.io/pypi/pyversions/yt)](https://pypi.org/project/yt/) [![Latest Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](http://yt-project.org/docs/dev/) [![Users' Mailing List](https://img.shields.io/badge/Users-List-lightgrey.svg)](https://mail.python.org/archives/list/yt-users@python.org//) [![Devel Mailing List](https://img.shields.io/badge/Devel-List-lightgrey.svg)](https://mail.python.org/archives/list/yt-dev@python.org//) @@ -10,14 +11,15 @@ ![Build and Test](https://github.com/yt-project/yt/workflows/Build%20and%20Test/badge.svg?branch=main) +[![CI (bleeding edge)](https://github.com/yt-project/yt/actions/workflows/bleeding-edge.yaml/badge.svg)](https://github.com/yt-project/yt/actions/workflows/bleeding-edge.yaml) [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/yt-project/yt/main.svg)](https://results.pre-commit.ci/latest/github/yt-project/yt/main) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) - + -yt is an open-source, permissively-licensed python package for analyzing and +yt is an open-source, permissively-licensed Python library for analyzing and visualizing volumetric data. 
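For a first taste of the library, here is a minimal sketch (the dataset name refers to one of the sample datasets from https://yt-project.org/data/ and is assumed to be downloaded locally):

```python
import yt

# Load a sample dataset and save a slice through the gas density.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
slc = yt.SlicePlot(ds, "z", ("gas", "density"))
slc.save("density_slice.png")
```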
yt supports structured, variable-resolution meshes, unstructured meshes, and @@ -50,54 +52,11 @@ conda install -c conda-forge yt or with pip: ```shell -pip install yt +python -m pip install yt ``` -To get set up with a development version, you want to clone this repository: - -```shell -git clone https://github.com/yt-project/yt yt-git -cd yt-git -``` - -and work within a conda environment: - -```shell -# Create a conda environment named yt-git -$ conda create -n yt-git python=3.6 -# Activate it -$ source activate yt-git -# Make sure you run the latest version of conda -$ conda update conda -# Install yt's runtime dependencies -$ conda install -c conda-forge yt --only-deps -# Install yt's build dependencies -$ conda install -c conda-forge cython -# Make sure you run the latest version of pip -$ pip install --upgrade pip -$ pip install -v -e . -# Output installed packages -$ conda env export -``` - -Alternatively, you can install yt in a -[virtualenv](https://packaging.python.org/installing/#creating-virtual-environments): - -```shell -# It is conventional to create virtualenvs at ~/.virtualenv/ -$ mkdir -p ~/.virtualenv -# Assuming your version of Python 3 meets the minimal requirement -# create a virtualenv named yt-git -$ python3 -m venv ~/.virtualenv/yt-git -# Activate it -$ source ~/.virtualenv/yt-git/bin/activate -# Make sure you run the latest version of pip -$ pip install --upgrade pip -# Assuming you have cd'd into yt-git -$ pip install -e . -# Output installed packages -$ pip freeze -``` +More information on the various ways to install yt, and in particular to install from source, +can be found on [the project's website](https://yt-project.org/docs/dev/installing.html). ## Getting Started diff --git a/conftest.py b/conftest.py index 29edf369e9f..ccdb5390739 100644 --- a/conftest.py +++ b/conftest.py @@ -1,6 +1,7 @@ import os import shutil import tempfile +from importlib.util import find_spec from pathlib import Path import pytest @@ -71,6 +72,91 @@ def pytest_configure(config): config.addinivalue_line( "markers", "big_data: Run answer tests that require large data files." ) + for value in ( + # treat most warnings as errors + "error", + # >>> internal deprecation warnings with no obvious solution + # see https://github.com/yt-project/yt/issues/3381 + ( + r"ignore:The requested field name 'pd?[xyz]' is ambiguous and corresponds " + "to any one of the following field types.*:yt._maintenance.deprecation.VisibleDeprecationWarning" + ), + # >>> warnings emitted by testing frameworks, or in testing contexts + # we still have some yield-based tests, awaiting for transition into pytest + "ignore::pytest.PytestCollectionWarning", + # imp is used in nosetest + "ignore:the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses:DeprecationWarning", + # the deprecation warning message for imp changed in Python 3.10, so we ignore both versions + "ignore:the imp module is deprecated in favour of importlib and slated for removal in Python 3.12; see the module's documentation for alternative uses:DeprecationWarning", + # matplotlib warnings related to the Agg backend which is used in CI, not much we can do about it + "ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning", + "ignore:tight_layout . 
falling back to Agg renderer:UserWarning", + # + # >>> warnings from wrong values passed to numpy + # these should normally be curated out of the test suite but they are too numerous + # to deal with in a reasonable time at the moment. + "ignore:invalid value encountered in log10:RuntimeWarning", + "ignore:divide by zero encountered in log10:RuntimeWarning", + "ignore:invalid value encountered in true_divide:RuntimeWarning", + # + # >>> there are many places in yt (most notably at the frontend level) + # where we open files but never explicitly close them + # Although this is in general bad practice, it can be intentional and + # justified in contexts where reading speeds should be optimized. + # It is not clear at the time of writing how to approach this, + # so I'm going to ignore this class of warnings altogether for now. + "ignore:unclosed file.*:ResourceWarning", + ): + config.addinivalue_line("filterwarnings", value) + + # at the time of writing, astropy's wheels are behind numpy's latest + # version but this doesn't cause actual problems in our test suite, so + # we allow this warning to pass. + # last checked with astropy 4.2.1 + config.addinivalue_line( + "filterwarnings", + ( + "ignore:numpy.ndarray size changed, may indicate binary incompatibility. " + "Expected 80 from C header, got 88 from PyObject:RuntimeWarning" + ), + ) + if find_spec("astropy") is not None: + # astropy triggers this warning from itself, there's not much we can do on our side + # last checked with astropy 4.2.1 + config.addinivalue_line( + "filterwarnings", "ignore::astropy.wcs.wcs.FITSFixedWarning" + ) + + if find_spec("cartopy") is not None: + # cartopy still triggers this numpy warning + # last checked with cartopy 0.19.0 + config.addinivalue_line( + "filterwarnings", + ( + "ignore:`np.float` is a deprecated alias for the builtin `float`. " + "To silence this warning, use `float` by itself. " + "Doing this will not modify any behavior and is safe. " + "If you specifically wanted the numpy scalar type, use `np.float64` here." + ":DeprecationWarning: " + ), + ) + # this warning *still* shows up on cartopy 0.19 so we'll ignore it + config.addinivalue_line( + "filterwarnings", + ( + r"ignore:The default value for the \*approx\* keyword argument to " + r"\w+ will change from True to False after 0\.18\.:UserWarning" + ), + ) + # this one could be resolved by upgrading PROJ on Jenkins, + # but there isn't much else that can be done about it. + config.addinivalue_line( + "filterwarnings", + ( + "ignore:The Stereographic projection in Proj older than 5.0.0 incorrectly " + "transforms points when central_latitude=0. Use this projection with caution.:UserWarning" + ), + ) def pytest_collection_modifyitems(config, items): diff --git a/doc/cheatsheet.tex b/doc/cheatsheet.tex index 0a8e852c221..b3a2065e1bd 100644 --- a/doc/cheatsheet.tex +++ b/doc/cheatsheet.tex @@ -116,7 +116,6 @@ \subsection{Command Line yt} {\bf {-}{-}help} (e.g. {\bf yt render {-}{-}help}) for detailed help for that command including a list of the available flags. -\texttt{iyt}\textemdash\ Load yt and IPython. \\ \texttt{yt load} \textit{dataset} \textemdash\ Load a single dataset. \\ \texttt{yt help} \textemdash\ Print yt help information. \\ \texttt{yt stats} \textit{dataset} \textemdash\ Print stats of a dataset. 
\\ @@ -182,7 +181,7 @@ \subsection{YTArray} \subsection{IPython Tips} \settowidth{\MyLen}{\texttt{multicol} } These tips work if IPython has been loaded, typically either by invoking -\texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}). +\texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}). \texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a variable or function name when the \texttt{Tab} key is pressed, e.g. \textit{HaloFi}\textendash\texttt{Tab} would auto-complete to \textit{HaloFinder}. This also works with imports, e.g. \textit{from numpy.random.}\textendash\texttt{Tab} diff --git a/doc/source/about/index.rst b/doc/source/about/index.rst index 650e10b3ae3..859acd1f991 100644 --- a/doc/source/about/index.rst +++ b/doc/source/about/index.rst @@ -30,28 +30,22 @@ to the project. For a list of those members and a description of their contributions to the code, see `our members website. `_ -For an up-to-date list of everyone who has contributed to the yt codebase, -see the current `CREDITS `_ file. -For a more detailed breakup of contributions made by individual users, see out -`Open HUB page `_. - History of yt ------------- -yt was originally begun by Matthew Turk in 2007 in the course of his graduate -studies in computational astrophysics. The code was developed -as a simple data-reader and exporter for grid-based hydrodynamical simulation -data outputs from the *Enzo* code. Over the next few years, he invited -collaborators and friends to contribute and use yt. As the community grew, -so did the capabilities of yt. It is now a community-developed project with -contributions from many people, the hospitality of several institutions, and -benefiting from numerous grants. With this community-driven approach -and contributions from a sizeable population of developers, it has evolved -into a fully-featured toolkit for analysis and visualization of -multidimensional data. It relies on no proprietary software -- although it -can be and has been extended to interface with proprietary software and -libraries -- and has been designed from the ground up to enable users to be -as immersed in the data as they desire. +yt was originally created to study datasets generated by cosmological +simulations of galaxy and star formation conducted by the simulation code Enzo. +After expanding to address data output by other simulation platforms, it further +broadened to include alternate, grid-free simulation methods -- particularly +particles and unstructured meshes. + +With the release of yt 4.0, we are proud that the community has continued to +expand, that yt continues to participate in the broader ecosystem, and that the +development process is continuing to improve in both inclusivity and openness. + +For a more personal retrospective by the original author, Matthew Turk, you can +see this `blog post from +2017 `_. How do I contact yt? -------------------- If you have any questions about the code, please contact the `yt users email list `_. If you're having other problems, please follow the steps in -:ref:`asking-for-help`. +:ref:`asking-for-help`, which include Slack and GitHub issues. How do I cite yt? 
----------------- @@ -87,3 +81,9 @@ entry: :: adsurl = {https://ui.adsabs.harvard.edu/abs/2011ApJS..192....9T}, adsnote = {Provided by the SAO/NASA Astrophysics Data System} } + +While this paper is somewhat out of date -- and certainly does not include the +appropriate list of authors -- we are preparing a new method paper as well as +preparing a new strategy for ensuring equal credit distribution for +contributors. Some of this work can be found at the `yt-4.0-paper +`_ repository. diff --git a/doc/source/analyzing/Particle_Trajectories.ipynb b/doc/source/analyzing/Particle_Trajectories.ipynb index dcdb5e91e71..20f97968933 100644 --- a/doc/source/analyzing/Particle_Trajectories.ipynb +++ b/doc/source/analyzing/Particle_Trajectories.ipynb @@ -79,7 +79,7 @@ "source": [ "ds = yt.load(my_fns[0])\n", "dd = ds.all_data()\n", - "indices = dd[\"particle_index\"].astype(\"int\")\n", + "indices = dd[\"all\", \"particle_index\"].astype(\"int\")\n", "print (indices)" ] }, @@ -118,8 +118,8 @@ }, "outputs": [], "source": [ - "print (trajs[\"particle_position_x\"])\n", - "print (trajs[\"particle_position_x\"].shape)" + "print (trajs[\"all\", \"particle_position_x\"])\n", + "print (trajs[\"all\", \"particle_position_x\"].shape)" ] }, { @@ -138,8 +138,8 @@ "outputs": [], "source": [ "plt.figure(figsize=(6, 6))\n", - "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n", - "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])" + "plt.plot(trajs[\"all\", \"particle_position_x\"][0], trajs[\"all\", \"particle_position_y\"][0])\n", + "plt.plot(trajs[\"all\", \"particle_position_x\"][1], trajs[\"all\", \"particle_position_y\"][1])" ] }, { @@ -158,8 +158,8 @@ "outputs": [], "source": [ "plt.figure(figsize=(6, 6))\n", - "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n", - "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])" + "plt.plot(trajs[\"all\", \"particle_velocity_x\"][0], trajs[\"all\", \"particle_velocity_y\"][0])\n", + "plt.plot(trajs[\"all\", \"particle_velocity_x\"][1], trajs[\"all\", \"particle_velocity_y\"][1])" ] }, { @@ -199,8 +199,8 @@ "source": [ "particle1 = trajs.trajectory_from_index(1)\n", "plt.figure(figsize=(6, 6))\n", - "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n", - "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])" + "plt.plot(particle1[\"all\", \"particle_time\"], particle1[\"all\", \"particle_position_x\"])\n", + "plt.plot(particle1[\"all\", \"particle_time\"], particle1[\"all\", \"particle_position_y\"])" ] }, { @@ -219,7 +219,7 @@ "outputs": [], "source": [ "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n", - "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n", + "slc = yt.SlicePlot(ds, \"x\", [(\"gas\", \"density\"), (\"gas\", \"dark_matter_density\")], center=\"max\", width=(3.0, \"Mpc\"))\n", "slc.show()" ] }, @@ -239,7 +239,7 @@ "outputs": [], "source": [ "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n", - "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]" + "indices = sp[\"all\", \"particle_index\"][sp[\"all\", \"particle_type\"] == 1]" ] }, { @@ -280,9 +280,9 @@ "source": [ "fig = plt.figure(figsize=(8.0, 8.0))\n", "ax = fig.add_subplot(111, projection='3d')\n", - "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_y\"][100], trajs[\"particle_position_z\"][100])\n", - "ax.plot(trajs[\"particle_position_x\"][8], 
trajs[\"particle_position_y\"][8], trajs[\"particle_position_z\"][8])\n", - "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_y\"][25], trajs[\"particle_position_z\"][25])" + "ax.plot(trajs[\"all\", \"particle_position_x\"][100], trajs[\"all\", \"particle_position_y\"][100], trajs[\"all\", \"particle_position_z\"][100])\n", + "ax.plot(trajs[\"all\", \"particle_position_x\"][8], trajs[\"all\", \"particle_position_y\"][8], trajs[\"all\", \"particle_position_z\"][8])\n", + "ax.plot(trajs[\"all\", \"particle_position_x\"][25], trajs[\"all\", \"particle_position_y\"][25], trajs[\"all\", \"particle_position_z\"][25])" ] }, { @@ -301,9 +301,9 @@ "outputs": [], "source": [ "plt.figure(figsize=(6,6))\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])" + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"all\", \"particle_position_x\"][100])\n", + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"all\", \"particle_position_x\"][8])\n", + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"all\", \"particle_position_x\"][25])" ] }, { @@ -321,7 +321,7 @@ }, "outputs": [], "source": [ - "trajs.add_fields([\"density\"])" + "trajs.add_fields([(\"gas\", \"density\")])" ] }, { @@ -340,9 +340,9 @@ "outputs": [], "source": [ "plt.figure(figsize=(6,6))\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n", - "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n", + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"gas\", \"density\"][100])\n", + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"gas\", \"density\"][8])\n", + "plt.plot(trajs[\"all\", \"particle_time\"], trajs[\"gas\", \"density\"][25])\n", "plt.yscale(\"log\")" ] }, diff --git a/doc/source/analyzing/astropy_integrations.rst b/doc/source/analyzing/astropy_integrations.rst index 28026361af7..f7ac4e0c49d 100644 --- a/doc/source/analyzing/astropy_integrations.rst +++ b/doc/source/analyzing/astropy_integrations.rst @@ -15,8 +15,7 @@ similar to that in yt. For this reason, we have implemented "round-trip" conversions between :class:`~yt.units.yt_array.YTArray` objects and AstroPy's :class:`~astropy.units.Quantity` objects. These are implemented in the :meth:`~yt.units.yt_array.YTArray.from_astropy` and -:meth:`~yt.units.yt_array.YTArray.to_astropy` methods. See -:ref:`fields_and_unit_conversion` for more information. +:meth:`~yt.units.yt_array.YTArray.to_astropy` methods. FITS Image File Reading and Writing ----------------------------------- @@ -48,7 +47,7 @@ specifically a `QTable `_. ``QTable`` is unit-aware, and can be manipulated in a number of ways and written to disk in several formats, including ASCII text or FITS -files. For more details, see :ref:`fields-astropy-export`. +files. 
Similarly, 1D profile objects can also be exported to AstroPy ``QTable``, optionally writing all of the profile bins or only the ones diff --git a/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb index c12bea9ae6f..5fb4334fa65 100644 --- a/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb +++ b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb @@ -41,7 +41,7 @@ "source": [ "import yt\n", "\n", - "ds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n", + "ds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", default_species_fields=\"ionized\")\n", "\n", "xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0, table_type='apec', metallicity=0.3)" ] @@ -127,7 +127,7 @@ }, "outputs": [], "source": [ - "ds2 = yt.load(\"D9p_500/10MpcBox_HartGal_csf_a0.500.d\")\n", + "ds2 = yt.load(\"D9p_500/10MpcBox_HartGal_csf_a0.500.d\", default_species_fields=\"ionized\")\n", "\n", "# In this case, use the redshift and cosmology from the dataset, \n", "# but in theory you could put in something different\n", diff --git a/doc/source/analyzing/domain_analysis/index.rst b/doc/source/analyzing/domain_analysis/index.rst index bce7fc2d9ce..65336a978af 100644 --- a/doc/source/analyzing/domain_analysis/index.rst +++ b/doc/source/analyzing/domain_analysis/index.rst @@ -25,6 +25,7 @@ These modules exist within yt itself. cosmology_calculator clump_finding xray_emission_fields + xray_data_README External Analysis Modules ------------------------- @@ -63,7 +64,7 @@ a new owner and a new home. If you find something in here that you'd like to bring back to life, either by adding it to :ref:`yt-astro` or as part of your own package, you are welcome to it! If you'd like any help, let us know! See -:ref:`yt_attic:attic_modules` for a list of inventory of the +:ref:`yt_attic:attic-modules` for an inventory of the attic. Extensions diff --git a/doc/source/analyzing/fields.rst b/doc/source/analyzing/fields.rst index 93dbcf62ff9..fb3ff04163d 100644 --- a/doc/source/analyzing/fields.rst +++ b/doc/source/analyzing/fields.rst @@ -7,31 +7,33 @@ Fields are spatially-dependent quantities associated with a parent dataset. Examples of fields are gas density, gas temperature, particle mass, etc. The fundamental way to query data in yt is to access a field, either in its raw form (by examining a data container) or a processed form (derived quantities, -projections, and so on). "Field" is something of a loaded word, as it can -refer to quantities that are defined everywhere, which we refer to as "mesh" or -"fluid" fields, or discrete points that populate the domain, traditionally -thought of as "particle" fields. The word "particle" here is gradually falling -out of favor, as these discrete fields can be any type of sparsely populated -data. +projections, aggregations, and so on). "Field" is something of a loaded word, +as it can refer to quantities that are defined everywhere, which we refer to as +"mesh" or "fluid" fields, or discrete points that populate the domain, +traditionally thought of as "particle" fields. The word "particle" here is +gradually falling out of favor, as these discrete fields can be any type of +sparsely populated data. + +.. _what-are-fields: What are fields? ---------------- Fields in yt are denoted by a two-element tuple, of the form ``(field_type, field_name)``. The first element, the "field type", is a category for a -field.
Possible field types used in yt include *gas* (for fluid mesh fields -defined on a mesh) or *io* (for fields defined at particle locations). Field +field. Possible field types used in yt include ``gas`` (for fluid mesh fields +defined on a mesh) or ``io`` (for fields defined at particle locations). Field types can also correspond to distinct particle or fluid types in a single simulation. For example, a plasma physics simulation using the Particle in Cell -method might have particle types corresponding to *electrons* and *ions*. See +method might have particle types corresponding to ``electrons`` and ``ions``. See :ref:`known-field-types` below for more info about field types in yt. -The second element of field tuples, the "field name", denotes the specific field -to select, given the field type. Possible field names include *density*, -*velocity_x* or *pressure* --- these three fields are examples of field names +The second element of field tuples, the ``field_name``, denotes the specific field +to select, given the field type. Possible field names include ``density``, +``velocity_x`` or ``pressure`` --- these three fields are examples of field names that might be used for a fluid defined on a mesh. Examples of particle fields -include *particle_mass*, *particle_position*, or *particle_velocity_x*. In -general, particle field names are prefixed by "particle\_", which makes it easy +include ``particle_mass``, ``particle_position``, or ``particle_velocity_x``. In +general, particle field names are prefixed by ``particle_``, which makes it easy to distinguish between a particle field or a mesh field when no field type is provided. @@ -118,6 +120,13 @@ field, like its default units or the source code for it. Using fields to access data --------------------------- +.. warning:: + + These *specific* operations will load the entire field -- which can be + extremely memory intensive with large datasets! If you are looking to + compute quantities, see :ref:`Data-objects` for methods for computing + aggregates, averages, subsets, regriddings, etc. + The primary *use* of fields in yt is to access data from a dataset. For example, if I want to use a data object (see :ref:`Data-objects` for more detail about data objects) to access the ``('gas', 'density')`` field, one can do any of the @@ -140,10 +149,11 @@ following: density = ad[ds.fields.gas.density] The first data access example is the simplest. In that example, the field type -is inferred from the name of the field. The next two examples use the field type -explicitly, this might be necessary if there is more than one field type with a -"density" field defined in the same dataset. The third example is slightly more -verbose but is syntactically identical to the second example due to the way +is inferred from the name of the field. However, yt will complain if there are multiple +field names that could be meant by this simple string access. The next two examples +use the field type explicitly; this might be necessary if there is more than one field +type with a "density" field defined in the same dataset. The third example is slightly +more verbose but is syntactically identical to the second example due to the way indexing works in the Python language. The final example uses the ``ds.fields`` object described above. This way of @@ -240,8 +250,7 @@ aliasing process allows universally-defined derived fields to take advantage of internal names, and it also provides an easy way to address what units something should be returned in. 
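The following minimal sketch illustrates the unit behaviour spelled out in the next paragraph; ``ds`` is an already-loaded dataset, and the ``("enzo", "Density")`` name is purely a hypothetical example of a frontend-specific field:

```python
ad = ds.all_data()
# Aliased field: returned in the dataset's unit system (e.g. g/cm**3).
rho_alias = ad["gas", "density"]
# Frontend-specific field (hypothetical name): kept in its natural units.
rho_native = ad["enzo", "Density"]
```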
If an aliased field is requested (and aliased fields will always be lowercase, with underscores separating words) it will be returned -in the units specified by the unit system of the database (see :ref:`unit_systems` -for a guide to using the different unit systems in yt), whereas if the +in the units specified by the unit system of the dataset, whereas if the frontend-specific field is requested, it will not undergo any unit conversions from its natural units. (This rule is occasionally violated for fields which are mesh-dependent, specifically particle masses in some cosmology codes.) @@ -266,8 +275,7 @@ Recall that fields are formally accessed in two parts: ('*field type*', * ``gas`` -- This is the usual default for simulation frontends for fluid types. These fields are typically aliased to the frontend-specific mesh fields for grid-based codes or to the deposit fields for particle-based - codes. Default units are in the unit system of the dataset (see - :ref:`unit_systems` for more information). + codes. Default units are in the unit system of the dataset. * particle type -- These are particle fields that exist on-disk as written by individual frontends. If the frontend designates names for these particles (i.e. particle type) those names are the field types. @@ -408,7 +416,7 @@ systems in terms of the definition of the magnetic pressure: where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically detects on a per-frontend basis what units the magnetic field should be in, and allows conversion between -different magnetic field units in the different :ref:`unit systems ` as well. To +different magnetic field units in the different unit systems as well. To determine how to set up special magnetic field handling when designing a new frontend, check out :ref:`bfields-frontend`. @@ -683,8 +691,6 @@ General Particle Fields Every particle will contain both a ``particle_position`` and ``particle_velocity`` that tracks the position and velocity (respectively) in code units. -.. FIXME: Update the following sections to reflect differences in yt-4.0. - .. _deposited-particle-fields: Deposited Particle Fields diff --git a/doc/source/analyzing/filtering.rst b/doc/source/analyzing/filtering.rst index 2a142fcfb42..d65afd12c97 100644 --- a/doc/source/analyzing/filtering.rst +++ b/doc/source/analyzing/filtering.rst @@ -38,8 +38,9 @@ array a conditional. As a general example of this: .. notebook-cell:: import numpy as np + a = np.arange(5) - bigger_than_two = (a > 2) + bigger_than_two = a > 2 print("Original Array: a = \n%s" % a) print("Boolean Mask: bigger_than_two = \n%s" % bigger_than_two) print("Masked Array: a[bigger_than_two] = \n%s" % a[bigger_than_two]) @@ -52,13 +53,19 @@ set a simple mask based on the contents of one of our fields. .. 
notebook-cell:: import yt - ds = yt.load('Enzo_64/DD0042/data0042') + + ds = yt.load("Enzo_64/DD0042/data0042") ad = ds.all_data() - hot = ad[("gas", "temperature")].in_units('K') > 1e6 - print('Temperature of all data: ad[("gas", "temperature")] = \n%s' % ad[("gas", "temperature")]) + hot = ad["gas", "temperature"].in_units("K") > 1e6 + print( + 'Temperature of all data: ad["gas", "temperature"] = \n%s' + % ad["gas", "temperature"] + ) print("Boolean Mask: hot = \n%s" % hot) - print('Temperature of "hot" data: ad[("gas", "temperature")][hot] = \n%s' % - ad[("gas", "temperature")][hot]) + print( + 'Temperature of "hot" data: ad["gas", "temperature"][hot] = \n%s' + % ad["gas", "temperature"][hot] + ) This was a simple example, but one can make the conditionals that define a boolean mask have multiple parts, and one can stack masks together to @@ -68,12 +75,19 @@ used if you simply need to access the NumPy arrays: .. notebook-cell:: import yt - ds = yt.load('Enzo_64/DD0042/data0042') + + ds = yt.load("Enzo_64/DD0042/data0042") ad = ds.all_data() - overpressure_and_fast = (ad["pressure"] > 1e-14) & (ad["velocity_magnitude"].in_units('km/s') > 1e2) - print('Density of all data: ad[("gas", "density")] = \n%s' % ad[("gas", "density")]) - print('Density of "overpressure and fast" data: overpressure_and_fast[("gas", "density")] = \n%s' % - overpressure_and_fast[("gas", "density")]) + overpressure_and_fast = ( + (ad["gas", "pressure"] > 1e-14) & + (ad["gas", "velocity_magnitude"].in_units("km/s") > 1e2) + ) + density = ad["gas", "density"] + print('Density of all data: ad["gas", "density"] = \n%s' % density) + print( + 'Density of "overpressure and fast" data: overpressure_and_fast["gas", "density"] = \n%s' + % density[overpressure_and_fast] + ) .. _cut-regions: @@ -94,15 +108,20 @@ filtering out unwanted regions. Such wrapper functions are methods of .. notebook-cell:: import yt - ds = yt.load('Enzo_64/DD0042/data0042') + + ds = yt.load("Enzo_64/DD0042/data0042") ad = ds.all_data() overpressure_and_fast = ad.include_above(("gas", "pressure"), 1e-14) # You can chain include_xx and exclude_xx to produce the intersection of cut regions - overpressure_and_fast = overpressure_and_fast.include_above(("gas", "velocity_magnitude"), 1e2, 'km/s') + overpressure_and_fast = overpressure_and_fast.include_above( + ("gas", "velocity_magnitude"), 1e2, "km/s" + ) - print('Density of all data: ad[("gas", "density")] = \n%s' % ad[("gas", density")]) - print('Density of "overpressure and fast" data: overpressure_and_fast[("gas", "density")] = \n%s' % - overpressure_and_fast[("gas", "density")]) + print('Density of all data: ad["gas", "density"] = \n%s' % ad["gas", "density"]) + print( + 'Density of "overpressure and fast" data: overpressure_and_fast["gas", "density"] = \n%s' + % overpressure_and_fast["gas", "density"] + ) The following exclude and include functions are supported: - :func:`~yt.data_objects.data_containers.YTSelectionContainer3D.include_equal` - Only include values equal to given value @@ -284,19 +303,22 @@ distributed throughout the dataset. .. 
notebook-cell:: import yt - ds = yt.load('Enzo_64/DD0042/data0042') + + ds = yt.load("Enzo_64/DD0042/data0042") center = [0.20, 0.50, 0.10] - sp = ds.sphere(center, (10, 'Mpc')) - prj = yt.ProjectionPlot(ds, "x", "density", center=center, width=(50, "Mpc"), - data_source=sp) + sp = ds.sphere(center, (10, "Mpc")) + prj = yt.ProjectionPlot( + ds, "x", ("gas", "density"), center=center, width=(50, "Mpc"), data_source=sp + ) # Mark the center with a big X - prj.annotate_marker(center, 'x', plot_args={'s':100}) + prj.annotate_marker(center, "x", plot_args={"s": 100}) prj.show() - slc = yt.SlicePlot(ds, "x", "density", center=center, width=(50, "Mpc"), - data_source=sp) + slc = yt.SlicePlot( + ds, "x", ("gas", "density"), center=center, width=(50, "Mpc"), data_source=sp + ) slc.show() diff --git a/doc/source/analyzing/generating_processed_data.rst b/doc/source/analyzing/generating_processed_data.rst index 7ef1c5b4170..541a98a86b1 100644 --- a/doc/source/analyzing/generating_processed_data.rst +++ b/doc/source/analyzing/generating_processed_data.rst @@ -23,7 +23,7 @@ To export to a :class:`~pandas.DataFrame`, use .. code-block:: python sp = ds.sphere("c", (0.2, "unitary")) - df2 = sp.to_dataframe(["density", "temperature"]) + df2 = sp.to_dataframe([("gas", "density"), ("gas", "temperature")]) To export to a :class:`~astropy.table.QTable`, use :meth:`~yt.data_objects.data_containers.YTDataContainer.to_astropy_table`: @@ -31,7 +31,7 @@ To export to a :class:`~astropy.table.QTable`, use .. code-block:: python sp = ds.sphere("c", (0.2, "unitary")) - at2 = sp.to_astropy_table(fields=["density", "temperature"]) + at2 = sp.to_astropy_table(fields=[("gas", "density"), ("gas", "temperature")]) For exports to :class:`~pandas.DataFrame` objects, the unit information is lost, but for exports to :class:`~astropy.table.QTable` objects, the :class:`~yt.units.yt_array.YTArray` @@ -85,9 +85,12 @@ The buffer arrays can be saved out to disk in either HDF5 or FITS format: .. code-block:: python - frb.save_as_dataset("my_images.h5", fields=["density", "temperature"]) + frb.save_as_dataset("my_images.h5", fields=[("gas", "density"), ("gas", "temperature")]) frb.export_fits( - "my_images.fits", fields=["density", "temperature"], clobber=True, units="kpc" + "my_images.fits", + fields=[("gas", "density"), ("gas", "temperature")], + clobber=True, + units="kpc", ) In the HDF5 case, the created file can be reloaded just like a regular dataset with @@ -101,7 +104,9 @@ as a 2D dataset itself, which may be operated on in the same way as any other da .. code-block:: python - ds_frb = frb.export_dataset(fields=["density", "temperature"], nprocs=8) + ds_frb = frb.export_dataset( + fields=[("gas", "density"), ("gas", "temperature")], nprocs=8 + ) sp = ds_frb.sphere("c", (100.0, "kpc")) where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` number of grids. @@ -213,7 +218,7 @@ whether to use a log or linear scale, and whether or not to do accumulation to create a cumulative distribution function. For more information, see the API documentation on the :func:`~yt.data_objects.profiles.create_profile` function. -For custom bins the other keyword arguments can be overriden using the +For custom bins the other keyword arguments can be overridden using the ``override_bins`` keyword argument. This accepts a dictionary with an array for each bin field or ``None`` to use the default settings. @@ -243,7 +248,7 @@ itself. 
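A short sketch of the ``override_bins`` usage just described, assuming the supplied array replaces the default binning for that field while ``None`` keeps the defaults (the dataset and bin edges are illustrative):

```python
import numpy as np
import yt

ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
ad = ds.all_data()
profile = yt.create_profile(
    ad,
    [("gas", "density"), ("gas", "temperature")],
    ("gas", "mass"),
    weight_field=None,
    override_bins={
        ("gas", "density"): np.logspace(-31, -25, 64),  # custom density bins
        ("gas", "temperature"): None,                   # keep the default settings
    },
)
```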
If you only want to export the bins which are used, set ``only_used=True # Only adds the used bins to the DataFrame df_used = profile.to_dataframe(only_used=True) # Only adds the density and temperature fields - df2 = profile.to_dataframe(fields=["density", "temperature"]) + df2 = profile.to_dataframe(fields=[("gas", "density"), ("gas", "temperature")]) The :class:`~pandas.DataFrame` can then analyzed and/or written to disk using pandas methods. Note that unit information is lost in this export. @@ -270,7 +275,7 @@ To export the 1D profile to a Table object, simply call # Only adds the used bins to the Table t_used = profile.to_astropy_table(only_used=True) # Only adds the density and temperature fields - t2 = profile.to_astropy_table(fields=["density", "temperature"]) + t2 = profile.to_astropy_table(fields=[("gas", "density"), ("gas", "temperature")]) .. _generating-line-queries: @@ -288,7 +293,7 @@ along that ray: .. code-block:: python ray = ds.ray((0.3, 0.5, 0.9), (0.1, 0.8, 0.5)) - print(ray["density"]) + print(ray["gas", "density"]) The points are not ordered, so you may need to sort the data (see the example in the @@ -331,6 +336,6 @@ interoperability with anything that can take xarray data. The classes that can .. code-block:: python grid = ds.r[::256j, ::256j, ::256j] - obj = grid.to_xarray(fields=["density", "temperature"]) + obj = grid.to_xarray(fields=[("gas", "density"), ("gas", "temperature")]) -The returned object, ``obj``, will now have the correct labeled axes and so forth. +The returned object, ``obj``, will now have the correct labelled axes and so forth. diff --git a/doc/source/analyzing/mesh_filter.ipynb b/doc/source/analyzing/mesh_filter.ipynb index 5f9f9a10ed8..0c79ddfabf5 100644 --- a/doc/source/analyzing/mesh_filter.ipynb +++ b/doc/source/analyzing/mesh_filter.ipynb @@ -33,12 +33,12 @@ "outputs": [], "source": [ "ad = ds.all_data()\n", - "hot_ad = ad.cut_region([\"obj['temperature'] > 1e6\"])\n", - "dense_ad = ad.cut_region(['obj[\"density\"] > 5e-30'])\n", + "hot_ad = ad.cut_region(['obj[\"gas\", \"temperature\"] > 1e6'])\n", + "dense_ad = ad.cut_region(['obj[\"gas\", \"density\"] > 5e-30'])\n", "\n", "# you can chain cut regions in two ways:\n", - "dense_and_cool_ad = dense_ad.cut_region([\"obj['temperature'] < 1e5\"])\n", - "overpressure_and_fast_ad = ad.cut_region(['(obj[\"pressure\"] > 1e-14) & (obj[\"velocity_magnitude\"].in_units(\"km/s\") > 1e2)'])" + "dense_and_cool_ad = dense_ad.cut_region(['obj[\"gas\", \"temperature\"] < 1e5'])\n", + "overpressure_and_fast_ad = ad.cut_region(['(obj[\"gas\", \"pressure\"] > 1e-14) & (obj[\"gas\", \"velocity_magnitude\"].in_units(\"km/s\") > 1e2)'])" ] }, { @@ -55,13 +55,13 @@ "outputs": [], "source": [ "ad = ds.all_data()\n", - "hot_ad = ad.include_above('temperature', 1e6)\n", - "dense_ad = ad.include_above('density', 5e-30)\n", + "hot_ad = ad.include_above((\"gas\", \"temperature\"), 1e6)\n", + "dense_ad = ad.include_above((\"gas\", \"density\"), 5e-30)\n", "\n", "# These can be chained as well\n", - "dense_and_cool_ad = dense_ad.include_below('temperature', 1e5)\n", - "overpressure_and_fast_ad = ad.include_above('pressure', 1e-14)\n", - "overpressure_and_fast_ad = overpressure_and_fast_ad.include_above('velocity_magnitude', 1e2, 'km/s')" + "dense_and_cool_ad = dense_ad.include_below((\"gas\", \"temperature\"), 1e5)\n", + "overpressure_and_fast_ad = ad.include_above((\"gas\", \"pressure\"), 1e-14)\n", + "overpressure_and_fast_ad = overpressure_and_fast_ad.include_above((\"gas\", \"velocity_magnitude\"), 1e2, 'km/s')" ] 
}, { @@ -77,8 +77,8 @@ "metadata": {}, "outputs": [], "source": [ - "print (\"Temperature of all cells:\\n ad['temperature'] = \\n%s\\n\" % ad[\"temperature\"])\n", - "print (\"Temperatures of all \\\"hot\\\" cells:\\n hot_ad['temperature'] = \\n%s\" % hot_ad['temperature'])" + "print (\"Temperature of all cells:\\n ad['temperature'] = \\n%s\\n\" % ad[\"gas\", \"temperature\"])\n", + "print (\"Temperatures of all \\\"hot\\\" cells:\\n hot_ad['temperature'] = \\n%s\" % hot_ad[\"gas\", \"temperature\"])" ] }, { @@ -87,8 +87,8 @@ "metadata": {}, "outputs": [], "source": [ - "print (\"Density of dense, cool material:\\n dense_and_cool_ad['density'] = \\n%s\\n\" % dense_and_cool_ad['density'])\n", - "print (\"Temperature of dense, cool material:\\n dense_and_cool_ad['temperature'] = \\n%s\" % dense_and_cool_ad['temperature'])" + "print (\"Density of dense, cool material:\\n dense_and_cool_ad['density'] = \\n%s\\n\" % dense_and_cool_ad[\"gas\", \"density\"])\n", + "print (\"Temperature of dense, cool material:\\n dense_and_cool_ad['temperature'] = \\n%s\" % dense_and_cool_ad[\"gas\", \"temperature\"])" ] }, { @@ -106,14 +106,14 @@ "metadata": {}, "outputs": [], "source": [ - "proj1 = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\")\n", + "proj1 = yt.ProjectionPlot(ds, 'x', (\"gas\", \"density\"), weight_field=(\"gas\", \"density\"))\n", "proj1.annotate_title('No Cuts')\n", "proj1.set_figure_size(5)\n", "proj1.show()\n", "\n", - "proj2 = yt.ProjectionPlot(ds, 'x', \"density\", weight_field=\"density\", data_source=hot_ad)\n", + "proj2 = yt.ProjectionPlot(ds, 'x', (\"gas\", \"density\"), weight_field=(\"gas\", \"density\"), data_source=hot_ad)\n", "proj2.annotate_title('Hot Gas')\n", - "proj2.set_zlim(\"density\", 3e-31, 3e-27)\n", + "proj2.set_zlim((\"gas\", \"density\"), 3e-31, 3e-27)\n", "proj2.set_figure_size(5)\n", "proj2.show()" ] @@ -136,14 +136,14 @@ "metadata": {}, "outputs": [], "source": [ - "slc1 = yt.SlicePlot(ds, 'x', \"density\", center='m')\n", - "slc1.set_zlim('density', 3e-31, 3e-27)\n", + "slc1 = yt.SlicePlot(ds, 'x', (\"gas\", \"density\"), center='m')\n", + "slc1.set_zlim((\"gas\", 'density'), 3e-31, 3e-27)\n", "slc1.annotate_title('No Cuts')\n", "slc1.set_figure_size(5)\n", "slc1.show()\n", "\n", - "slc2 = yt.SlicePlot(ds, 'x', \"density\", center='m', data_source=dense_ad)\n", - "slc2.set_zlim('density', 3e-31, 3e-27)\n", + "slc2 = yt.SlicePlot(ds, 'x', (\"gas\", \"density\"), center='m', data_source=dense_ad)\n", + "slc2.set_zlim((\"gas\", 'density'), 3e-31, 3e-27)\n", "slc2.annotate_title('Dense Gas')\n", "slc2.set_figure_size(5)\n", "slc2.show()" @@ -155,13 +155,13 @@ "metadata": {}, "outputs": [], "source": [ - "ph1 = yt.PhasePlot(ad, 'density', 'temperature', 'mass', weight_field=None)\n", + "ph1 = yt.PhasePlot(ad, (\"gas\", \"density\"), (\"gas\", \"temperature\"), (\"gas\", \"mass\"), weight_field=None)\n", "ph1.set_xlim(3e-31, 3e-27)\n", "ph1.annotate_title('No Cuts')\n", "ph1.set_figure_size(5)\n", "ph1.show()\n", "\n", - "ph1 = yt.PhasePlot(dense_ad, 'density', 'temperature', 'mass', weight_field=None)\n", + "ph1 = yt.PhasePlot(dense_ad, (\"gas\", \"density\"), (\"gas\", \"temperature\"), (\"gas\", \"mass\"), weight_field=None)\n", "ph1.set_xlim(3e-31, 3e-27)\n", "ph1.annotate_title('Dense Gas')\n", "ph1.set_figure_size(5)\n", diff --git a/doc/source/analyzing/objects.rst b/doc/source/analyzing/objects.rst index 760fbdfe707..350c76b6448 100644 --- a/doc/source/analyzing/objects.rst +++ b/doc/source/analyzing/objects.rst @@ -58,15 +58,20 @@ dataset 
you could: sp = ds.sphere([0.5, 0.5, 0.5], (1, "kpc")) # Show all temperature values - print(sp["temperature"]) + print(sp["gas", "temperature"]) # Print things in a more human-friendly manner: one temperature at a time print("(x, y, z) Temperature") print("-----------------------") - for i in range(sp["temperature"].size): + for i in range(sp["gas", "temperature"].size): print( "(%f, %f, %f) %f" - % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i]) + % ( + sp["gas", "x"][i], + sp["gas", "y"][i], + sp["gas", "z"][i], + sp["gas", "temperature"][i], + ) ) Data objects can also be cloned; for instance: @@ -103,7 +108,7 @@ on the ``.r`` object, like so: .. code-block:: python ds = yt.load("RedshiftOutput0005") - rho = ds.r["density"] + rho = ds.r["gas", "density"] This will return a *flattened* array of data. The region expression object (``r``) doesn't have any derived quantities on it. This is completely @@ -113,7 +118,7 @@ equivalent to this set of statements: ds = yt.load("RedshiftOutput0005") dd = ds.all_data() - rho = dd["density"] + rho = dd["gas", "density"] .. warning:: @@ -566,54 +571,58 @@ after ``max`` will be considerably faster. Here is an example. ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") reg = ds.r[0.3:0.6, 0.2:0.4, 0.9:0.95] - min_rho = reg.min("density") - max_rho = reg.max("density") + min_rho = reg.min(("gas", "density")) + max_rho = reg.max(("gas", "density")) This is equivalent to: .. code-block:: python - min_rho, max_rho = reg.quantities.extrema("density") + min_rho, max_rho = reg.quantities.extrema(("gas", "density")) The ``max`` operation can also compute the maximum intensity projection: .. code-block:: python - proj = reg.max("density", axis="x") + proj = reg.max(("gas", "density"), axis="x") proj.plot() This is equivalent to: .. code-block:: python - proj = ds.proj("density", "x", data_source=reg, method="mip") + proj = ds.proj(("gas", "density"), "x", data_source=reg, method="mip") proj.plot() The ``min`` operator does not do this, however, as a minimum intensity projection is not currently implemented. -You can also compute the ``mean`` value, which accepts a field, axis and wight +You can also compute the ``mean`` value, which accepts a field, axis and weight function. If the axis is not specified, it will return the average value of the specified field, weighted by the weight argument. The weight argument defaults to ``ones``, which performs an arithmetic average. For instance: .. code-block:: python - mean_rho = reg.mean("density") - rho_by_vol = reg.mean("density", weight="cell_volume") + mean_rho = reg.mean(("gas", "density")) + rho_by_vol = reg.mean(("gas", "density"), weight=("gas", "cell_volume")) This is equivalent to: .. code-block:: python - mean_rho = reg.quantities.weighted_average("density", weight_field="ones") - rho_by_vol = reg.quantities.weighted_average("density", weight_field="cell_volume") + mean_rho = reg.quantities.weighted_average( + ("gas", "density"), weight_field=("index", "ones") + ) + rho_by_vol = reg.quantities.weighted_average( + ("gas", "density"), weight_field=("gas", "cell_volume") + ) If an axis is provided, it will project along that axis and return it to you: .. code-block:: python - rho_proj = reg.mean("temperature", axis="y", weight="density") + rho_proj = reg.mean(("gas", "temperature"), axis="y", weight=("gas", "density")) rho_proj.plot() The ``sum`` function will add all the values in the data object. It accepts a @@ -622,7 +631,7 @@ the values in the object: .. 
code-block:: python - vol = reg.sum("cell_volume") + vol = reg.sum(("gas", "cell_volume")) If the axis is specified, it will compute a projection using the method ``sum`` (which does *not* take into account varying path length!) and return that to @@ -630,7 +639,7 @@ you. .. code-block:: python - cell_count = reg.sum("ones", axis="z") + cell_count = reg.sum(("index", "ones"), axis="z") cell_count.plot() To compute a projection where the path length *is* taken into account, you can @@ -638,7 +647,7 @@ use the ``integrate`` function: .. code-block:: python - proj = reg.integrate("density", "x") + proj = reg.integrate(("gas", "density"), "x") All of these projections supply the data object as their base input. @@ -648,14 +657,14 @@ this. .. code-block:: python - reg.argmin("density", axis="temperature") + reg.argmin(("gas", "density"), axis=("gas", "temperature")) This will return the temperature at the minimum density. If you don't specify an ``axis``, it will return the spatial position of the minimum value of the queried field. Here is an example:: - x, y, z = reg.argmin("density") + x, y, z = reg.argmin(("gas", "density")) Available Derived Quantities ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -851,7 +860,9 @@ whether or not to conduct it in log space. .. code-block:: python sp = ds.sphere("max", (1.0, "pc")) - contour_values, connected_sets = sp.extract_connected_sets("density", 3, 1e-30, 1e-20) + contour_values, connected_sets = sp.extract_connected_sets( + ("gas", "density"), 3, 1e-30, 1e-20 + ) The first item, ``contour_values``, will be an array of the min value for each set of level sets. The second (``connected_sets``) will be a dict of dicts. diff --git a/doc/source/analyzing/parallel_computation.rst b/doc/source/analyzing/parallel_computation.rst index 8140e54ec88..8b0512dd920 100644 --- a/doc/source/analyzing/parallel_computation.rst +++ b/doc/source/analyzing/parallel_computation.rst @@ -19,8 +19,7 @@ Currently, yt is able to perform the following actions in parallel: * Slices (:ref:`slice-plots`) * Cutting planes (oblique slices) (:ref:`off-axis-slices`) * Covering grids (:ref:`examining-grid-data-in-a-fixed-resolution-array`) -* Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`, - :ref:`derived-quantities`) +* Derived Quantities (total mass, angular momentum, etc) * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`) * Halo analysis (:ref:`halo-analysis`) * Volume rendering (:ref:`volume_rendering`) @@ -41,7 +40,7 @@ mpi4py website, but you may have luck by just running: .. 
code-block:: bash - $ pip install mpi4py + $ python -m pip install mpi4py If you have an Anaconda installation of yt and there is no MPI library on the system you are using try: @@ -99,9 +98,9 @@ in the simulation and then makes a plot of the projected density: yt.enable_parallelism() ds = yt.load("RD0035/RedshiftOutput0035") - v, c = ds.find_max("density") + v, c = ds.find_max(("gas", "density")) print(v, c) - p = yt.ProjectionPlot(ds, "x", "density") + p = yt.ProjectionPlot(ds, "x", ("gas", "density")) p.save() If this script is run in parallel, two of the most expensive operations - @@ -151,8 +150,8 @@ so: yt.enable_parallelism() ds = yt.load("RD0035/RedshiftOutput0035") - v, c = ds.find_max("density") - p = yt.ProjectionPlot(ds, "x", "density") + v, c = ds.find_max(("gas", "density")) + p = yt.ProjectionPlot(ds, "x", ("gas", "density")) if yt.is_root(): print(v, c) p.save() @@ -179,8 +178,8 @@ how to use it: ds = yt.load("RD0035/RedshiftOutput0035") - v, c = ds.find_max("density") - p = yt.ProjectionPlot(ds, "x", "density") + v, c = ds.find_max(("gas", "density")) + p = yt.ProjectionPlot(ds, "x", ("gas", "density")) yt.only_on_root(print_and_save_plot, v, c, plot, verbose=True) Types of Parallelism @@ -339,10 +338,10 @@ processors (or cores). Please see this heavily-commented example: # This copies fn and the min/max of density to the local copy of # my_storage sto.result_id = fn - sto.result = dd.quantities.extrema("density") + sto.result = dd.quantities.extrema(("gas", "density")) # Makes and saves a plot of the gas density. - p = yt.ProjectionPlot(ds, "x", "density") + p = yt.ProjectionPlot(ds, "x", ("gas", "density")) p.save() # At this point, as the loop exits, the local copies of my_storage are @@ -472,8 +471,8 @@ separate processor. for ax in yt.parallel_objects("xyz", njobs=3): # project each field with one of the two cores in the workgroup - for field in yt.parallel_objects(["density", "temperature"]): - p = yt.ProjectionPlot(ds, ax, field, weight_field="density") + for field in yt.parallel_objects([("gas", "density"), ("gas", "temperature")]): + p = yt.ProjectionPlot(ds, ax, field, weight_field=("gas", "density")) p.save("figures/") Note, in the above example, if the inner diff --git a/doc/source/analyzing/saving_data.rst b/doc/source/analyzing/saving_data.rst index 2fb2f02ef5b..6ceeb351beb 100644 --- a/doc/source/analyzing/saving_data.rst +++ b/doc/source/analyzing/saving_data.rst @@ -27,16 +27,17 @@ Geometric Data Containers ------------------------- Data from geometric data containers can be saved with the -:func:`~yt.data_objects.data_containers.save_as_dataset`` function. +:func:`~yt.data_objects.data_containers.YTDataContainer.save_as_dataset` function. .. notebook-cell:: import yt + ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") - sphere = ds.sphere([0.5]*3, (10, "Mpc")) - fn = sphere.save_as_dataset(fields=["density", "particle_mass"]) - print (fn) + sphere = ds.sphere([0.5] * 3, (10, "Mpc")) + fn = sphere.save_as_dataset(fields=[("gas", "density"), ("all", "particle_mass")]) + print(fn) This function will return the name of the file to which the dataset was saved. The filename will be a combination of the name of the @@ -97,7 +98,7 @@ containers. .. 
code-block:: python cg = ds.covering_grid(level=0, left_edge=[0.25] * 3, dims=[16] * 3) - fn = cg.save_as_dataset(fields=["density", "particle_mass"]) + fn = cg.save_as_dataset(fields=[("gas", "density"), ("all", "particle_mass")]) cg_ds = yt.load(fn) ad = cg_ds.all_data() @@ -114,11 +115,11 @@ Fixed resolution buffers work just the same. .. code-block:: python - my_proj = ds.proj("density", "x", weight_field="density") + my_proj = ds.proj(("gas", "density"), "x", weight_field=("gas", "density")) frb = my_proj.to_frb(1.0, (800, 800)) - fn = frb.save_as_dataset(fields=["density"]) + fn = frb.save_as_dataset(fields=[("gas", "density")]) frb_ds = yt.load(fn) - print(frb_ds.data["density"]) + print(frb_ds.data["gas", "density"]) .. _saving-spatial-plots: @@ -130,7 +131,7 @@ Spatial plots, such as projections, slices, and off-axis slices .. code-block:: python - proj = ds.proj("density", "x", weight_field="density") + proj = ds.proj(("gas", "density"), "x", weight_field=("gas", "density")) proj.save_as_dataset() Once reloaded, they can be handed to their associated plotting @@ -139,7 +140,7 @@ functions to make images. .. code-block:: python proj_ds = yt.load("DD0046_proj.h5") - p = yt.ProjectionPlot(proj_ds, "x", "density", weight_field="density") + p = yt.ProjectionPlot(proj_ds, "x", ("gas", "density"), weight_field=("gas", "density")) p.save() .. _saving-profile-data: @@ -161,20 +162,20 @@ accessed through the ``.data`` attribute. ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") ad = ds.all_data() - profile_2d = yt.create_profile(ad, ["density", "temperature"], - "mass", weight_field=None, + profile_2d = yt.create_profile(ad, [("gas", "density"), ("gas", "temperature")], + ("gas", "mass"), weight_field=None, n_bins=(128, 128)) profile_2d.save_as_dataset() prof_2d_ds = yt.load("DD0046_Profile2D.h5") - print (prof_2d_ds.data["mass"]) + print (prof_2d_ds.data["gas", "mass"]) The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D arrays with "x", "y", and "z". .. code-block:: python - print(prof_2d_ds.data["x"]) + print(prof_2d_ds.data["gas", "x"]) The bin fields can also be returned with the same shape as the profile data by accessing them with their original names. This allows for @@ -183,7 +184,7 @@ boolean masking of profile data using the bin fields. .. code-block:: python # density is the x bin field - print(prof_2d_ds.data["density"]) + print(prof_2d_ds.data["gas", "density"]) For 1, 2, and 3D profile datasets, a fake profile object will be constructed by accessing the ".profile" attribute. This is used @@ -193,7 +194,13 @@ primarily in the case of 1 and 2D profiles to create figures using .. code-block:: python - p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature", "mass", weight_field=None) + p = yt.PhasePlot( + prof_2d_ds.data, + ("gas", "density"), + ("gas", "temperature"), + ("gas", "mass"), + weight_field=None, + ) p.save() .. _saving-array-data: @@ -215,8 +222,8 @@ selection is not possible, but the data can be accessed through the region = ds.box([0.25]*3, [0.75]*3) sphere = ds.sphere(ds.domain_center, (10, "Mpc")) my_data = {} - my_data["region_density"] = region["density"] - my_data["sphere_density"] = sphere["density"] + my_data["region_density"] = region["gas", "density"] + my_data["sphere_density"] = sphere["gas", "density"] yt.save_as_dataset(ds, "test_data.h5", my_data) array_ds = yt.load("test_data.h5") @@ -238,4 +245,4 @@ dictionary. 
yt.save_as_dataset(fake_ds, "random_data.h5", my_data) new_ds = yt.load("random_data.h5") - print (new_ds.data["density"]) + print (new_ds.data["gas", "density"]) diff --git a/doc/source/analyzing/time_series_analysis.rst b/doc/source/analyzing/time_series_analysis.rst index 077306c59a2..9f4db0fc2a1 100644 --- a/doc/source/analyzing/time_series_analysis.rst +++ b/doc/source/analyzing/time_series_analysis.rst @@ -117,7 +117,7 @@ After this, time series analysis can be done normally. for ds in my_sim.piter(): all_data = ds.all_data() - print(all_data.quantities.extrema("density")) + print(all_data.quantities.extrema(("gas", "density"))) Additional keywords can be given to :meth:`frontends.enzo.simulation_handling.EnzoSimulation.get_time_series` diff --git a/doc/source/analyzing/units.rst b/doc/source/analyzing/units.rst index 957b64d36a3..b4cafb9f169 100644 --- a/doc/source/analyzing/units.rst +++ b/doc/source/analyzing/units.rst @@ -160,6 +160,9 @@ about the dataset's code unit system and can convert data into it. Unit objects from ``unyt`` or ``yt.units`` will not know about any particular dataset's unit system. + +.. _cosmological-units: + Comoving units for Cosmological Simulations ------------------------------------------- diff --git a/doc/source/conf.py b/doc/source/conf.py index 569e9aa79dd..67fcd4e67c2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,8 +47,8 @@ extensions.append("pythonscript_sphinxext") try: - import nbconvert # NOQA: F401 - import RunNotebook # NOQA: F401 + import nbconvert # noqa: F401 + import RunNotebook # noqa: F401 if not on_rtd: extensions.append("RunNotebook.notebook_sphinxext") @@ -70,16 +70,16 @@ # General information about the project. project = "The yt Project" -copyright = "2013-2020, the yt Project" +copyright = "2013-2021, the yt Project" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = "4.0-dev" +version = "4.1-dev" # The full version, including alpha/beta/rc tags. -release = "4.0-dev" +release = "4.1-dev" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
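To make the ``save_as_dataset`` round trip shown above concrete, here is a minimal sketch; it assumes only that the ``enzo_tiny_cosmology`` sample dataset used throughout these docs is available locally, and any geometric data container works the same way:

.. code-block:: python

    import yt

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))

    # Explicit (field_type, field_name) tuples avoid any ambiguity
    # about which "density" is meant.
    fn = sphere.save_as_dataset(fields=[("gas", "density")])

    # The extracted data reloads as a first-class dataset and is
    # queried with the same tuple syntax.
    sphere_ds = yt.load(fn)
    ad = sphere_ds.all_data()
    print(ad["gas", "density"])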
@@ -130,6 +130,7 @@ bootstrap_version="3", bootswatch_theme="readable", navbar_links=[ + ("", ""), # see https://github.com/yt-project/yt/pull/3423 ("How to get help", "help/index"), ("Quickstart notebooks", "quickstart/index"), ("Cookbook", "cookbook/index"), @@ -253,7 +254,7 @@ intersphinx_mapping = { "https://docs.python.org/3/": None, "https://ipython.readthedocs.io/en/stable/": None, - "https://docs.scipy.org/doc/numpy/": None, + "https://numpy.org/doc/stable/": None, "https://matplotlib.org/stable/": None, "https://docs.astropy.org/en/stable": None, "https://pandas.pydata.org/pandas-docs/stable": None, @@ -265,7 +266,6 @@ if not on_rtd: autosummary_generate = glob.glob("reference/api/api.rst") -# as of Sphinx/1.6.1 this is the supported way to link custom style sheets -# see: https://github.com/ryan-roemer/sphinx-bootstrap-theme#adding-custom-css +# as of Sphinx 3.1.2 this is the supported way to link custom style sheets def setup(app): - app.add_stylesheet("custom.css") + app.add_css_file("custom.css") diff --git a/doc/source/cookbook/amrkdtree_downsampling.py b/doc/source/cookbook/amrkdtree_downsampling.py index fd3888172f4..7bb21b62cd8 100644 --- a/doc/source/cookbook/amrkdtree_downsampling.py +++ b/doc/source/cookbook/amrkdtree_downsampling.py @@ -60,7 +60,7 @@ tf.grey_opacity = True sc.save("v3.png", sigma_clip=6.0) # -## That seemed to pick out som interesting structures. Now let's bump up the +## That seemed to pick out some interesting structures. Now let's bump up the ## opacity. # tf.clear() diff --git a/doc/source/cookbook/complex_plots.rst b/doc/source/cookbook/complex_plots.rst index f47a49c1d06..ea8c4c6e9d6 100644 --- a/doc/source/cookbook/complex_plots.rst +++ b/doc/source/cookbook/complex_plots.rst @@ -65,7 +65,7 @@ matter. On the other hand, increasing these without increasing ``buff_size`` accordingly will simply blow up your resolution elements to fill several real pixels. -4. (only for meshed particle data) ``n_ref``, the maximum nubmer of +4. (only for meshed particle data) ``n_ref``, the maximum number of particles in a cell in the oct-tree allowed before it is refined (removed in yt-4.0 as particle data is no longer deposited onto an oct-tree). For particle data, ``n_ref`` effectively sets the @@ -421,26 +421,6 @@ sources. .. yt_cookbook:: vol-lines.py -.. _cookbook-opengl_vr: - -Advanced Interactive Data Visualization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This recipe demonstrates how to manually create all components required to -start the Interactive Data Visualization. For more information see -:ref:`interactive_data_visualization`. - -.. yt_cookbook:: opengl_vr.py - -Embedding Interactive Data Visualization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This recipe demonstrates how to embed the Interactive Data Visualization inside -the Jupyter notebook. For more information see -:ref:`interactive_data_visualization`. - -.. 
yt_cookbook:: opengl_ipython.py - Plotting Streamlines ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/cookbook/fits_radio_cubes.ipynb b/doc/source/cookbook/fits_radio_cubes.ipynb index 665ed3c8a8d..82db0ac4ba4 100644 --- a/doc/source/cookbook/fits_radio_cubes.ipynb +++ b/doc/source/cookbook/fits_radio_cubes.ipynb @@ -61,7 +61,7 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"z\", [\"gas\", \"intensity\"], origin=\"native\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"fits\", \"intensity\"), origin=\"native\")\n", "slc.show()" ] }, @@ -137,7 +137,7 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"z\", [(\"gas\", \"intensity\")], center=new_center, origin=\"native\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"fits\", \"intensity\"), center=new_center, origin=\"native\")\n", "slc.show()" ] }, @@ -157,7 +157,7 @@ "outputs": [], "source": [ "new_center[2] = ds.spec2pixel(-100000.*u.m/u.s)\n", - "slc = yt.SlicePlot(ds, \"z\", [(\"gas\", \"intensity\")], center=new_center, origin=\"native\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"fits\", \"intensity\"), center=new_center, origin=\"native\")\n", "slc.show()" ] }, @@ -170,7 +170,7 @@ "outputs": [], "source": [ "new_center[2] = ds.spec2pixel(-150000.*u.m/u.s)\n", - "slc = yt.SlicePlot(ds, \"z\", [(\"gas\", \"intensity\")], center=new_center, origin=\"native\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"fits\", \"intensity\"), center=new_center, origin=\"native\")\n", "slc.show()" ] }, @@ -196,7 +196,7 @@ }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", [(\"gas\", \"intensity\")], origin=\"native\")\n", + "prj = yt.ProjectionPlot(ds, \"z\", (\"fits\", \"intensity\"), origin=\"native\")\n", "prj.show()" ] }, @@ -215,7 +215,7 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"x\", [(\"gas\", \"intensity\")], origin=\"native\", window_size=(8,8))\n", + "slc = yt.SlicePlot(ds, \"x\", (\"fits\", \"intensity\"), origin=\"native\", window_size=(8,8))\n", "slc.show()" ] }, @@ -227,7 +227,7 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", window_size=(8,8))\n", + "slc = yt.SlicePlot(ds, \"y\", (\"fits\", \"intensity\"), origin=\"native\", window_size=(8,8))\n", "slc.show()" ] }, @@ -279,7 +279,7 @@ "outputs": [], "source": [ "dd = ds.all_data() # A region containing the entire dataset\n", - "extrema = dd.quantities.extrema((\"gas\", \"temperature\"))\n", + "extrema = dd.quantities.extrema((\"fits\", \"temperature\"))\n", "print (extrema)" ] }, @@ -298,9 +298,9 @@ }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", (\"gas\", \"temperature\"), origin=\"native\", \n", + "prj = yt.ProjectionPlot(ds, \"z\", (\"fits\", \"temperature\"), origin=\"native\", \n", " weight_field=(\"index\", \"ones\")) # \"ones\" weights each cell by 1\n", - "prj.set_log((\"gas\", \"temperature\"), True)\n", + "prj.set_log((\"fits\", \"temperature\"), True)\n", "prj.show()" ] }, @@ -319,7 +319,7 @@ }, "outputs": [], "source": [ - "pplot = yt.ProfilePlot(dd, (\"gas\", \"temperature\"), [(\"index\", \"ones\")], weight_field=None, n_bins=128)\n", + "pplot = yt.ProfilePlot(dd, (\"fits\", \"temperature\"), [(\"index\", \"ones\")], weight_field=None, n_bins=128)\n", "pplot.show()" ] }, @@ -338,7 +338,7 @@ }, "outputs": [], "source": [ - "fc = dd.cut_region([\"obj['gas', 'temperature'] > 0\"])" + "fc = dd.cut_region(['obj[\"fits\", \"temperature\"] > 0'])" ] }, { @@ -356,7 +356,7 @@ }, "outputs": [], "source": [ - "print (fc.quantities.extrema((\"gas\", \"temperature\")))" + "print 
(fc.quantities.extrema((\"fits\", \"temperature\")))" ] }, { @@ -374,7 +374,7 @@ }, "outputs": [], "source": [ - "fc.quantities.weighted_average_quantity((\"gas\", \"temperature\"), (\"index\", \"ones\"))" + "fc.quantities.weighted_average_quantity((\"fits\", \"temperature\"), (\"index\", \"ones\"))" ] }, { @@ -392,9 +392,9 @@ }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", [(\"gas\", \"temperature\")], data_source=fc, origin=\"native\", \n", + "prj = yt.ProjectionPlot(ds, \"z\", [(\"fits\", \"temperature\")], data_source=fc, origin=\"native\", \n", " weight_field=(\"index\", \"ones\")) # \"ones\" weights each cell by 1\n", - "prj.set_log((\"gas\", \"temperature\"), True)\n", + "prj.set_log((\"fits\", \"temperature\"), True)\n", "prj.show()" ] }, @@ -450,7 +450,7 @@ }, "outputs": [], "source": [ - "print (box_reg.quantities.extrema((\"gas\", \"temperature\")))" + "print (box_reg.quantities.extrema((\"fits\", \"temperature\")))" ] }, { @@ -468,10 +468,10 @@ }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", (\"gas\", \"temperature\"), origin=\"native\", \n", + "prj = yt.ProjectionPlot(ds, \"z\", (\"fits\", \"temperature\"), origin=\"native\",\n", " data_source=box_reg, weight_field=(\"index\", \"ones\")) # \"ones\" weights each cell by 1\n", - "prj.set_zlim((\"gas\", \"temperature\"), 1.0e-2, 1.5)\n", - "prj.set_log((\"gas\", \"temperature\"), True)\n", + "prj.set_zlim((\"fits\", \"temperature\"), 1.0e-2, 1.5)\n", + "prj.set_log((\"fits\", \"temperature\"), True)\n", "prj.show()" ] } diff --git a/doc/source/cookbook/geographic_xforms_and_projections.ipynb b/doc/source/cookbook/geographic_xforms_and_projections.ipynb index c302acfdda0..e9fe9fb5287 100644 --- a/doc/source/cookbook/geographic_xforms_and_projections.ipynb +++ b/doc/source/cookbook/geographic_xforms_and_projections.ipynb @@ -153,7 +153,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will crate a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot. " + "Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will create a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot. 
" ] }, { diff --git a/doc/source/cookbook/opengl_ipython.py b/doc/source/cookbook/opengl_ipython.py deleted file mode 100644 index 146ac0a4366..00000000000 --- a/doc/source/cookbook/opengl_ipython.py +++ /dev/null @@ -1,30 +0,0 @@ -import yt -from yt.visualization.volume_rendering import glfw_inputhook # NOQA: F401 -from yt.visualization.volume_rendering.interactive_loop import RenderingContext -from yt.visualization.volume_rendering.interactive_vr import ( - BlockCollection, - SceneGraph, - TrackballCamera, -) - -rc = RenderingContext(1280, 960) - -scene = SceneGraph() -collection = BlockCollection() - -ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - -ad = ds.all_data() -collection.add_data(ad, ("gas", "density")) - -scene.add_collection(collection) - -position = (1.0, 1.0, 1.0) -c = TrackballCamera(position=position, focus=ds.domain_center, near_plane=0.1) - -callbacks = rc.setup_loop(scene, c) -rl = rc(scene, c, callbacks) - -# To make this work from IPython execute: -# -# glfw_inputhook.inputhook_manager.enable_gui("glfw", app=rl) diff --git a/doc/source/cookbook/opengl_vr.py b/doc/source/cookbook/opengl_vr.py deleted file mode 100644 index c1b19766386..00000000000 --- a/doc/source/cookbook/opengl_vr.py +++ /dev/null @@ -1,28 +0,0 @@ -import yt -from yt.visualization.volume_rendering.interactive_loop import RenderingContext -from yt.visualization.volume_rendering.interactive_vr import ( - BlockCollection, - SceneGraph, - TrackballCamera, -) - -ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - -# Create GLUT window -rc = RenderingContext(1280, 960) - -# Create a 3d Texture from all_data() -collection = BlockCollection() -dd = ds.all_data() -collection.add_data(dd, "density") - -# Initialize basic Scene and pass the data -scene = SceneGraph() -scene.add_collection(collection) - -# Create default camera -position = (1.0, 1.0, 1.0) -c = TrackballCamera(position=position, focus=ds.domain_center, near_plane=0.1) - -# Start rendering loop -rc.start_loop(scene, c) diff --git a/doc/source/cookbook/streamlines.py b/doc/source/cookbook/streamlines.py index 33c279f784e..2b177486870 100644 --- a/doc/source/cookbook/streamlines.py +++ b/doc/source/cookbook/streamlines.py @@ -33,7 +33,9 @@ # Create a 3D plot, trace the streamlines through the 3D volume of the plot fig = plt.figure() -ax = Axes3D(fig) +ax = Axes3D(fig, auto_add_to_figure=False) +fig.add_axes(ax) + for stream in streamlines.streamlines: stream = stream[np.all(stream != 0.0, axis=1)] ax.plot3D(stream[:, 0], stream[:, 1], stream[:, 2], alpha=0.1) diff --git a/doc/source/cookbook/streamlines_isocontour.py b/doc/source/cookbook/streamlines_isocontour.py index 46f442d6393..abe05da4e28 100644 --- a/doc/source/cookbook/streamlines_isocontour.py +++ b/doc/source/cookbook/streamlines_isocontour.py @@ -32,7 +32,8 @@ # Create a 3D matplotlib figure for visualizing the streamlines fig = plt.figure() -ax = Axes3D(fig) +ax = Axes3D(fig, auto_add_to_figure=False) +fig.add_axes(ax) # Trace the streamlines through the volume of the 3D figure for stream in streamlines.streamlines: diff --git a/doc/source/cookbook/surface_plot.py b/doc/source/cookbook/surface_plot.py index 43eac8c34fb..e9929095913 100644 --- a/doc/source/cookbook/surface_plot.py +++ b/doc/source/cookbook/surface_plot.py @@ -20,7 +20,7 @@ # Create a 3D matplotlib figure for visualizing the surface fig = plt.figure() -ax = fig.gca(projection="3d") +ax = fig.add_subplot(projection="3d") p3dc = Poly3DCollection(surface.triangles, linewidth=0.0) # Set the surface colors in the 
right scaling [0,1] diff --git a/doc/source/cookbook/various_lens.py b/doc/source/cookbook/various_lens.py index a8d1f16becb..c26f5d22386 100644 --- a/doc/source/cookbook/various_lens.py +++ b/doc/source/cookbook/various_lens.py @@ -5,7 +5,7 @@ field = ("gas", "density") -# normal_vector points from camera to the center of tbe final projection. +# normal_vector points from camera to the center of the final projection. # Now we look at the positive x direction. normal_vector = [1.0, 0.0, 0.0] # north_vector defines the "top" direction of the projection, which is @@ -21,7 +21,7 @@ # Plane-parallel lens cam = sc.add_camera(ds, lens_type="plane-parallel") -# Set the resolution of tbe final projection. +# Set the resolution of the final projection. cam.resolution = [250, 250] # Set the location of the camera to be (x=0.2, y=0.5, z=0.5) # For plane-parallel lens, the location info along the normal_vector (here diff --git a/doc/source/cookbook/yt_gadget_analysis.ipynb b/doc/source/cookbook/yt_gadget_analysis.ipynb index a9e6ca7d3ab..d1fb6605184 100644 --- a/doc/source/cookbook/yt_gadget_analysis.ipynb +++ b/doc/source/cookbook/yt_gadget_analysis.ipynb @@ -160,9 +160,9 @@ }, "outputs": [], "source": [ - "density = ad[(\"PartType0\",\"density\")]\n", + "density = ad[\"PartType0\",\"density\"]\n", "wdens = np.where(density == np.max(density))\n", - "coordinates = ad[(\"PartType0\",\"Coordinates\")]\n", + "coordinates = ad[\"PartType0\",\"Coordinates\"]\n", "center = coordinates[wdens][0]\n", "print ('center = ',center)" ] diff --git a/doc/source/developing/building_the_docs.rst b/doc/source/developing/building_the_docs.rst index fae00dd6fc5..f92dee20d54 100644 --- a/doc/source/developing/building_the_docs.rst +++ b/doc/source/developing/building_the_docs.rst @@ -85,13 +85,12 @@ cross-referencing with API documentation that is automatically generated at build time by Sphinx. We also use Sphinx to run code snippets (e.g. the cookbook and the notebooks) and embed resulting images and example data. -You will want to make sure you have both Sphinx and the Sphinx Bootstrap Theme -installed. This installation is easily performed by running this at the -command line: +Essential tools for building the docs can be installed alongside yt itself. From +the top level of a local copy, run .. code-block:: bash - pip install sphinx sphinx_bootstrap_theme + $ python -m pip install -e .[doc] Quick versus Full Documentation Builds ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -113,21 +112,13 @@ dynamically execute and render the cookbook recipes, the notebooks, etc. Building the Docs (Quick) ^^^^^^^^^^^^^^^^^^^^^^^^^ -You will need to have the yt repository available on your computer, which -is done by default if you have yt installed. In addition, you need a -current version of Sphinx_ (1.1.3) documentation software installed, as -well as the Sphinx -`Bootstrap theme <https://github.com/ryan-roemer/sphinx-bootstrap-theme>`_, -which can be installed via ``pip install sphinx_bootstrap_theme``. - In order to tell Sphinx not to do all of the dynamic building, you must set the -``$READTHEDOCS`` environment variable to be True by typing this at the command -line: +``$READTHEDOCS`` environment variable to be ``True`` by running this from the +command line (using bash syntax, for example): .. code-block:: bash - export READTHEDOCS=True # for bash - setenv READTHEDOCS True # for csh + export READTHEDOCS=True This variable is set for automated builds on the free ReadTheDocs service but can be used by anyone to force a quick, minimal build.
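If you prefer to drive the quick build from Python rather than the shell, the following sketch should be equivalent; it assumes Sphinx is installed (for example via the ``doc`` extra above) and that it is run from the top level of the repository:

.. code-block:: python

    import os

    from sphinx.cmd.build import build_main

    # Setting READTHEDOCS skips the slow, dynamic parts of the build,
    # as described above.
    os.environ["READTHEDOCS"] = "True"

    # Equivalent to running `make html` from the doc/ directory:
    # builder name, source directory, output directory.
    build_main(["-b", "html", "doc/source", "doc/build/html"])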
diff --git a/doc/source/developing/creating_derived_fields.rst b/doc/source/developing/creating_derived_fields.rst index bc97836604a..dd3e436c428 100644 --- a/doc/source/developing/creating_derived_fields.rst +++ b/doc/source/developing/creating_derived_fields.rst @@ -22,7 +22,11 @@ this approach. def _pressure(field, data): - return (data.ds.gamma - 1.0) * data["density"] * data["specific_thermal_energy"] + return ( + (data.ds.gamma - 1.0) + * data["gas", "density"] + * data["gas", "specific_thermal_energy"] + ) Note that we do a couple different things here. We access the ``gamma`` parameter from the dataset, we access the ``density`` field and we access @@ -35,7 +39,7 @@ In this example, the ``density`` field will return data with units of ``erg/g``, so the result will automatically have units of pressure, ``erg/cm**3``. This assumes the unit system is set to the default, which is CGS: if a different unit system is selected, the result will be in the same -dimensions of pressure but different units. See :ref:`unit_systems` for more +dimensions of pressure but different units. See :ref:`units` for more information. Once we've defined our function, we need to notify yt that the field is @@ -72,7 +76,7 @@ The units parameter is a "raw" string, in the format that yt uses in its :ref:`symbolic units implementation <symbolic_units>` (e.g., employing only unit names, numbers, and mathematical operators in the string, and using ``"**"`` for exponentiation). For cosmological datasets and fields, see -:ref:`cosmological-units`. We suggest that you name the function that creates +:ref:`cosmological-units <cosmological-units>`. We suggest that you name the function that creates a derived field with the intended field name prefixed by a single underscore, as in the ``_pressure`` example above.
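For reference, a complete, self-contained version of the pattern these hunks describe might look like the sketch below; the ``GasSloshing`` sample dataset is assumed purely for the sake of a runnable example:

.. code-block:: python

    import yt


    def _pressure(field, data):
        # Ideal-gas pressure, (gamma - 1) * rho * e, built from the
        # explicit ("gas", ...) field tuples used throughout this change.
        return (
            (data.ds.gamma - 1.0)
            * data["gas", "density"]
            * data["gas", "specific_thermal_energy"]
        )


    yt.add_field(
        ("gas", "pressure"),
        function=_pressure,
        sampling_type="cell",
        units="dyne/cm**2",
    )

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
    print(ds.all_data()["gas", "pressure"])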
@@ -100,9 +104,9 @@ instances by making use of the Lastly, if you do not know the units of your field ahead of time, you can specify ``units='auto'`` in the call to ``add_field`` for your field. This will automatically determine the appropriate units based on the units of the data -returned by the field function. This is also a good way to let your derived fields -be automatically converted to the units of the :ref:`unit system <unit_systems>` in -your dataset. +returned by the field function. This is also a good way to let your derived +fields be automatically converted to the units of the unit system in your +dataset. If ``units='auto'`` is set, it is also required to set the ``dimensions`` keyword argument so that error-checking can be done on the derived field to make sure that @@ -115,7 +119,11 @@ the dimensionality of the returned array and the field are the same: def _pressure(field, data): - return (data.ds.gamma - 1.0) * data["density"] * data["specific_thermal_energy"] + return ( + (data.ds.gamma - 1.0) + * data["gas", "density"] + * data["gas", "specific_thermal_energy"] + ) yt.add_field( @@ -141,7 +149,11 @@ the previous example: @derived_field(name="pressure", sampling_type="cell", units="dyne/cm**2") def _pressure(field, data): - return (data.ds.gamma - 1.0) * data["density"] * data["specific_thermal_energy"] + return ( + (data.ds.gamma - 1.0) + * data["gas", "density"] + * data["gas", "specific_thermal_energy"] + ) The :func:`derived_field` decorator takes the same arguments as :func:`add_field`, and is often a more convenient shorthand in cases where @@ -164,9 +176,9 @@ dataset objects. The calling syntax is the same: units="dyne/cm**2", ) -If you specify fields in this way, you can take advantage of the dataset's -:ref:`unit system <unit_systems>` to define the units for you, so that -the units will be returned in the units of that system: +If you specify fields in this way, you can take advantage of the dataset's unit +system to define the units for you, so that the units will be returned in the +units of that system: .. code-block:: python @@ -219,9 +231,9 @@ transparent and simple example). yv = data["gas", "velocity_y"] - bv[1] zv = data["gas", "velocity_z"] - bv[2] center = data.get_field_parameter("center") - x_hat = data["x"] - center[0] - y_hat = data["y"] - center[1] - z_hat = data["z"] - center[2] + x_hat = data["gas", "x"] - center[0] + y_hat = data["gas", "y"] - center[1] + z_hat = data["gas", "z"] - center[2] r = np.sqrt(x_hat * x_hat + y_hat * y_hat + z_hat * z_hat) x_hat /= r y_hat /= r @@ -279,11 +291,11 @@ For example, let's write a field that depends on a field parameter named ``'axis def my_axis_field(field, data): axis = data.get_field_parameter("axis") if axis == 0: - return data["x-velocity"] + return data["gas", "velocity_x"] elif axis == 1: - return data["y-velocity"] + return data["gas", "velocity_y"] elif axis == 2: - return data["z-velocity"] + return data["gas", "velocity_z"] else: raise ValueError @@ -321,7 +333,7 @@ There are a number of options available, but the only mandatory ones are ``name`` ``function`` This is a function handle that defines the field ``units`` - This is a string that describes the units, or a query to a :ref:`UnitSystem <unit_systems>` + This is a string that describes the units, or a query to a UnitSystem object, e.g. ``ds.unit_system["energy"]``. Powers must be in Python syntax (``**`` instead of ``^``). Alternatively, it may be set to ``"auto"`` to have the units determined automatically. In this case, the ``dimensions`` keyword must be set to the diff --git a/doc/source/developing/creating_derived_quantities.rst b/doc/source/developing/creating_derived_quantities.rst deleted file mode 100644 index e6a69786243..00000000000 --- a/doc/source/developing/creating_derived_quantities.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _creating_derived_quantities: - -Creating Derived Quantities ---------------------------- - -.. warning:: This section is not yet updated to work with yt 3.0. If you - have a question about making a custom derived quantity, please - contact the mailing list. - -The basic idea is that you need to be able to operate both on a set of data, -and a set of sets of data. (If this is not possible, the quantity needs to be -added with the ``force_unlazy`` option.) - -Two functions are necessary. One will operate on arrays of data, either fed -from each grid individually or fed from the entire data object at once. The -second one takes the results of the first, either as lists of arrays or as -single arrays, and returns the final values. For an example, we look at the -``TotalMass`` function: - -..
code-block:: python - - def _TotalMass(data): - baryon_mass = data["mass"].sum() - particle_mass = data["particle_mass"].sum() - return baryon_mass, particle_mass - - - def _combTotalMass(data, baryon_mass, particle_mass): - return baryon_mass.sum() + particle_mass.sum() - - - add_quantity("TotalMass", function=_TotalMass, combine_function=_combTotalMass, n_ret=2) - -Once the two functions have been defined, we then call :func:`add_quantity` to -tell it the function that defines the data, the collator function, and the -number of values that get passed between them. In this case we return both the -particle and the baryon mass, so we have two total values passed from the main -function into the collator. diff --git a/doc/source/developing/creating_frontend.rst b/doc/source/developing/creating_frontend.rst index 61f12e11fb8..4056c2a3feb 100644 --- a/doc/source/developing/creating_frontend.rst +++ b/doc/source/developing/creating_frontend.rst @@ -37,12 +37,11 @@ Boostraping a new frontend To get started -* make a new directory in ``yt/frontends`` with the name of your code and add the name -into ``yt/frontends/api.py:_frontends`` (in alphabetical order). - -* copy the contents of the ``yt/frontends/_skeleton`` directory, and replace every -occurence of ``Skeleton`` with your frontend's name (preserving case). This adds a lot of -boilerplate for the required classes and methods that are needed. + * make a new directory in ``yt/frontends`` with the name of your code and add the name + into ``yt/frontends/api.py:_frontends`` (in alphabetical order). + * copy the contents of the ``yt/frontends/_skeleton`` directory, and replace every + occurrence of ``Skeleton`` with your frontend's name (preserving case). This + adds a lot of boilerplate for the required classes and methods that are needed. Data Meaning Structures @@ -158,7 +157,7 @@ example of how this is implemented in the FLASH frontend: This function should always be imported and called from within the ``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this function is used, converting between magnetic fields in different -:ref:`unit systems ` will be handled automatically. +unit systems will be handled automatically. Data Localization Structures ---------------------------- diff --git a/doc/source/developing/debugdrive.rst b/doc/source/developing/debugdrive.rst index a66daa9ebec..3ed352a2fab 100644 --- a/doc/source/developing/debugdrive.rst +++ b/doc/source/developing/debugdrive.rst @@ -53,7 +53,7 @@ Use the Python Debugger yt is almost entirely composed of python code, so it makes sense to use the `python debugger`_ as your first stop in trying to debug it. -.. _python debugger: https://docs.python.org/2/library/pdb.html +.. _python debugger: https://docs.python.org/3/library/pdb.html Signaling yt to Do Something ---------------------------- diff --git a/doc/source/developing/index.rst b/doc/source/developing/index.rst index 4bd6fe05cba..44af92b09bf 100644 --- a/doc/source/developing/index.rst +++ b/doc/source/developing/index.rst @@ -24,7 +24,6 @@ and contributing code! releasing creating_datatypes creating_derived_fields - creating_derived_quantities creating_frontend external_analysis deprecating_features diff --git a/doc/source/developing/releasing.rst b/doc/source/developing/releasing.rst index fb8a0fb532b..5325f849ac4 100644 --- a/doc/source/developing/releasing.rst +++ b/doc/source/developing/releasing.rst @@ -140,7 +140,7 @@ Access to yt-project.org mediated via SSH login. 
Please contact one of the current yt developers for access to the webserver running yt-project.org if you do not already have it. You will need a copy of your SSH public key so that your key can be added to the list of authorized keys. Once you login, use -e.g. ``scp`` to upload a copy of the souce distribution tarball to +e.g. ``scp`` to upload a copy of the source distribution tarball to https://yt-project.org/sdist, like so:: $ scp dist/yt-3.5.1.tar.gz yt_analysis@dickenson.dreamhost.com:yt-project.org/sdist diff --git a/doc/source/developing/testing.rst b/doc/source/developing/testing.rst index 24145b36f10..f36c43c142f 100644 --- a/doc/source/developing/testing.rst +++ b/doc/source/developing/testing.rst @@ -74,28 +74,35 @@ run: Handling yt dependencies ^^^^^^^^^^^^^^^^^^^^^^^^ -We attempt to make yt compatible with a wide variety of upstream software -versions. However, sometimes a specific version of a project that yt depends on +Our dependencies are specified in ``setup.cfg``. Hard dependencies are found in +``options.install_requires``, while optional dependencies are specified in +``options.extras_require``. The ``full`` target contains the specs to run our +test suite, which are intended to be as modern as possible (we don't set upper +limits on versions unless we need to). The ``minimal`` target is used to check +that we don't break backward compatibility with old versions of upstream +projects by accident. It is intended to strictly pin our minimal supported +versions. The ``test`` target specifies the tools needed to run the tests, but +not needed by yt itself. + +**Python version support.** +When a new Python version is released, it takes about +a month or two for yt to support it, since we're dependent on bigger projects +like numpy and matplotlib. We vow to follow numpy's deprecation plan regarding +our supported versions for Python and numpy, defined formally in `NEP 29 +<https://numpy.org/neps/nep-0029-deprecation_policy.html>`_. However, we try to +avoid bumping our minimal requirements shortly before a yt release. + +**Third party dependencies.** +Sometimes a specific version of a project that yt depends on causes some breakage and must be blacklisted in the tests or a more experimental project that yt depends on optionally might change sufficiently that the yt community decides not to support an old version of that project. -To handle cases like this, the versions of upstream software projects installed -on the machines running the yt test suite are pinned to specific version -numbers that must be updated manually. This prevents breaking the yt tests when -a new version of an upstream dependency is released and allows us to manage -updates in upstream projects at our pace. - -If you would like to add a new dependency for yt (even an optional dependency) -or would like to update a version of a yt dependency, you must edit the -``tests/test_requirements.txt`` file, this path is relative to the root of the -repository. This file contains an enumerated list of direct dependencies and -pinned version numbers. For new dependencies, simply append the name of the new -dependency to the end of the file, along with a pin to the latest version -number of the package. To update a package's version, simply update the version -number in the entry for that package. - -Finally, we also run a set of tests with "minimal" dependencies installed. Please make sure any new tests you add that depend on an optional dependency are properly set up so that the test is not run if the dependency is not installed. If for some reason you need to update the listing of packages that are installed for the "minimal" dependency tests, you will need to edit ``tests/test_minimal_requirements.txt``. +**Note.** +Some of our optional dependencies are not trivial to install and their support +may vary across platforms. To manage such issues, we currently use requirement +files in addition to ``setup.cfg``. They are found in +``tests/*requirements.txt`` and used in ``tests/ci_install.sh``.
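As a concrete illustration of a test that exercises only the hard dependencies, here is a hedged sketch built on ``yt.testing.fake_random_ds``, which constructs a small in-memory dataset so that no sample data or optional packages are required:

.. code-block:: python

    import numpy as np

    from yt.testing import fake_random_ds


    def test_density_extrema():
        # A 16^3 in-memory dataset with a single random gas field.
        ds = fake_random_ds(16, fields=("density",), units=("g/cm**3",))
        ad = ds.all_data()
        lo, hi = ad.quantities.extrema(("gas", "density"))
        assert float(lo) <= float(hi)
        assert np.isfinite(float(lo))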
How to Write Unit Tests ^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/examining/Loading_Generic_Array_Data.ipynb b/doc/source/examining/Loading_Generic_Array_Data.ipynb index 5c95aab0d27..6c99a10a48c 100644 --- a/doc/source/examining/Loading_Generic_Array_Data.ipynb +++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb @@ -129,8 +129,8 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", - "slc.set_cmap(\"density\", \"Blues\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"gas\", \"density\"))\n", + "slc.set_cmap((\"gas\", \"density\"), \"Blues\")\n", "slc.annotate_grids(cmap=None)\n", "slc.show()" ] @@ -154,12 +154,12 @@ "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", "posy_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", "posz_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n", - "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n", + "data = dict(density = (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"),\n", " particle_position_x = (posx_arr, 'code_length'), \n", " particle_position_y = (posy_arr, 'code_length'),\n", " particle_position_z = (posz_arr, 'code_length'))\n", "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n", - "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n", + "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"),\n", " bbox=bbox, nprocs=4)" ] }, @@ -178,8 +178,8 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", - "slc.set_cmap(\"density\", \"Blues\")\n", + "slc = yt.SlicePlot(ds, \"z\", (\"gas\", \"density\"))\n", + "slc.set_cmap((\"gas\", \"density\"), \"Blues\")\n", "slc.annotate_particles(0.25, p_size=12.0, col=\"Red\")\n", "slc.show()" ] @@ -297,7 +297,7 @@ }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n", + "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\", \"Temperature\", \"Bx\"], weight_field=\"Density\")\n", "prj.set_log(\"z-velocity\", False)\n", "prj.set_log(\"Bx\", False)\n", "prj.show()" @@ -507,9 +507,9 @@ "outputs": [], "source": [ "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n", - "slc = yt.SlicePlot(ds, \"x\", [\"velocity_x\",\"velocity_y\",\"velocity_z\"])\n", + "slc = yt.SlicePlot(ds, \"x\", [(\"gas\", \"velocity_x\"), (\"gas\", \"velocity_y\"), (\"gas\", \"velocity_z\")])\n", "for ax in \"xyz\":\n", - " slc.set_log(\"velocity_%s\" % (ax), False)\n", + " slc.set_log((\"gas\", f\"velocity_{ax}\"), False)\n", "slc.annotate_velocity()\n", "slc.show()" ] @@ -623,7 +623,7 @@ }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n", + "slc = yt.SlicePlot(ds, \"z\", (\"gas\", \"density\"))\n", "slc.annotate_particles(0.25, p_size=15.0, col=\"Pink\")\n", "slc.show()" ] @@ -654,7 +654,7 @@ "posxb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n", "posyb_arr = 
np.random.uniform(low=-1.5, high=1.5, size=20000)\n", "poszb_arr = np.random.uniform(low=-1.5, high=1.5, size=20000)\n", - "data = {\"density\": (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"), \n", + "data = {(\"gas\", \"density\"): (np.random.random(size=(64,64,64)), \"Msun/kpc**3\"),\n", " (\"red\", \"particle_position_x\"): (posxr_arr, 'code_length'), \n", " (\"red\", \"particle_position_y\"): (posyr_arr, 'code_length'),\n", " (\"red\", \"particle_position_z\"): (poszr_arr, 'code_length'),\n", @@ -662,7 +662,7 @@ " (\"blue\", \"particle_position_y\"): (posyb_arr, 'code_length'),\n", " (\"blue\", \"particle_position_z\"): (poszb_arr, 'code_length')}\n", "bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])\n", - "ds = yt.load_uniform_grid(data, data[\"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n", + "ds = yt.load_uniform_grid(data, data[\"gas\", \"density\"][0].shape, length_unit=(1.0, \"Mpc\"), mass_unit=(1.0,\"Msun\"), \n", " bbox=bbox, nprocs=4)" ] }, diff --git a/doc/source/examining/Loading_Generic_Particle_Data.ipynb b/doc/source/examining/Loading_Generic_Particle_Data.ipynb index 024a4b08f44..5e5ef0a3b37 100644 --- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb +++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb @@ -74,7 +74,7 @@ "\n", "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])\n", "\n", - "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)" + "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, bbox=bbox)" ] }, { diff --git a/doc/source/examining/Loading_Spherical_Data.ipynb b/doc/source/examining/Loading_Spherical_Data.ipynb index 4b02e2708f0..82fb770b033 100644 --- a/doc/source/examining/Loading_Spherical_Data.ipynb +++ b/doc/source/examining/Loading_Spherical_Data.ipynb @@ -40,16 +40,16 @@ }, "outputs": [], "source": [ - "@yt.derived_field(name = \"sphx\", units = \"cm\", take_log=False)\n", + "@yt.derived_field(name=\"sphx\", units=\"cm\", take_log=False, sampling_type=\"cell\")\n", "def sphx(field, data):\n", " return np.cos(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n", - "@yt.derived_field(name = \"sphy\", units = \"cm\", take_log=False)\n", + "@yt.derived_field(name=\"sphy\", units=\"cm\", take_log=False, sampling_type=\"cell\")\n", "def sphy(field, data):\n", " return np.sin(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n", - "@yt.derived_field(name = \"sphz\", units = \"cm\", take_log=False)\n", + "@yt.derived_field(name=\"sphz\", units=\"cm\", take_log=False, sampling_type=\"cell\")\n", "def sphz(field, data):\n", " return np.cos(data[\"theta\"])*data[\"r\"]\n", - "@yt.derived_field(name = \"funfield\", units=\"cm\", take_log=False)\n", + "@yt.derived_field(name=\"funfield\", units=\"cm\", take_log=False, sampling_type=\"cell\")\n", "def funfield(field, data):\n", " return (np.sin(data[\"phi\"])**2 + np.cos(data[\"theta\"])**2) * (1.0*data[\"r\"].uq+data[\"r\"])" ] diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index 6cac61addb1..9be5d8c9c63 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -143,7 +143,7 @@ Appropriate errors are thrown for other combinations. * particle data: currently not supported (but might come later) * staggered grids (AMRVAC 2.2 and later): yt logs a warning if you load staggered datasets, but the flag is currently ignored. 
-* "stretched grids" as defined in AMRVAC have no correspondance in yt, +* "stretched grids" as defined in AMRVAC have no correspondence in yt, hence will never be supported. .. note:: @@ -533,8 +533,8 @@ to the z direction. print(ad["raw", "Ex"].shape) print(ds.field_info[("raw", "Bx")].nodal_flag) print(ad["raw", "Bx"].shape) - print(ds.field_info[("boxlib", "Bx")].nodal_flag) - print(ad["boxlib", "Bx"].shape) + print(ds.field_info["raw", "Bx"].nodal_flag) + print(ad["raw", "Bx"].shape) Here, the field ``('raw', 'Ex')`` is nodal in two directions, so four values per cell are returned, corresponding to the four edges in each cell on which the variable @@ -635,11 +635,11 @@ direction. ds.index ad = ds.all_data() print(ds.field_info[("enzo", "Ex")].nodal_flag) - print(ad["raw", "Ex"].shape) + print(ad["enzo", "Ex"].shape) print(ds.field_info[("enzo", "BxF")].nodal_flag) - print(ad["raw", "Bx"].shape) + print(ad["enzo", "Bx"].shape) print(ds.field_info[("enzo", "Bx")].nodal_flag) - print(ad["boxlib", "Bx"].shape) + print(ad["enzo", "Bx"].shape) Here, the field ``('enzo', 'Ex')`` is nodal in two directions, so four values per cell are returned, corresponding to the four edges in each cell on which the @@ -1251,7 +1251,7 @@ where :math:`n_e` and :math:`n_i` are the electron and ion number densities, .. rubric:: Caveats * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to - other :ref:`unit systems ` is also possible. + other unit systems is also possible. .. _loading-gadget-data: @@ -2097,7 +2097,7 @@ visualization of non-SPH particles. See the example below: # Make the SPH projection plot p = yt.ProjectionPlot(ds_dm, "z", ("io", "density"), center=center, width=(1, "Mpc")) - p.set_unit("density", "Msun/kpc**2") + p.set_unit(("io", "density"), "Msun/kpc**2") p.show() Here we see two new things. First, ``load_particles`` accepts a ``data_source`` @@ -2171,11 +2171,9 @@ containers. See :ref:`halo_containers` for more information. If you have access to both the halo catalog and the simulation snapshot from the same redshift, additional analysis can be performed for each halo using -:ref:`halo_catalog`. The resulting product can be reloaded in a similar manner +:ref:`halo-analysis`. The resulting product can be reloaded in a similar manner to the other halo catalogs shown here. -.. _adaptahop: - AdataHOP ^^^^^^^^ @@ -2443,7 +2441,7 @@ information. At this time, halo member particles cannot be loaded. YTHaloCatalog ^^^^^^^^^^^^^ -These are catalogs produced by the analysis discussed in :ref:`halo_catalog`. +These are catalogs produced by the analysis discussed in :ref:`halo-analysis`. In the case where multiple files were produced, one need only provide the path to a single one of them. The field type for all fields is "halos". The fields available here are similar to other catalogs. Any addition @@ -2521,7 +2519,7 @@ for each particle attribute/mesh (in Byte). 
@@ -2521,7 +2519,7 @@ for each particle attribute/mesh (in Byte). import yt ds = yt.load("example-3d/hdf5/data00000100.h5", open_pmd_virtual_gridsize=10e4) - sp = yt.SlicePlot(ds, "x", "rho") + sp = yt.SlicePlot(ds, "x", ("openPMD", "rho")) sp.show() Particle data is fully supported: @@ -2533,7 +2531,10 @@ Particle data is fully supported: ds = yt.load("example-3d/hdf5/data00000100.h5") ad = ds.all_data() ppp = yt.ParticlePhasePlot( - ad, "particle_position_y", "particle_momentum_y", "particle_weighting" + ad, + ("all", "particle_position_y"), + ("all", "particle_momentum_y"), + ("all", "particle_weighting"), ) ppp.show() @@ -2715,10 +2716,10 @@ It is possible to provide extra arguments to the load function when loading RAMS ds.right_edge == [1, 1, 1] # is True ad = ds.all_data() - ad["particle_position_x"].max() > 0.1 # _may_ be True + ad["all", "particle_position_x"].max() > 0.1 # _may_ be True bb = ds.box(left_edge=bbox[0], right_edge=bbox[1]) - bb["particle_position_x"].max() < 0.1 # is True + bb["all", "particle_position_x"].max() < 0.1 # is True .. note:: When using the bbox argument, yt will read all the CPUs @@ -2964,9 +2965,7 @@ Tipsy Data See :ref:`tipsy-notebook` and :ref:`loading-sph-data` for more details. yt also supports loading Tipsy data. Many of its characteristics are similar -to how Gadget data is loaded; specifically, it shares its definition of -indexing and mesh-identification with that described in -:ref:`particle-indexing-criteria`. +to how Gadget data is loaded. .. code-block:: python diff --git a/doc/source/examining/low_level_inspection.rst b/doc/source/examining/low_level_inspection.rst index 3a5d7062a1e..f57f80224d4 100644 --- a/doc/source/examining/low_level_inspection.rst +++ b/doc/source/examining/low_level_inspection.rst @@ -85,15 +85,15 @@ normal, you can access the grid as you would a normal object: .. code-block:: python g = ds.index.grids[1043] - print(g["density"]) - print(g["density"].min()) + print(g["gas", "density"]) + print(g["gas", "density"].min()) To access the raw data (as found in the file), use .. code-block:: python g = ds.index.grids[1043] - rho = g["density"].in_base("code") + rho = g["gas", "density"].in_base("code") .. _finding-data-at-fixed-points: @@ -123,9 +123,11 @@ to find the value of a mesh field at the location of the particles in a simulation, one could do:: ad = ds.all_data() - ppos = ad['all', 'particle_position'] + ppos = ad["all", "particle_position"] ppos_den_vel = ds.find_field_values_at_points( - ['density', 'velocity_x'], ppos) + [("gas", "density"), ("gas", "velocity_x")], + ppos + ) In this example, ``ppos_den_vel`` will be a list of arrays. The first array will contain the density values at the particle positions, the second will contain @@ -168,13 +170,13 @@ We can now access our underlying data at the lowest level by specifying what .. code-block:: python - print(all_data_level_0["density"].shape) + print(all_data_level_0["gas", "density"].shape) # (64, 64, 64) - print(all_data_level_0["density"]) + print(all_data_level_0["gas", "density"]) # array([[[ 1.92588925e-31, 1.74647692e-31, 2.54787518e-31, ..., - print(all_data_level_0["temperature"].shape) + print(all_data_level_0["gas", "temperature"].shape) # (64, 64, 64) If you create a covering grid that spans two child grids of a single parent
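To tie the covering-grid snippets together, here is a runnable sketch, assuming the ``IsolatedGalaxy`` sample dataset used elsewhere in these docs:

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # A level-2 covering grid spanning the whole domain; its shape is
    # domain_dimensions * refine_by**level.
    cg = ds.covering_grid(
        level=2,
        left_edge=ds.domain_left_edge,
        dims=ds.domain_dimensions * ds.refine_by**2,
    )
    print(cg["gas", "density"].shape)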
@@ -199,10 +201,10 @@ And let's see what's the density in the central location: .. code-block:: python - print(all_data_level_2["density"].shape) + print(all_data_level_2["gas", "density"].shape) (256, 256, 256) - print(all_data_level_2["density"][128, 128, 128]) + print(all_data_level_2["gas", "density"][128, 128, 128]) 1.7747457571203124e-31 There are two different types of covering grids: unsmoothed and smoothed. @@ -221,10 +223,10 @@ to reduce edge effects, it is a nearly identical process: 2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2 ** 2 ) - print(all_data_level_2_s["density"].shape) + print(all_data_level_2_s["gas", "density"].shape) (256, 256, 256) - print(all_data_level_2_s["density"][128, 128, 128]) + print(all_data_level_2_s["gas", "density"][128, 128, 128]) 1.763744852165591e-31 .. _examining-image-data-in-a-fixed-resolution-array: diff --git a/doc/source/faq/index.rst b/doc/source/faq/index.rst index 1e9fb1dcdbe..732bc7f53da 100644 --- a/doc/source/faq/index.rst +++ b/doc/source/faq/index.rst @@ -18,8 +18,8 @@ How can I tell what version of yt I'm using? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you run into problems with yt and you're writing to the mailing list -or contacting developers on IRC, they will likely want to know what version of -yt you're using. Oftentimes, you'll want to know both the yt version, +or contacting developers on Slack, they will likely want to know what version of +yt you're using. Oftentimes, you'll want to know both the yt version, as well as the last changeset that was committed to the branch you're using. To reveal this, go to a command line and type: @@ -27,54 +27,52 @@ To reveal this, go to a command line and type: $ yt version +The result will look something like this: + +.. code-block:: bash + yt module located at: - /Users/username/src/yt-conda/src/yt-git + /Users/mitchell/src/yt-conda/src/yt-git The current version of yt is: --- - Version = 3.4-dev - Changeset = 94033fca00e5 + Version = 4.0.dev0 + Changeset = 9f947a930ab4 --- - This installation CAN be automatically updated. -For more information on this topic, see :ref:`updating-yt`. + +For more information on this topic, see :ref:`updating`. .. _yt-3.0-problems: -I upgraded to yt 3.0 but my code no longer works. What do I do? +I upgraded to yt 4.0 but my code no longer works. What do I do? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Because there are a lot of backwards-incompatible changes in yt 3.0 (see -:ref:`yt3differences`, it can -be a daunting effort in transitioning old scripts from yt 2.x to 3.0. -We have tried to describe the basic process of making that transition -in :ref:`transitioning-to-3.0`. If you just want to change back to yt 2.x -for a while until you're ready to make the transition, you can follow -the instructions in :ref:`switching-between-yt-versions`. +We've tried to keep the number of backward-incompatible changes to a minimum +with the release of yt-4.0, but because of the wide-reaching changes to how +yt manages data, there may be updates you have to make. +You can see many of the changes in :ref:`yt4differences`, and +:ref:`transitioning-to-4.0` offers helpful tips on how to update your scripts. Code Errors and Failures ------------------------ -yt fails saying that it cannot import yt modules -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python fails saying that it cannot import yt modules +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is commonly exhibited with an error about not being able to import code that is part of yt.
This is likely because the code that is failing to import -needs to be compiled or recompiled, You can do this automatically by running: +needs to be compiled or recompiled. -.. code-block:: bash - - cd $YT_GIT - pip install -e . +This error tends to occur when there are changes in the underlying Cython files +that need to be rebuilt, like after a major code update or when switching +between distant branches. -where ``$YT_GIT`` is the path to the yt git repository. +This is solved by running the install command again. See +:ref:`install-from-source`. -This error tends to occur when there are changes in the underlying cython -files that need to be rebuilt, like after a major code update or in switching -from 2.x to 3.x. For more information on this, see -:ref:`switching-between-yt-versions`. .. _faq-mpi4py: @@ -97,9 +95,9 @@ the pip interface. At the command line, type: .. code-block:: bash - pip install mpi4py + $ python -m pip install mpi4py -What this does is it finds your default installation of python (presumably +What this does is find your default installation of Python (presumably in the yt source directory), and it installs the mpi4py module. If this action is successful, you should never have to worry about your aforementioned problems again. If, on the other hand, this installation fails (as it does on @@ -110,7 +108,7 @@ If this is the case, you can specify them explicitly as per: .. code-block:: bash - env MPICC=/path/to/MPICC pip install mpi4py + $ env MPICC=/path/to/MPICC python -m pip install mpi4py So for example, on Kraken, I switch to the gnu C compilers (because yt doesn't work with the portland group C compilers), then I discover that cc is the mpi-enabled C compiler (and it is in my path), so I run: @@ -118,8 +116,8 @@ cc is the mpi-enabled C compiler (and it is in my path), so I run: .. code-block:: bash - module swap PrgEnv-pgi PrgEnv-gnu - env MPICC=cc pip install mpi4py + $ module swap PrgEnv-pgi PrgEnv-gnu + $ env MPICC=cc python -m pip install mpi4py And voila! It installs! If this *still* fails for you, then you can build and install from source and specify the mpi-enabled c and c++ @@ -133,19 +131,13 @@ Units .. _conversion-factors: -How do I get the convert between code units and physical units for my dataset? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Converting between physical units and code units is a common task. In yt-2.x, -the syntax for getting conversion factors was in the units dictionary -(``pf.units['kpc']``). So in order to convert a variable ``x`` in code units to -kpc, you might run: - -.. code-block:: python - - x = x * pf.units["kpc"] +How do I convert between code units and physical units for my dataset? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In yt-3.0, this no longer works. Conversion factors are tied up in the +Starting with yt-3.0, and continuing to yt-4.0, yt uses an internal symbolic +unit system. In yt-3.0 this was bundled with the main yt codebase, and with +yt-4.0 it is now available as a separate package called `unyt +<https://unyt.readthedocs.io/>`_. Conversion factors are tied up in the ``length_unit``, ``times_unit``, ``mass_unit``, and ``velocity_unit`` attributes, which can be converted to any arbitrary desired physical unit: @@ -162,7 +154,7 @@ attributes, which can be converted to any arbitrary desired physical unit: print("Velocity unit: ", ds.velocity_unit.in_units("Mpc/year")) So to accomplish the example task of converting a scalar variable ``x`` in -code units to kpc in yt-3.0, you can do one of two things. If ``x`` is +code units to kpc in yt-4.0, you can do one of two things. If ``x`` is
If ``x`` is +code units to kpc in yt-4.0, you can do one of two things. If ``x`` is already a YTQuantity with units in ``code_length``, you can run: .. code-block:: python @@ -260,7 +252,7 @@ logged, you could type: ds = load("my_data") ds.index - ds.field_info["density"].take_log = False + ds.field_info["gas", "density"].take_log = False From that point forward, data products such as slices, projections, etc., would be presented in linear space. Note that you have to instantiate ds.index before @@ -336,7 +328,7 @@ end. That way the data will always be ordered correctly. As an example you can: my_ray = ds.ray(...) ray_sort = np.argsort(my_ray["t"]) - density = my_ray["density"][ray_sort] + density = my_ray["gas", "density"][ray_sort] There is also a full example in the :ref:`manual-line-plots` section of the docs. @@ -386,6 +378,9 @@ This means you can download these datasets to ``/big_drive/data_for_yt`` , add the appropriate item to ``~/.config/yt/yt.toml``, and no matter which directory you are in when running yt, it will also check in *that* directory. +In many cases, these are also available using the ``load_sample`` command, +described in :ref:`loading-sample-data`. + .. _faq-scroll-up: @@ -398,29 +393,10 @@ environment can use readline, run the following command: .. code-block:: bash - $ ~/yt/bin/pip install gnureadline + $ python -m pip install gnureadline .. _faq-old-data: -yt seems to be plotting from old data -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -yt does check the time stamp of the simulation so that if you -overwrite your data outputs, the new set will be read in fresh by -yt. However, if you have problems or the yt output seems to be -in someway corrupted, try deleting the ``.yt`` and -``.harray`` files from inside your data directory. If this proves to -be a persistent problem add the line: - -.. code-block:: python - - from yt.config import ytcfg - - ytcfg["yt", "serialize"] = "False" - -to the very top of your yt script. Turning off serialization is the default -behavior in yt-3.0. - .. _faq-log-level: How can I change yt's log level? diff --git a/doc/source/help/index.rst b/doc/source/help/index.rst index 1764efc01f3..d91f24358f2 100644 --- a/doc/source/help/index.rst +++ b/doc/source/help/index.rst @@ -34,51 +34,11 @@ Update to the latest version ---------------------------- Sometimes the pace of development is pretty fast on yt, particularly in the -development branch, so a fix to your problem may have already been developed -by the time you encounter it. Many users' problems can simply be corrected -by updating to the latest version of the code and/or its dependencies. If you -have installed the latest stable release of yt then you should update yt using -the package manager appropriate for your python installation. For example, with -``pip``:: - - $ pip install -U yt - -Or with conda:: - - $ conda update yt - -If you installed yt from a checkout of the git repository, you can update to -the latest development version by issuing the following command: - -.. code-block:: bash - - $ yt update - -.. _update-errors: - -Update errors -^^^^^^^^^^^^^ - -If for some reason the ``update`` command fails with errors, or any attempt at -loading yt either from the command line or from within python also fails, it -may simply mean you need to rebuild the yt source (some of the c-code in yt -needs to be rebuilt after major changes). You can do this by navigating to -the root of the yt git repository. 
If you installed with the all-in-one -installer script, this is the ``yt-/src/yt-git`` directory. Then -execute these commands: - -.. code-block:: bash - - $ pip install -e . - -Now try running yt again with: - -.. code-block:: bash - - $ yt --help - -If you continue to see errors, you should try contacting us via Slack, IRC or -email but you may have to reinstall yt (see :ref:`getting-and-installing-yt`). +development branch, so a fix to your problem may have already been developed by +the time you encounter it. Many users' problems can simply be corrected by +updating to the latest version of the code and/or its dependencies. If you have +installed the latest stable release of yt then you should update yt using the +package manager appropriate for your python installation. See :ref:updating. .. _search-the-documentation: @@ -117,26 +77,15 @@ Look at the source code We've done our best to make the source clean, and it is easily searchable from your computer. -If you have not done so already (see :ref:`source-installation`), clone a copy +If you have not done so already (see :ref:`install-from-source`), clone a copy of the yt git repository and make it the 'active' installation by doing -.. code-block:: bash - - $ pip install -e . - -in the root directory of the yt git repository. - -.. note:: - - This has already been done for you if you installed using the bash install - script. Building yt from source will not work if you do not have a C compiler - installed. Once inside the yt git repository, you can then search for the class, function, or keyword which is giving you problems with ``grep -r *``, which will recursively search throughout the code base. (For a much faster and cleaner experience, we recommend ``grin`` instead of ``grep -r *``. To install ``grin`` -with python, just type ``pip install grin``.) +with python, just type ``python -m pip install grin``.) So let's say that ``SlicePlot`` is giving you problems still, and you want to look at the source to figure out what is going on. @@ -177,25 +126,17 @@ the problem! .. _irc: -Go on Slack or IRC to ask a question ------------------------------------- +Go on Slack to ask a question +----------------------------- If you want a fast, interactive experience, you could try jumping into our Slack -or IRC channels to get your questions answered in a chatroom style environment. +to get your questions answered in a chatroom style environment. To join our slack channel you will need to request an invite by going to https://yt-project.org/development.html, click the "Join as @ Slack!" button, and fill out the form. You will get an invite as soon as an administrator approves your request. -Alternatively you can go to our IRC channel, which does not require an -invite. You don't even need to have any special IRC client in order to join the -IRC channel. We are the #yt channel on irc.freenode.net, but you can also -connect using your web browser by going to https://yt-project.org/irc.html . -There are usually 2-8 members of the user base and development team online, so -you'll probably get your answers quickly. Remember to bring the information -from the :ref:`last step `. - .. _mailing-list: Ask the mailing list @@ -237,7 +178,7 @@ Installation Issues ^^^^^^^^^^^^^^^^^^^ If you are having installation issues and nothing from the -:ref:`installation instructions ` seems to work, you should +:ref:`installation instructions ` seems to work, you should *definitely* email the ``yt-users`` email list. 
You should provide information about the host, the version of the code you are using, and the output of ``yt_install.log`` from your installation. We are very interested in making diff --git a/doc/source/index.rst b/doc/source/index.rst index bd2f9b81750..18562e2c14a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -130,16 +130,6 @@ Table of Contents

Catering yt to work for your exact use case

- - -

- Sharing Data -

- - -

The yt Hub

- -

@@ -188,6 +178,7 @@ Table of Contents intro/index installing yt Quickstart + yt4differences yt3differences cookbook/index visualizing/index @@ -195,7 +186,6 @@ Table of Contents analyzing/domain_analysis/index examining/index developing/index - sharing_data reference/index faq/index Getting Help diff --git a/doc/source/installing.rst b/doc/source/installing.rst index c743d4283bb..59aeec7f87c 100644 --- a/doc/source/installing.rst +++ b/doc/source/installing.rst @@ -1,4 +1,4 @@ -.. _getting-and-installing-yt: +.. _installing-yt: Getting and Installing yt ========================= @@ -10,589 +10,228 @@ Getting and Installing yt .. _getting-yt: -Getting yt +Disclaimer ---------- -In this document we describe several methods for installing yt. The method that -will work best for you depends on your precise situation: - -* If you do not have root access on your computer, are not comfortable managing - python packages, or are working on a machine where you are not allowed to, you - will probably want to use the bash all-in-one installation script. This - creates a python environment using the `miniconda python - distribution `_ and the - `conda `_ package manager inside of a single - folder in your home directory. See :ref:`install-script` for more details. - -* If you use the `Anaconda `_ python - distribution and already have ``conda`` installed, see - :ref:`anaconda-installation` for details on how to install yt using the - ``conda`` package manager. Note that this is currently the only supported - installation mechanism on Windows. - -* A viable alternative to the installation based on Anaconda is the use of the - `Intel Distribution for Python `_. - For `Parallel Computation `_ - on Intel architectures, especially on supercomputers, a large - `performance and scalability improvement `_ - over several common tasks has been demonstrated. - Detailed installation instructions are provided below as well, see :ref:`conda-intel-python`. - No change in the way yt is managed by ``conda`` is required. - -* Some operating systems have ``yt`` pre-built packages that can be - installed with the system package manager. Note that the packages in some of - these distributions may not be the most recent release. See :ref:`distro-packages` - for a list of available packages. You can always get the current stable - version of ``yt`` via ``conda`` as described in :ref:`anaconda-installation` - or via ``pip`` as described in :ref:`source-installation`. - -* If you want to build a development version of yt or are comfortable with - compilers and know your way around python packaging, - :ref:`source-installation` will probably be the best choice. If you have set - up python using a source-based package manager like `Homebrew - `_ or `MacPorts `_ this choice will - let you install yt using the python installed by the package - manager. Similarly, this will also work for python environments set up via - Linux package managers so long as you have the necessary compilers installed - (e.g. the ``build-essentials`` package on Debian and Ubuntu). +The Python ecosystem offers many viable tools to setup isolated +Python environments, including but not restricted to -.. note:: - See `Parallel Computation - `_ - for a discussion on using yt in parallel. +- `venv `_ (part of the Python standard library) +- `Anaconda/conda `_ +- `virtualenv `_ -.. _branches-of-yt: +We strongly recommend you choose and learn one. However, it is beyond the +scope of this page to cover every situation. 
-Branches of yt: ``main``, ``stable``, and ``yt-2.x`` -++++++++++++++++++++++++++++++++++++++++++++++++++++++ +We will show you how to install a stable release or from source, using conda +or pip, and we will *assume* that you do so in an isolated environment. -Before you install yt, you must decide which branch (i.e. version) of the code -you prefer to use: +Also note that each yt release supports a limited range of Python versions. +Here's a summary for most recent releases -* ``main`` -- The most up-to-date *development* version with the most current - features but sometimes unstable (the development version of the next release). -* ``stable`` -- The latest stable release of ``yt-3.x``. -* ``yt-2.x`` -- The last stable release of ``yt-2.x``. ++------------+------------+----------------------------------+ +| yt release | Python 2.7 | Python3 min | Python3 max | ++============+============+================+=================+ +| 4.1.x | no | 3.7 (expected) | 3.10 (expected) | ++------------+------------+----------------+-----------------+ +| 4.0.x | no | 3.6 | 3.9 | ++------------+------------+----------------+-----------------+ +| 3.6.x | no | 3.5 | 3.8 | ++------------+------------+----------------+-----------------+ +| 3.5.x | yes | 3.4 | 3.5 | ++------------+------------+----------------+-----------------+ -If this is your first time using the code, we recommend using ``stable``, unless -you specifically need some piece of brand-new functionality only available in -``main`` or need to run an old script developed for ``yt-2.x``. There were major -API and functionality changes made in yt for version 3.0. For a detailed -description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x -(e.g. branches ``main`` and ``stable``) see :ref:`yt3differences`. Lastly, don't -feel like you're locked into one branch when you install yt, because you can -easily change the active branch by following the instructions in -:ref:`switching-between-yt-versions`. +Where the Python3 max column is purely indicative and reflects the newest +*guaranteed* compatible version. -.. _install-script: -All-in-One Installation Script -++++++++++++++++++++++++++++++ +Getting yt +---------- -Because installation of all of the interlocking parts necessary to install yt -itself can be time-consuming, yt provides an all-in-one installation script -which downloads and builds a fully-isolated installation of Python that includes -NumPy, Matplotlib, H5py, git, and yt. +In this document we describe several methods for installing yt. The method that +will work best for you depends on your precise situation: -The install script supports UNIX-like systems, including Linux, OS X, and most -supercomputer and cluster environments. It is particularly suited for deployment -in environments where users do not have root access and can only install -software into their home directory. +* If you need a stable build, see :ref:`install-stable` -Since the install is fully-isolated in a single directory, if you get tired of -having yt on your system, you can just delete the directory and yt and all of -its dependencies will be removed from your system (no scattered files remaining -throughout your system). +* If you want to build the development version of yt see :ref:`install-from-source`. -.. _installing-yt: +.. 
_install-stable: -Running the Install Script -^^^^^^^^^^^^^^^^^^^^^^^^^^ +Installing a stable release ++++++++++++++++++++++++++++ -You can download the installation script with the following command: +The latest stable release can be obtained from Pypi with pip .. code-block:: bash - $ wget https://raw.githubusercontent.com/yt-project/yt/main/doc/install_script.sh - -If you do not have ``wget``, the following should also work: + $ python -m pip install --upgrade pip + $ python -m pip install --user yt -.. code-block:: bash - $ curl -OL https://raw.githubusercontent.com/yt-project/yt/main/doc/install_script.sh - -By default, the bash install script will create a python environment based on -the `miniconda python distribution `_, -and will install yt's dependencies using the `conda -`_ package manager. To avoid needing a -compilation environment to run the install script, yt itself will also be -installed using ``conda``. - -If you would like to customize your yt installation, you can edit the values of -several variables that are defined at the top of the script. - -If you would like to build yt from source, you will need to edit the install -script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the -yt git repository and build yt form source. The default is -``INST_YT_SOURCE=0``, which installs yt from a binary conda package. - -In addition, you can tell the install script to download and install some -additional packages --- currently these include -`PyX `_, the `Rockstar halo -finder `_, `SciPy `_, -`Astropy `_, -`Cartopy `_, -and the necessary dependencies for -:ref:`unstructured mesh rendering `. The script has -all of the options for installing optional packages near the top of the -file. You should be able to open it and edit it without any knowledge of bash -syntax. For example, to install scipy, change ``INST_SCIPY=0`` to -``INST_SCIPY=1``. - -To execute the install script, run: +Or using the Anaconda/Miniconda Python distributions .. code-block:: bash - $ bash install_script.sh - -Because the installer is downloading and building a variety of packages from -source, this will likely take a few minutes, especially if you have a slow -internet connection. You will get updates of its status at the command prompt -throughout. - -If you receive errors during this process, the installer will provide you -with a large amount of information to assist in debugging your problems. The -file ``yt_install.log`` will contain all of the ``stdout`` and ``stderr`` from -the entire installation process, so it is usually quite cumbersome. By looking -at the last few hundred lines (i.e. ``tail -500 yt_install.log``), you can -potentially figure out what went wrong. If you have problems, though, do not -hesitate to :ref:`contact us ` for assistance. + $ conda install --channel conda-forge yt -.. _activating-yt: -Activating Your Installation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _install-from-source: -Once the installation has completed, there will be instructions on how to set up -your shell environment to use yt. +Building from source +++++++++++++++++++++ -In particular, you will need to ensure that the installation's ``yt-conda/bin`` -directory is prepended to your ``PATH`` environment variable. +To build yt from source, you need ``git``, and a C compiler (such as ``gcc`` +or ``clang``). -For Bash-style shells, you can use the following command in a terminal session -to temporarily activate the yt installation: +Then run .. 
code-block:: bash - $ export PATH=/path/to/yt-conda/bin:$PATH - -and on csh-style shells: - -.. code-block:: csh + $ git clone https://github.com/yt-project/yt + $ cd yt + $ python -m pip install --upgrade pip + $ python -m pip install --user -e . - $ setenv PATH /path/to/yt-conda/bin:$PATH -If you would like to permanently activate yt, you can also update the init file -appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc) -to include the same command. +.. _optional-runtime-deps: -.. _updating-yt: +Leveraging optional yt runtime dependencies ++++++++++++++++++++++++++++++++++++++++++++ -Updating yt and Its Dependencies -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Some relatively heavy runtime dependencies are not included in your build by +default as they may be irrelevant in your workflow. Common examples include +h5py, mpi4py, astropy or scipy. yt implements a on-demand import mechanism that +allows it to run even when they are not installed *until they're needed*, in +which case it will raise an ``ImportError``, pointing to the missing requirement. -With many active developers, code development sometimes occurs at a furious -pace in yt. To make sure you're using the latest version of the code, run -this command at a command-line: +If you wish to get everything from the start, you may specify it when building +yt as by appending ``[full]`` to the target name when calling pip, i.e., .. code-block:: bash - $ conda update yt + $ # stable release + $ python -m pip install --user yt[full] + $ # from source + $ python -m pip install --user -e .[full] -If you want to update your dependencies, run: -.. code-block:: bash +.. _testing-installation: - $ conda update --all +Testing Your Installation ++++++++++++++++++++++++++ -If you have installed yt from source, you can use the following command to get -the latest development version of yt: +To test to make sure everything is installed properly, try running yt at +the command line: .. code-block:: bash - $ yt update + $ python -c "import yt" -.. _removing-yt: +If this runs without raising errors, you have successfully installed yt. Congratulations! -Removing yt and Its Dependencies -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Otherwise, read the error message carefully and follow any instructions it gives +you to resolve the issue. Do not hesitate to :ref:`contact us ` +so we can help you figure it out. -Because yt and its dependencies are installed in an isolated directory when -you use the script installer, you can easily remove yt and all of its -dependencies cleanly. Simply remove the install directory and its -subdirectories:: - $ rm -rf yt-conda -If you *really* had problems with the installation process, this is a last -defense for solving: remove and then fully :ref:`re-install ` -from the install script again. +.. _updating: -.. _anaconda-installation: +Updating yt ++++++++++++ -Installing yt Using Anaconda -++++++++++++++++++++++++++++ - -For both the Anaconda and Miniconda installations, make sure that the Anaconda -``bin`` directory is in your path, and then issue: +For pip-based installations: .. code-block:: bash - $ conda install -c conda-forge yt - -which will install stable branch of yt along with all of its dependencies. - -.. 
_conda-intel-python: + $ python -m pip install --upgrade yt -Using the Intel Distribution for Python from conda -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you use conda, you can install yt with the -`Intel Distribution for Python `_ -(recommended for performance in parallel computations on Intel architectures) instead of -the standard Anaconda distribution. First you need to add the intel channel: +For conda-based installations: .. code-block:: bash - $ conda config --add channels intel - -If you want, at this point you can create a separate environment and switch to it: - -.. code-block:: bash - - $ conda create -c intel -n yt_intel - $ conda activate yt_intel - -Now you need to install the remaining yt dependencies in your current environment. -The following provides the Intel-optimized versions of these underlying packages: - -.. code-block:: bash - - $ conda config --add channels intel - $ conda install -c intel numpy scipy mpi4py cython git sympy ipython matplotlib netCDF4 - -Then you can install yt normally, either from the conda-forge channel as above, or from source (see below). - -.. _distro-packages: - -yt Distribution Packages -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. note:: - - Since the third-party packages listed below are not officially supported by - yt developers, support should not be sought out on the project mailing lists - or Slack channels. All support requests related to these packages should be - directed to their official maintainers. - -While we recommended installing ``yt`` with either the ``conda`` or ``pip`` -package managers, a number of third-party packages exist for the distributions -listed below. If you can't find your distro here, you can always install -``yt``'s current stable version using ``conda`` or ``pip``, or build the latest -development version from source. - -.. image:: https://repology.org/badge/vertical-allrepos/python:yt.svg?header=yt%20packaging%20status - :target: https://repology.org/project/python:yt/versions - -.. note:: - - Please be aware that the packages in some of these distributions may be out-of-date! - -.. _conda-source-build: - -Building yt from Source For Conda-based Installs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -First, ensure that you have all build dependencies installed in your current -conda environment: - -.. code-block:: bash - - $ conda install -c conda-forge cython git sympy ipython matplotlib netCDF4 - -In addition, you will need a C compiler installed. - -Clone the yt repository with: - -.. code-block:: bash + $ conda update yt - $ git clone https://github.com/yt-project/yt -Once inside the yt directory, update to the appropriate branch and run -``pip install -e .``. For example, the following commands will allow -you to see the tip of the development branch. +For git-based installations (yt installed from source), we provide the following +one-liner facility .. code-block:: bash - $ git checkout main - $ pip install -e . - -This will make sure you are running a version of yt corresponding to the -most up-to-date source code. - -.. note:: - - Alternatively, you can replace ``pip install -e .`` with ``conda develop -b .``. - -.. _windows-installation: - -Installing yt on Windows -^^^^^^^^^^^^^^^^^^^^^^^^ - -Installation on 64-bit Microsoft Windows platforms is supported using Anaconda -(see :ref:`anaconda-installation`) and via ``pip``. - -.. 
_source-installation: - -Installing yt Using ``pip`` -+++++++++++++++++++++++++++ - -If you already have a python installation that you manage using ``pip`` you can -install the latest release of yt by doing:: - - $ pip install yt - -If you do not have root access you may need to append ``--user`` to install to a -location in your home folder. - -Installing yt from source -+++++++++++++++++++++++++ - -.. note:: + $ yt update - If you wish to install yt from source in a conda-based installation of yt, - see :ref:`conda-source-build`. +This will pull any changes from GitHub, and recompile yt if necessary. -To install yt from source, you must make sure you have yt's dependencies -installed on your system. Right now, the dependencies to build yt from -source include: -- ``git`` -- A C compiler such as ``gcc`` or ``clang`` -- ``Python >= 3.6`` +Uninstalling yt ++++++++++++++++ -In addition, building yt from source requires ``numpy`` and ``cython`` -which can be installed with ``pip``: +If you've installed via pip (either from Pypi or from source) .. code-block:: bash - $ pip install --upgrade numpy cython - -You may also want to install some of yt's optional dependencies, including -``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, -``astropy``, or ``cartopy``. + $ python -m pip uninstall yt -The source code for yt may be found on GitHub. If you prefer to install the -development version of yt instead of the latest stable release, you will need -``git`` to clone the official repo: +Or with conda .. code-block:: bash - $ git clone https://github.com/yt-project/yt - $ cd yt - $ git checkout main - $ pip install . --user --install-option="--prefix=" - -.. note:: + $ conda uninstall yt - If you maintain your own user-level python installation separate from the - OS-level python installation, you can leave off ``--user --install-option="--prefix="``, although - you might need ``sudo`` depending on where python is installed. See `This - StackOverflow discussion - `_ - if you are curious why ``--install-option="--prefix="`` is necessary on some systems. -This will install yt into a folder in your home directory -(e.g. ``$HOME/.local/lib64/python/site-packages`` on Linux, -``$HOME/Library/Python//lib/python/site-packages/`` on OSX) Please refer to -the ``setuptools`` documentation for the additional options. +TroubleShooting +--------------- -If you are unable to locate the ``yt`` executable (i.e. executing ``yt version`` +If you are unable to locate the yt executable (i.e. executing ``yt version`` at the bash command line fails), then you likely need to add the ``$HOME/.local/bin`` (or the equivalent on your OS) to your PATH. Some Linux distributions do not include this directory in the default search path. -If you choose this installation method, you do not need to run any activation -script since this will install yt into your global python environment. - -If you will be modifying yt, you can also make the clone of the yt git -repository the "active" installed copy: - -.. code-block:: bash - - $ git clone https://github.com/yt-project/yt - $ cd yt - $ git checkout main - $ pip install -e . --user --install-option="--prefix=" - -As above, you can leave off ``--user --install-option="--prefix="`` if you want to install yt into -the default package install path. If you do not have write access for this -location, you might need to use ``sudo``. 
- -Build errors with ``setuptools`` or ``distribute`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Building yt requires version 18.0 or higher of ``setuptools``. If you see error -messages about this package, you may need to update it. For example, with pip -via - -.. code-block:: bash - - $ pip install --upgrade setuptools - -or your preferred method. If you have ``distribute`` installed, you may also see -error messages for it if it's out of date. You can update with pip via - -.. code-block:: bash - - $ pip install --upgrade distribute - -or via your preferred method. - -Keeping yt Updated via Git -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to maintain your yt installation via updates straight from the -GitHub repository or if you want to do some development on your own, we -suggest you check out some of the :ref:`development docs `, -especially the sections on :ref:`Git ` and -:ref:`building yt from source `. - -You can also make use of the following command to keep yt up to date from the -command line: - -.. code-block:: bash - - $ yt update - -This will detect that you have installed yt from the git repository, pull any -changes from GitHub, and then recompile yt if necessary. - -.. _testing-installation: - -Testing Your Installation -------------------------- - -To test to make sure everything is installed properly, try running yt at -the command line: - -.. code-block:: bash - - $ yt --help - -If this works, you should get a list of the various command-line options for -yt, which means you have successfully installed yt. Congratulations! - -If you get an error, follow the instructions it gives you to debug the problem. -Do not hesitate to :ref:`contact us ` so we can help you -figure it out. There is also information at :ref:`update-errors`. - -If you like, this might be a good time to run the test suite, see :ref:`testing` -for more details. - -.. _switching-between-yt-versions: - -Switching versions of yt: ``yt-2.x``, ``stable``, and ``main`` branches -------------------------------------------------------------------------- - -Here we explain how to switch between different development branches of yt. - -If You Installed yt Using the Bash Install Script -+++++++++++++++++++++++++++++++++++++++++++++++++ - -The instructions for how to switch between branches depend on whether you ran -the install script with ``INST_YT_SOURCE=0`` (the default) or -``INST_YT_SOURCE=1``. You can determine which option you used by inspecting the -output: - -.. code-block:: bash - - $ yt version - -If the output from this command looks like: - -.. code-block:: none - - The current version and changeset for the code is: - - --- - Version = 3.2.3 - --- - -i.e. it does not refer to a specific changeset hash, then you originally chose -``INST_YT_SOURCE=0``. - -On the other hand, if the output from ``yt version`` looks like: - -.. code-block:: none - The current version and changeset for the code is: +Additional Resources +-------------------- - --- - Version = 3.3-dev - Changeset = d8eec89b2c86 - --- - -i.e. it refers to a specific changeset in the yt git repository, then -you installed using ``INST_YT_SOURCE=1``. - -Conda-based installs (``INST_YT_SOURCE=0``) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In this case you can follow the instructions above to build yt from source under conda (see -:ref:`conda-source-build`). 
- -Source-based installs (``INST_YT_SOURCE=1``) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You already have the git repository, so you simply need to switch -which version you're using. Navigate to the root of the yt git -repository, check out the desired version, and rebuild the source (some of the -C code requires a compilation step for big changes like this): +.. _distro-packages: -.. code-block:: bash +yt Distribution Packages +++++++++++++++++++++++++ - $ cd yt-/src/yt-git - $ git checkout - $ pip install -e . +Some operating systems have yt pre-built packages that can be installed with the +system package manager. Note that the packages in some of these distributions +may be out of date. -Valid versions to jump to are described in :ref:`branches-of-yt`. +.. note:: -You can check which version of yt you have installed by invoking ``yt version`` -at the command line. If you encounter problems, see :ref:`update-errors`. + Since the third-party packages listed below are not officially supported by + yt developers, support should not be sought out on the project mailing lists + or Slack channels. All support requests related to these packages should be + directed to their official maintainers. -If You Installed yt Using from Source or Using pip -++++++++++++++++++++++++++++++++++++++++++++++++++ +While we recommended installing yt with either pip or conda, a number of +third-party packages exist for the distributions listed below. -If you have installed python via ``pip``, remove -any extant installations of yt on your system and clone the git -repository of yt as described in :ref:`source-installation`. +.. image:: https://repology.org/badge/vertical-allrepos/python:yt.svg?header=yt%20packaging%20status + :target: https://repology.org/project/python:yt/versions -.. code-block:: bash - $ pip uninstall yt - $ git clone https://github.com/yt-project/yt +Intel distribution for Python ++++++++++++++++++++++++++++++ -Now, to switch between versions, you need to navigate to the root of the git yt -repository. Use git to update to the appropriate version and recompile. +A viable alternative to the installation based on Anaconda is the use of the +`Intel Distribution for Python +`_. For `Parallel +Computation +`_ on Intel +architectures, especially on supercomputers, a large `performance and +scalability improvement `_ over several common +tasks has been demonstrated. See `Parallel Computation +`_ for a +discussion on using yt in parallel. Leveraing this specialized distribution for +yt requires that you install some dependencies from the intel conda channel +before installing yt itself, like so .. code-block:: bash - $ cd yt - $ git checkout - $ pip install . --user --install-option="--prefix=" - -Valid versions to jump to are described in :ref:`branches-of-yt`). - -You can check which version of yt you have installed by invoking ``yt version`` -at the command line. If you encounter problems, see :ref:`update-errors`. + $ conda install -c intel numpy scipy mpi4py cython git sympy ipython matplotlib netCDF4 + $ python -m install --user yt diff --git a/doc/source/intro/index.rst b/doc/source/intro/index.rst index 22f3c4a6175..eb8648ab649 100644 --- a/doc/source/intro/index.rst +++ b/doc/source/intro/index.rst @@ -37,8 +37,7 @@ cylinders), based on :ref:`field values ` (e.g. all gas > 10^6 K), or for :ref:`constructing new data products ` (e.g. projections, slices, isosurfaces). Furthermore, yt can calculate the :ref:`bulk quantities ` associated with these data -objects (e.g. 
total mass, bulk velocity, angular momentum) or you can -:ref:`define your own `. +objects (e.g. total mass, bulk velocity, angular momentum). General Analysis ^^^^^^^^^^^^^^^^ @@ -56,8 +55,7 @@ processors simultaneously `. Datasets can be analyzed by simply :ref:`examining raw source data `, or they can be processed in a number of ways to extract relevant information and to explore the data including -:ref:`visualizing data ` and employing :ref:`topic-specific -analysis modules `. +:ref:`visualizing data `. Visualization ^^^^^^^^^^^^^ @@ -96,30 +94,6 @@ renderings `, and :ref:`visualizing isosurfaces in an external interactive tool `. yt even has a special web-based tool for exploring your data with a :ref:`google-maps-like interface `. -Topic-Specific Analysis Modules -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Beyond visualization and general analysis tools, yt contains a number -of :ref:`topic-specific analysis modules ` for processing -datasets. These analysis modules operate somewhat autonomous and oftentimes -use external libraries or codes. While they are installed with yt, they are -not loaded by default in every session so you have to call them specifically. -Examples include :ref:`halo analysis ` (including -:ref:`halo finding `, :ref:`merger trees `, -:ref:`halo mass functions `), :ref:`synthetic observations -` (including :ref:`cosmological light cones -`, :ref:`cosmological light rays `, -:ref:`synthetic absorption spectra `, :ref:`spectral -emission distributions (SEDS) `, :ref:`star formation -rates `, :ref:`synthetic x-ray observations -`, and :ref:`synthetic sunyaev-zeldovich effect -observations `), :ref:`two-point correlations functions -`, :ref:`identification of overdensities in arbitrary -fields (clump finding) `, :ref:`tracking particle trajectories -`, and exporting data to external radiative transfer -codes (e.g. :ref:`Sunrise ` and :ref:`RadMC-3D -`). - Executing and Scripting yt ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -157,8 +131,7 @@ like you to support it, add code, add documentation, etc. As such, we welcome members of the public to join :ref:`our community ` by contributing code, bug reports, documentation, and helping to :ref:`support the code in a number of ways `. Sooner or later, you'll want to -:ref:`add your own derived field `, :ref:`derived -quantity `, :ref:`data object +:ref:`add your own derived field `, :ref:`data object `, :ref:`code frontend ` or :ref:`make yt compatible with an external code `. We have detailed instructions on how to :ref:`contribute code ` @@ -176,7 +149,7 @@ problems. Getting Started ^^^^^^^^^^^^^^^ -We have detailed :ref:`installation instructions ` +We have detailed :ref:`installation instructions ` and support for a number of platforms including Unix, Linux, MacOS, and Windows. If you are new to yt, check out the :ref:`yt Quickstart ` and the :ref:`cookbook ` for a demonstration of yt's diff --git a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb index ed5f560e2d2..785031cef84 100644 --- a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb +++ b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb @@ -6,7 +6,7 @@ "source": [ "# Data Objects and Time Series Data\n", "\n", - "Just like before, we will load up yt. Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook." + "Just like before, we will load up yt. 
Since we'll be using pyplot to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook." ] }, { @@ -22,7 +22,7 @@ "%matplotlib inline\n", "import yt\n", "import numpy as np\n", - "from matplotlib import pylab" + "import matplotlib.pyplot as plt" ] }, { @@ -91,13 +91,16 @@ }, "outputs": [], "source": [ - "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n", - "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n", - "pylab.ylabel(\"Density ($g/cm^3$)\")\n", - "pylab.xlabel(\"Time (Gyr)\")\n", - "pylab.legend()\n", - "pylab.ylim(1e-32, 1e-21)\n", - "pylab.show()" + "fig, ax = plt.subplots()\n", + "ax.set(\n", + " xlabel=\"Time (Gyr)\",\n", + " ylabel=\"Density ($g/cm^3$)\",\n", + " yscale=\"log\",\n", + " ylim=(1e-32, 1e-21),\n", + ")\n", + "ax.plot(times, rho_ex[:,0], '-xk', label='Minimum')\n", + "ax.plot(times, rho_ex[:,1], '-xr', label='Maximum')\n", + "ax.legend()" ] }, { @@ -127,8 +130,9 @@ }, "outputs": [], "source": [ + "fig, ax = plt.subplots()\n", "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n", - "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))" + "ax.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))" ] }, { @@ -141,7 +145,7 @@ }, "outputs": [], "source": [ - "print (ray[\"dts\"])" + "print(ray[\"dts\"])" ] }, { @@ -154,7 +158,7 @@ }, "outputs": [], "source": [ - "print (ray[\"t\"])" + "print(ray[\"t\"])" ] }, { @@ -167,7 +171,7 @@ }, "outputs": [], "source": [ - "print (ray[(\"gas\", \"x\")])" + "print(ray[\"gas\", \"x\"])" ] }, { @@ -192,10 +196,10 @@ "ds = yt.load_sample(\"IsolatedGalaxy\")\n", "v, c = ds.find_max((\"gas\", \"density\"))\n", "sl = ds.slice(2, c[0])\n", - "print (sl[\"index\", \"x\"])\n", - "print (sl[\"index\", \"z\"])\n", - "print (sl[\"pdx\"])\n", - "print (sl[\"gas\", \"density\"].shape)" + "print(sl[\"index\", \"x\"])\n", + "print(sl[\"index\", \"z\"])\n", + "print(sl[\"pdx\"])\n", + "print(sl[\"gas\", \"density\"].shape)" ] }, { @@ -216,7 +220,7 @@ "outputs": [], "source": [ "frb = sl.to_frb((50.0, 'kpc'), 1024)\n", - "print (frb[\"gas\", \"density\"].shape)" + "print(frb[\"gas\", \"density\"].shape)" ] }, { @@ -345,7 +349,7 @@ "outputs": [], "source": [ "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n", - "print (cg[\"density\"].shape)" + "print(cg[\"density\"].shape)" ] }, { @@ -366,7 +370,7 @@ "outputs": [], "source": [ "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n", - "print (scg[\"density\"].shape)" + "print(scg[\"density\"].shape)" ] }, { @@ -379,9 +383,13 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" + "name": "python3", + "display_name": "Python 3.9.5 64-bit ('yt-dev': pyenv)", + "metadata": { + "interpreter": { + "hash": "14363bd97bed451d1329fb3e06aa057a9e955a9421c5343dd7530f5497723a41" + } + } }, "language_info": { "codemirror_mode": { @@ -393,7 +401,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.9.5" } }, "nbformat": 4, diff --git a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb index ccc85383f49..d208b33caaa 100644 --- a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb +++ b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb @@ -21,10 +21,10 @@ "outputs": [], "source": [ "%matplotlib inline\n", - "import yt\n", + "import matplotlib.pyplot as plt\n", "import 
numpy as np\n", - "from yt import derived_field\n", - "from matplotlib import pylab" + "import yt\n", + "from yt import derived_field" ] }, { @@ -49,7 +49,7 @@ "source": [ "@derived_field(name=\"dinosaurs\", units=\"K * cm/s\", sampling_type=\"cell\")\n", "def _dinos(field, data):\n", - " return data[(\"gas\", \"temperature\")] * data[(\"gas\", \"velocity_magnitude\")]" + " return data[\"gas\", \"temperature\"] * data[\"gas\", \"velocity_magnitude\"]" ] }, { @@ -72,7 +72,7 @@ "source": [ "ds = yt.load_sample(\"IsolatedGalaxy\")\n", "dd = ds.all_data()\n", - "print (list(dd.quantities.keys()))" + "print(list(dd.quantities.keys()))" ] }, { @@ -93,7 +93,7 @@ }, "outputs": [], "source": [ - "print (dd.quantities.extrema((\"gas\", \"dinosaurs\")))" + "print(dd.quantities.extrema((\"gas\", \"dinosaurs\")))" ] }, { @@ -114,7 +114,7 @@ }, "outputs": [], "source": [ - "print (dd.quantities.weighted_average_quantity((\"gas\", \"dinosaurs\"), weight=(\"gas\", \"temperature\")))" + "print(dd.quantities.weighted_average_quantity((\"gas\", \"dinosaurs\"), weight=(\"gas\", \"temperature\")))" ] }, { @@ -141,9 +141,9 @@ "bv = sp.quantities.bulk_velocity()\n", "L = sp.quantities.angular_momentum_vector()\n", "rho_min, rho_max = sp.quantities.extrema((\"gas\", \"density\"))\n", - "print (bv)\n", - "print (L)\n", - "print (rho_min, rho_max)" + "print(bv)\n", + "print(L)\n", + "print(rho_min, rho_max)" ] }, { @@ -172,9 +172,13 @@ "source": [ "prof = yt.Profile1D(sp, (\"gas\", \"density\"), 32, rho_min, rho_max, True, weight_field=(\"gas\", \"mass\"))\n", "prof.add_fields([(\"gas\", \"temperature\"), (\"gas\", \"dinosaurs\")])\n", - "pylab.loglog(np.array(prof.x), np.array(prof[(\"gas\", \"temperature\")]), \"-x\")\n", - "pylab.xlabel('Density $(g/cm^3)$')\n", - "pylab.ylabel('Temperature $(K)$')" + "\n", + "fig, ax = plt.subplots()\n", + "ax.loglog(np.array(prof.x), np.array(prof[\"gas\", \"temperature\"]), \"-x\")\n", + "ax.set(\n", + " xlabel='Density $(g/cm^3)$',\n", + " ylabel='Temperature $(K)$',\n", + ")" ] }, { @@ -195,9 +199,12 @@ }, "outputs": [], "source": [ - "pylab.loglog(np.array(prof.x), np.array(prof[(\"gas\", \"dinosaurs\")]), '-x')\n", - "pylab.xlabel('Density $(g/cm^3)$')\n", - "pylab.ylabel('Dinosaurs $(K cm / s)$')" + "fig, ax = plt.subplots()\n", + "ax.loglog(np.array(prof.x), np.array(prof[\"gas\", \"dinosaurs\"]), '-x')\n", + "ax.set(\n", + " xlabel='Density $(g/cm^3)$',\n", + " ylabel='Dinosaurs $(K cm / s)$',\n", + ")" ] }, { @@ -220,9 +227,13 @@ "source": [ "prof = yt.Profile1D(sp, (\"gas\", \"density\"), 32, rho_min, rho_max, True, weight_field=None)\n", "prof.add_fields([(\"gas\", \"mass\")])\n", - "pylab.loglog(np.array(prof.x), np.array(prof[(\"gas\", \"mass\")].in_units(\"Msun\")), '-x')\n", - "pylab.xlabel('Density $(g/cm^3)$')\n", - "pylab.ylabel('Cell mass $(M_\\odot)$')" + "\n", + "fig, ax = plt.subplots()\n", + "ax.loglog(np.array(prof.x), np.array(prof[\"gas\", \"mass\"].in_units(\"Msun\")), '-x')\n", + "ax.set(\n", + " xlabel='Density $(g/cm^3)$',\n", + " ylabel='Cell mass $(M_\\odot)$',\n", + ")" ] }, { @@ -278,9 +289,9 @@ "sp.set_field_parameter(\"bulk_velocity\", bv)\n", "rv2 = sp.quantities.extrema((\"gas\", \"radial_velocity\"))\n", "\n", - "print (bv)\n", - "print (rv1)\n", - "print (rv2)" + "print(bv)\n", + "print(rv1)\n", + "print(rv2)" ] } ], diff --git a/doc/source/quickstart/index.rst b/doc/source/quickstart/index.rst index d7a28f16970..b232f1028e9 100644 --- a/doc/source/quickstart/index.rst +++ b/doc/source/quickstart/index.rst @@ -16,7 +16,7 @@ To execute 
the quickstart interactively, you have a couple of options: 1) run the notebook from your own system or 2) run it from the url https://girder.hub.yt/#raft/5b5b4686323d12000122aa8a. Option 1 requires an existing installation of yt (see -:ref:`getting-and-installing-yt`), a copy of the yt source (which you may +:ref:`installing-yt`), a copy of the yt source (which you may already have depending on your installation choice), and a download of the tutorial data-sets (total about 3 GB). If you know you are going to be a yt user and have the time to download the data-sets, option 1 is a good choice. However, diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index 32bcfe64d73..e0f4a692ab8 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -58,6 +58,7 @@ Writing FITS images ~yt.visualization.fits_image.FITSProjection ~yt.visualization.fits_image.FITSOffAxisSlice ~yt.visualization.fits_image.FITSOffAxisProjection + ~yt.visualization.fits_image.FITSParticleProjection Data Sources ------------ @@ -103,7 +104,7 @@ geometric. ~yt.data_objects.selection_objects.object_collection.YTDataCollection ~yt.data_objects.selection_objects.spheroids.YTSphere ~yt.data_objects.selection_objects.spheroids.YTEllipsoid - ~yt.data_objects.selection_objects.cur_region.YTCutRegion + ~yt.data_objects.selection_objects.cut_region.YTCutRegion ~yt.data_objects.index_subobjects.grid_patch.AMRGridPatch ~yt.data_objects.index_subobjects.octree_subset.OctreeSubset ~yt.data_objects.index_subobjects.particle_container.ParticleContainer @@ -157,27 +158,16 @@ These objects generate an "index" into multiresolution data. Units ----- -These classes and functions enable yt's symbolic unit handling system. +yt's symbolic unit handling system is now based on the external library unyt. In +complement, Dataset objects support the following methods to build arrays and +scalars with physical dimensions. .. autosummary:: yt.data_objects.static_output.Dataset.arr yt.data_objects.static_output.Dataset.quan - ~yt.units.unit_object.define_unit - ~yt.units.unit_object.Unit - ~yt.units.unit_registry.UnitRegistry - ~yt.units.unit_systems.UnitSystem - ~yt.units.yt_array.YTArray - ~yt.units.yt_array.YTQuantity - ~yt.units.yt_array.uconcatenate - ~yt.units.yt_array.uintersect1d - ~yt.units.yt_array.uunion1d - ~yt.units.yt_array.unorm - ~yt.units.yt_array.udot - ~yt.units.yt_array.uvstack - ~yt.units.yt_array.uhstack - ~yt.units.yt_array.ustack - ~yt.units.yt_array.display_ytarray + + Frontends --------- @@ -531,11 +521,11 @@ Field Functions ~yt.fields.field_info_container.FieldInfoContainer.add_field ~yt.data_objects.static_output.Dataset.add_field - ~yt.data_objects.static_outputs.add_deposited_particle_field - ~yt.data_objects.static_outputs.add_mesh_sampling_particle_field - ~yt.data_objects.static_outputs.add_smoothed_particle_field - ~yt.data_objects.static_outputs.add_gradient_fields - ~yt.frontends.stream.data_structures.add_SPH_fields + ~yt.data_objects.static_output.Dataset.add_deposited_particle_field + ~yt.data_objects.static_output.Dataset.add_mesh_sampling_particle_field + ~yt.data_objects.static_output.Dataset.add_smoothed_particle_field + ~yt.data_objects.static_output.Dataset.add_gradient_fields + ~yt.frontends.stream.data_structures.StreamParticlesDataset.add_sph_fields Particle Filters ---------------- @@ -797,7 +787,7 @@ Miscellaneous Types .. 
autosummary:: - ~yt.config.YTConfigParser + ~yt.config.YTConfig ~yt.utilities.parameter_file_storage.ParameterFileStore ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface @@ -819,7 +809,6 @@ Cosmology Calculator ~yt.utilities.cosmology.Cosmology.angular_scale ~yt.utilities.cosmology.Cosmology.luminosity_distance ~yt.utilities.cosmology.Cosmology.lookback_time - ~yt.utilities.cosmology.Cosmology.hubble_time ~yt.utilities.cosmology.Cosmology.critical_density ~yt.utilities.cosmology.Cosmology.hubble_parameter ~yt.utilities.cosmology.Cosmology.expansion_factor @@ -858,8 +847,6 @@ These are for the pytest infrastructure: .. autosummary:: - ~conftest.tempdir - ~conftest.answer_file ~conftest.hashing ~yt.utilities.answer_testing.answer_tests.grid_hierarchy ~yt.utilities.answer_testing.answer_tests.parentage_relationships @@ -867,8 +854,6 @@ These are for the pytest infrastructure: ~yt.utilities.answer_testing.answer_tests.projection_values ~yt.utilities.answer_testing.answer_tests.field_values ~yt.utilities.answer_testing.answer_tests.pixelized_projection_values - ~yt.utilities.answer_testing.answer_tests.simulated_halo_mass_function - ~yt.utilities.answer_testing.answer_tests.analytic_halo_mass_function ~yt.utilities.answer_testing.answer_tests.small_patch_amr ~yt.utilities.answer_testing.answer_tests.big_patch_amr ~yt.utilities.answer_testing.answer_tests.generic_array @@ -878,6 +863,5 @@ These are for the pytest infrastructure: ~yt.utilities.answer_testing.answer_tests.phase_plot_attribute ~yt.utilities.answer_testing.answer_tests.generic_image ~yt.utilities.answer_testing.answer_tests.axial_pixelization - ~yt.utilities.answer_testing.answer_tests.light_cone_projection ~yt.utilities.answer_testing.answer_tests.extract_connected_sets ~yt.utilities.answer_testing.answer_tests.VR_image_comparison diff --git a/doc/source/reference/changelog.rst b/doc/source/reference/changelog.rst index 73a00467703..f31b5943681 100644 --- a/doc/source/reference/changelog.rst +++ b/doc/source/reference/changelog.rst @@ -12,6 +12,250 @@ The `CREDITS file `_ contains the most up-to-date list of everyone who has contributed to the yt source code. +yt 4.0 +------ + +Welcome to yt 4.0! This release is the result of several years worth of +developer effort and has been in progress since the mid 3.x series. Please keep +in mind that this release **will** have breaking changes. Please see the yt 4.0 +differences page for how you can expect behavior to differ from the 3.x series. + +This is a manually curated list of pull requests that went in to yt 4.0, +representing a subset of `the full +list `__. 
+ +New Functions +^^^^^^^^^^^^^ + +- ``yt.load_sample`` (PR + #\ `2417 `__, PR + #\ `2496 `__, PR + #\ `2875 `__, PR + #\ `2877 `__, PR + #\ `2894 `__, PR + #\ `3262 `__, PR + #\ `3263 `__, PR + #\ `3277 `__, PR + #\ `3309 `__, and PR + #\ `3336 `__) +- ``yt.set_log_level`` (PR + #\ `2869 `__ and PR + #\ `3094 `__) +- ``list_annotations`` method for plots (PR + #\ `2562 `__) + +API improvements +^^^^^^^^^^^^^^^^ + +- ``yt.load`` with support for ``os.PathLike`` objects, improved UX + and moved a new ``yt.loaders`` module, along with sibling functions (PR + #\ `2405 `__, PR + #\ `2722 `__, PR + #\ `2695 `__, PR + #\ `2818 `__, and PR + #\ `2831 `__, PR + #\ `2832 `__) +- ``Dataset`` now has a more useful repr (PR + #\ `3217 `__) +- Explicit JPEG export support (PR + #\ `2549 `__) +- ``annotate_clear`` is now ``clear_annotations`` (PR + #\ `2569 `__) +- Throw an error if field access is ambiguous (PR + #\ `2967 `__) + +Newly supported data formats +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Arepo +~~~~~ + +- PR #\ `1807 `__ +- PR #\ `2236 `__ +- PR #\ `2244 `__ +- PR #\ `2344 `__ +- PR #\ `2434 `__ +- PR #\ `3258 `__ +- PR #\ `3265 `__ +- PR #\ `3291 `__ + +Swift +~~~~~ + +- PR #\ `1962 `__ + +Improved support and frontend specific bugfixes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +adaptahop +~~~~~~~~~ + +- PR #\ `2678 `__ + +AMRVAC +~~~~~~ + +- PR #\ `2541 `__ +- PR #\ `2745 `__ +- PR #\ `2746 `__ +- PR #\ `3215 `__ + +ART +~~~ + +- PR #\ `2688 `__ + +ARTIO +~~~~~ + +- PR #\ `2613 `__ + +Athena++ +~~~~~~~~ + +- PR #\ `2985 `__ + +Boxlib +~~~~~~ + +- PR #\ `2807 `__ +- PR #\ `2814 `__ +- PR #\ `2938 `__ (AMReX) + +Enzo-E (formerly Enzo-P) +~~~~~~~~~~~~~~~~~~~~~~~~ + +- PR #\ `3273 `__ +- PR #\ `3274 `__ +- PR #\ `3290 `__ +- PR #\ `3372 `__ + +fits +~~~~ + +- PR #\ `2246 `__ +- PR #\ `2345 `__ + +Gadget +~~~~~~ + +- PR #\ `2145 `__ +- PR #\ `3233 `__ +- PR #\ `3258 `__ + +Gadget FOF Halo +~~~~~~~~~~~~~~~ + +- PR #\ `2296 `__ + +GAMER +~~~~~ + +- PR #\ `3033 `__ + +Gizmo +~~~~~ + +- PR #\ `3234 `__ + +MOAB +~~~~ + +- PR #\ `2856 `__ + +Owls +~~~~ + +- PR #\ `3325 `__ + +Ramses +~~~~~~ + +- PR #\ `2679 `__ +- PR #\ `2714 `__ +- PR #\ `2960 `__ +- PR #\ `3017 `__ +- PR #\ `3018 `__ + +Tipsy +~~~~~ + +- PR #\ `2193 `__ + +Octree Frontends +~~~~~~~~~~~~~~~~ + +- Ghost zone access (PR + #\ `2425 `__ and PR + #\ `2958 `__) +- Volume Rendering (PR + #\ `2610 `__) + +Configuration file +^^^^^^^^^^^^^^^^^^ + +- Config files are now in `TOML `__ (PR + #\ `2981 `__) +- Allow a local plugin file (PR + #\ `2534 `__) +- Allow per-field local config (PR + #\ `1931 `__) + +yt CLI +^^^^^^ + +- Fix broken command-line options (PR + #\ `3361 `__) +- Drop yt hub command (PR + #\ `3363 `__) + +Deprecations +^^^^^^^^^^^^ + +- Smoothed fields are no longer necessary (PR + #\ `2194 `__) +- Energy and momentum field names are more accurate (PR + #\ `3059 `__) +- Incorrectly-named ``WeightedVariance`` is now + ``WeightedStandardDeviation`` and the old name has been deprecated + (PR #\ `3132 `__) +- Colormap auto-registration has been changed and yt 4.1 will not + register ``cmocean`` (PR + #\ `3175 `__ and PR + #\ `3214 `__) + +Removals +~~~~~~~~ + +- ``analysis_modules`` has been + `extracted `__ (PR + #\ `2081 `__) +- Interactive volume rendering has been + `extracted `__ (PR + #\ `2896 `__) +- The bundled version of ``poster`` has been removed (PR + #\ `2783 `__) +- The deprecated ``particle_position_relative`` field has been removed + (PR #\ `2901 `__) +- Deprecated functions have been removed (PR + #\ `3007 `__) 
+- Vendored packages have been removed (PR + #\ `3008 `__) +- ``yt.pmods`` has been removed (PR + #\ `3061 `__) +- yt now utilizes unyt as an external package (PR + #\ `2219 `__, PR + #\ `2300 `__, and PR + #\ `2303 `__) + +Version 3.6.1 +------------- + +Version 3.6.1 is a bugfix release. It includes the following backport: + +- hotfix: support matplotlib 3.3.0. + See `PR 2754 `__. + Version 3.6.0 ------------- @@ -39,33 +283,35 @@ Major Changes and New Features ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- New frontend support for the code AMRVAC. Many thanks to Clement Robert - and Niels Claes who were major contributors to this initiative. Relevant PRs include: - - Initial PR to support AMRVAC native data files - `PR 2321 `__. - - added support for dust fields and derived fields - `PR 2387 `__. - - added support for derived fields for hydro runs - `PR 2381 `__. - - API documentation and docstrings for AMRVAC frontend - `PR 2384 `__, - `PR 2380 `__, - `PR 2382 `__. - - testing-related PRs for AMRVAC: - `PR 2379 `__, - `PR 2360 `__. - - add verbosity to logging of geometry or `geometry_override` - `PR 2421 `__. - - add attribute to `_code_unit_attributes` specific to AMRVAC to ensure - consistent renormalisation of AMRVAC datasets. See - `PR 2357 `__. - - parse AMRVAC's parfiles if user-provided - `PR 2369 `__. - - ensure that min_level reflects dataset that has refinement - `PR 2475 `__. - - fix derived unit parsing `PR 2362 `__. - - update energy field to be `energy_density` and have units of code - pressure `PR 2376 `__. +- New frontend support for the code AMRVAC. Many thanks to Clément Robert + and Niels Claes who were major contributors to this initiative. Relevant PRs include + + - Initial PR to support AMRVAC native data files + `PR 2321 `__. + - added support for dust fields and derived fields + `PR 2387 `__. + - added support for derived fields for hydro runs + `PR 2381 `__. + - API documentation and docstrings for AMRVAC frontend + `PR 2384 `__, + `PR 2380 `__, + `PR 2382 `__. + - testing-related PRs for AMRVAC: + `PR 2379 `__, + `PR 2360 `__. + - add verbosity to logging of geometry or ``geometry_override`` + `PR 2421 `__. + - add attribute to ``_code_unit_attributes`` specific to AMRVAC to ensure + consistent renormalisation of AMRVAC datasets. See + `PR 2357 `__. + - parse AMRVAC's parfiles if user-provided + `PR 2369 `__. + - ensure that min_level reflects dataset that has refinement + `PR 2475 `__. + - fix derived unit parsing `PR 2362 `__. + - update energy field to be ``energy_density`` and have units of code + pressure `PR 2376 `__. + - Support for the AdaptaHOP halo finder code `PR 2385 `__. - yt now supports geographic transforms and projections of data with @@ -363,8 +609,8 @@ Other Infrastructure - Added a welcomebot to our github page for new contributors, see `PR 2181 `__. - Added a pep8 bot to pre-run before tests, see - `PR 2179 `__ - `PR 2184 `__, + `PR 2184 `__ and `PR 2185 `__. Version 3.5.0 @@ -493,8 +739,8 @@ Additional Improvements will be registered for a dataset if the dependent particle filter is registered with a dataset. See `PR 1624 `__. -- The ``save()`` method of the various yt plot objets now optionally can accept - a tuple of strings instead of a string. If a tuple is supplied, the elments +- The ``save()`` method of the various yt plot objects now optionally can accept + a tuple of strings instead of a string. If a tuple is supplied, the elements are joined with ``os.sep`` to form a path. See `PR 1630 `__. 
- The quiver callback now accepts a ``plot_args`` keyword argument that allows @@ -544,7 +790,7 @@ Additional Improvements 1914 `__. - ``ParticleProjectionPlot`` now supports the ``annotate_particles`` plot callback. See `PR 1765 `__. -- Optmized the performance of off-axis projections for octree AMR data. See `PR +- Optimized the performance of off-axis projections for octree AMR data. See `PR 1766 `__. - Added support for several radiative transfer fields in the ARTIO frontend. See `PR 1804 `__. @@ -1147,7 +1393,7 @@ Major enhancements :ref:`GAMER `, and :ref:`Gizmo ` data formats. * The unit system associated with a dataset is now customizable, defaulting to - CGS. See :ref:`unit_systems`. + CGS. * Enhancements and usability improvements for analysis modules, especially the ``absorption_spectrum``, ``photon_simulator``, and ``light_ray`` modules. See :ref:`synthetic-observations`. diff --git a/doc/source/reference/code_support.rst b/doc/source/reference/code_support.rst index d200e83a385..4f320cd32ce 100644 --- a/doc/source/reference/code_support.rst +++ b/doc/source/reference/code_support.rst @@ -12,66 +12,78 @@ as inputs. Below is a list showing what level of support is provided for each code. See :ref:`loading-data` for examples of loading a dataset from each supported output format using yt. -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Capability ► | Fluid | Particles | Parameters | Units | Read on | Load Raw | Part of | Level of | -| Code/Format ▼ | Quantities | | | | Demand | Data | test suite | Support | -+=======================+============+===========+============+=======+==========+==========+============+==========+ -| AMRVAC | Y | N | Y | Y | Y | Y | Y | Partial | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| ART | Y | Y | Y | Y | Y [#f2]_ | Y | N | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| ARTIO | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Athena | Y | N | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Castro | Y | Y [#f3]_ | Partial | Y | Y | Y | N | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Chombo | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Enzo | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| FITS | Y | N/A | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| FLASH | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Gadget | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| GAMER | Y | Y | Y | Y | Y | Y | Y | Full | 
-+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Gasoline | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Gizmo | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Grid Data Format (GDF)| Y | N/A | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Maestro | Y [#f1]_ | N | Y | Y | Y | Y | N | Partial | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| MOAB | Y | N/A | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Nyx | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| openPMD | Y | Y | N | Y | Y | Y | N | Partial | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Orion | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| OWLS/EAGLE | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Piernik | Y | N/A | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Pluto | Y | N | Y | Y | Y | Y | Y | Partial | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| RAMSES | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| Tipsy | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ -| WarpX | Y | Y | Y | Y | Y | Y | Y | Full | -+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Capability ► | Fluid | Particles | Parameters | Units | Read on | Load Raw | Part of | Level of | +| Code/Format ▼ | Quantities | | | | Demand | Data | test suite | Support | ++=======================+============+===========+============+=======+==========+==========+============+=============+ +| AMRVAC | Y | N | Y | Y | Y | Y | Y | Partial | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| AREPO | Y | Y | Y | Y | Y | Y | Y | Full [#f4]_ | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| ART | Y | Y | Y | Y | Y [#f2]_ | Y | N | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| ARTIO | Y | Y | Y | Y | Y | Y | Y | Full | 
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Athena | Y | N | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Athena++ | Y | N | Y | Y | Y | Y | Y | Partial | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Castro | Y | Y [#f3]_ | Partial | Y | Y | Y | N | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Chombo | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Enzo | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Enzo-E | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Exodus II | ? | ? | ? | ? | ? | ? | ? | ? | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| FITS | Y | N/A | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| FLASH | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Gadget | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| GAMER | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Gasoline | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Gizmo | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Grid Data Format (GDF)| Y | N/A | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| IAMR | ? | ? | ? | ? | ? | ? | ? | ? 
| ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Maestro | Y [#f1]_ | N | Y | Y | Y | Y | N | Partial | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| MOAB | Y | N/A | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Nyx | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| openPMD | Y | Y | N | Y | Y | Y | N | Partial | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Orion | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| OWLS/EAGLE | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Piernik | Y | N/A | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Pluto | Y | N | Y | Y | Y | Y | Y | Partial | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| RAMSES | Y | Y | Y | Y | Y [#f2]_ | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| Tipsy | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ +| WarpX | Y | Y | Y | Y | Y | Y | Y | Full | ++-----------------------+------------+-----------+------------+-------+----------+----------+------------+-------------+ .. [#f1] one-dimensional base-state not read in currently. .. [#f2] These handle mesh fields using an in-memory octree that has not been parallelized. Datasets larger than approximately 1024^3 will not scale well. .. [#f3] Newer versions of Castro that use BoxLib's standard particle format are supported. - The older ASCII format is not. + The older ASCII format is not. +.. [#f4] The Voronoi cells are currently treated as SPH-like particles, with a smoothing + length proportional to the cube root of the cell volume. If you have a dataset that uses an output format not yet supported by yt, you can either input your data following :ref:`loading-numpy-array` or diff --git a/doc/source/reference/command-line.rst b/doc/source/reference/command-line.rst index 54a6d9e61cd..27cf466f790 100644 --- a/doc/source/reference/command-line.rst +++ b/doc/source/reference/command-line.rst @@ -8,6 +8,10 @@ Command-Line Usage Interactive Prompt ~~~~~~~~~~~~~~~~~~ +.. warning:: + + This section describes a script targeted for removal in yt 4.2.0 + The interactive prompt offers a number of excellent opportunities for exploration of data. While there are challenges for repeatability, and some operations will be more challenging to operate in parallel, interactive prompts @@ -35,8 +39,8 @@ or yt load DD0030/DD0030 This will spawn ``iyt``, but the dataset given on the command line will -already be in the namespace as ``ds``. With interactive mode, you can use the -``pylab`` module to interactively plot. +already be in the namespace as ``ds``. 
With interactive mode, you can use +``matplotlib.pyplot`` to build plots interactively. Command-line Functions ~~~~~~~~~~~~~~~~~~~~~~ @@ -45,7 +49,7 @@ The :code:`yt` command-line tool allows you to access some of yt's basic functionality without opening a python interpreter. The tools is a collection of subcommands. These can quickly making plots of slices and projections through a dataset, updating yt's codebase, print basic statistics about a dataset, launch -an IPython notebook session, and more. To get a quick list of what is +an IPython notebook session, and more. To get a quick list of what is available, just type: .. code-block:: bash @@ -82,21 +86,21 @@ First let's see what our options are for plotting: $ yt plot --help -There are many! We can choose whether we want a slice (default) or a +There are many! We can choose whether we want a slice (default) or a projection (``-p``), the field, the colormap, the center of the image, the width and unit of width of the image, the limits, the weighting field for -projections, and on and on. By default the plotting command will execute the +projections, and on and on. By default the plotting command will execute the same thing along all three axes, so keep that in mind if it takes three times -as long as you'd like! The center of a slice defaults to the center of +as long as you'd like! The center of a slice defaults to the center of the domain, so let's just give that a shot and see what it looks like: .. code-block:: bash $ yt plot DD0010/moving7_0010 -Well, that looks pretty bad! What has happened here is that the center of the +Well, that looks pretty bad! What has happened here is that the center of the domain only has some minor shifts in density, so the plot is essentially -incomprehensible. Let's try it again, but instead of slicing, let's project. +incomprehensible. Let's try it again, but instead of slicing, let's project. This is a line integral through the domain, and for the density field this becomes a column density: @@ -104,10 +108,10 @@ becomes a column density: $ yt plot -p DD0010/moving7_0010 -Now that looks much better! Note that all three axes' projections appear +Now that looks much better! Note that all three axes' projections appear nearly indistinguishable, because of how the two spheres are located in the -domain. We could center our domain on one of the spheres and take a slice, as -well. Now let's see what the domain looks like with grids overlaid, using the +domain. We could center our domain on one of the spheres and take a slice, as +well. Now let's see what the domain looks like with grids overlaid, using the ``--show-grids`` option: .. code-block:: bash @@ -131,43 +135,27 @@ help Help lists all of the various command-line options in yt. - -bugreport -+++++++++ - -Encountering a bug in your own code can be a big hassle, but it can be -exponentially worse to find it in someone else's. That's why we tried to -make it as easy as possible for users to report bugs they find in yt. -After you go through the necessary channels to make sure you're not just -making a mistake (see :ref:`asking-for-help`), you can submit bug -reports using this nice utility. - instinfo and version ++++++++++++++++++++ This gives information about where your yt installation is, what version and changeset you're using and more. -load -++++ - -This will start the iyt interactive environment with your specified -dataset already loaded. See :ref:`interactive-prompt` for more details. 
mapserver +++++++++ -Ever wanted to interact with your data using the -`google maps `_ interface? Now you can by using the -yt mapserver. See :ref:`mapserver` for more details. +Ever wanted to interact with your data using a +`google maps `_-style interface? Now you can by using the +yt mapserver. See :ref:`mapserver` for more details. pastebin and pastebin_grab ++++++++++++++++++++++++++ The `pastebin `_ is an online location where you can anonymously post code snippets and error messages to share with -other users in a quick, informal way. It is often useful for debugging -code or co-developing. By running the ``pastebin`` subcommand with a +other users in a quick, informal way. It is often useful for debugging +code or co-developing. By running the ``pastebin`` subcommand with a text file, you send the contents of that file to an anonymous pastebin; .. code-block:: bash @@ -205,28 +193,18 @@ By specifying the axis, center, width, etc. (run ``yt help plot`` for details), you can create slices and projections easily at the command-line. -upload_notebook -+++++++++++++++ - -This command will accept the filename of a ``.ipynb`` file (generated from an -IPython notebook session) and upload it to the `yt hub -`__ where others will be able to view it, and -download it. This is an easy method for recording a sequence of commands, -their output, narrative information, and then sharing that with others. These -notebooks will be viewable online, and the appropriate URLs will be returned on -the command line. - rpdb ++++ -Connect to a currently running (on localhost) rpd session. +Connect to a currently running (on localhost) rpdb session. See +:ref:`remote-debugging` for more info. notebook ++++++++ -Launches an IPython notebook server and prints out instructions on how to open -an ssh tunnel to connect to the notebook server with a web browser. This is -most useful when you want to run an IPython notebook using CPUs on a remote +Launches a Jupyter notebook server and prints out instructions on how to open +an ssh tunnel to connect to the notebook server with a web browser. This is +most useful when you want to run a Jupyter notebook using CPUs on a remote host. stats @@ -234,14 +212,34 @@ stats This subcommand provides you with some basic statistics on a given dataset. It provides you with the number of grids and cells in each level, the time -of the dataset, the resolution, and the maximum density in a variety of units. -It is tantamount to performing the ``print_stats()`` inside of yt. +of the dataset, and the resolution. It is tantamount to calling the +``Dataset.print_stats`` method. + +Additionally, there is the option to print the minimum, maximum, or both for +a given field. The field is assumed to be density by default: + +.. code-block:: bash + + yt stats GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150 --max --min + +or a different field can be specified using the ``-f`` flag: + +.. code-block:: bash + + yt stats GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150 --max --min -f gas,temperature + +The field-related stats output from this command can be directed to a file using +the ``-o`` flag: + +.. code-block:: bash + + yt stats GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150 --max -o out_stats.dat update ++++++ This subcommand updates the yt installation to the most recent version for -your repository (e.g. stable, 2.0, development, etc.). Adding the ``--all`` +your repository (e.g. stable, 2.0, development, etc.). Adding the ``--all`` flag will update the dependencies as well. .. 
_upload-image: @@ -265,38 +263,6 @@ The image uploaded using ``upload_image`` is assigned with a unique hash that can be used to remove it. This subcommand provides an easy way to send a delete request directly to the `imgur.com `_. -Hub helper -~~~~~~~~~~ - -The :code:`yt hub` command-line tool allows to interact with the `yt hub -`__. The following subcommands are currently available: - -.. config_help:: yt hub - -register -++++++++ - -This subcommand starts an interactive process of creating an account on the `yt -hub `__. Please note that the yt Hub also supports multiple OAuth -providers such as Google, Bitbucket and GitHub for authentication. -See :ref:`hub-APIkey` for more information. - -start -+++++ - -This subcommand launches the Jupyter Notebook on the `yt Hub `__ -with a chosen Hub folder mounted to the ``/data`` directory inside the notebook. -If no path is given all the `example yt datasets -`_ are mounted by default. The appropriate URL -allowing to access the Notebook will be returned on the commandline. - -Example: - -.. code-block:: bash - - $ yt hub start - $ yt hub start /user/xarthisius/Public - download ~~~~~~~~ diff --git a/doc/source/reference/configuration.rst b/doc/source/reference/configuration.rst index 0f6aa2f6fb4..1a95abe64ca 100644 --- a/doc/source/reference/configuration.rst +++ b/doc/source/reference/configuration.rst @@ -14,7 +14,9 @@ The Configuration The configuration is stored in simple text files (in the `toml `_ format). The files allow to set internal yt variables to custom default values to be used in future sessions. -The configuration can either be stored :ref:`globally` or :ref:`locally`. +The configuration can either be stored :ref:`globally ` or :ref:`locally `. + +.. _global-conf: Global Configuration ^^^^^^^^^^^^^^^^^^^^ @@ -45,6 +47,8 @@ options from the configuration file, e.g.: $ yt config rm yt maximum_stored_datasets +.. _local-conf: + Local Configuration ^^^^^^^^^^^^^^^^^^^ @@ -98,10 +102,10 @@ file. Note that a log level of 1 means that all log messages are printed to stdout. To disable logging, set the log level to 50. -.. _global-config: +.. _config-options: -Available Global Configuration Options -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Available Configuration Options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following external parameters are available. A number of parameters are used internally. @@ -152,7 +156,7 @@ It is possible to customize the default behaviour of plots using per-field confi The default options for plotting a given field can be specified in the configuration file in ``[plot.field_type.field_name]`` blocks. The available keys are -* ``cmap`` (default: ``yt.default_colormap``, see :ref:`global-config`): the colormap to +* ``cmap`` (default: ``yt.default_colormap``, see :ref:`config-options`): the colormap to use for the field. * ``log`` (default: ``True``): use a log scale (or symlog if ``linthresh`` is also set). * ``linthresh`` (default: ``None``): if set to a float different than ``None`` and ``log`` is @@ -203,9 +207,8 @@ by defining ``plugin_filename`` in your ``yt.toml`` file, as mentioned above. .. note:: You can tell that your system plugin file is being parsed by watching for a logging - message when you import yt. Note that both the ``yt load`` and ``iyt`` - command line entry points parse the plugin file, so the ``my_plugins.py`` - file will be parsed if you enter yt that way. + message when you import yt. Note that the ``yt load`` command line entry point parses + the plugin file. 
Local project plugin file @@ -221,7 +224,7 @@ Plugin File Format Plugin files should contain pure Python code. If accessing yt functions and classes they will not require the ``yt.`` prefix, because of how they are loaded. -For example, if I created a plugin file containing: +For example, if one created a plugin file containing: .. code-block:: python @@ -285,8 +288,8 @@ Adding Custom Colormaps To add custom :ref:`colormaps` to your plugin file, you must use the :func:`~yt.visualization.color_maps.make_colormap` function to generate a -colormap of your choice and then add it to the plugin file. You can see -an example of this in :ref:`custom-colormaps`. Remember that you don't need +colormap of your choice and then add it to the plugin file. You can see +an example of this in :ref:`custom-colormaps`. Remember that you don't need to prefix commands in your plugin file with ``yt.``, but you'll only be able to access the colormaps when you load the ``yt.mods`` module, not simply ``yt``. diff --git a/doc/source/reference/demeshening.rst b/doc/source/reference/demeshening.rst index 9a6ab30c89e..cb1aaf848ae 100644 --- a/doc/source/reference/demeshening.rst +++ b/doc/source/reference/demeshening.rst @@ -7,9 +7,9 @@ With yt-4.0, the method by which particles are indexed changed considerably. Whereas in previous versions, particles were indexed based on their position in an octree (the structure of which was determined by particle number density), in yt-4.0 this system was overhauled to utilize a `bitmap -index`_ based on a space-filling +index `_ based on a space-filling curve, using a `enhanced word-aligned -hybrid` boolean array as their +hybrid `_ boolean array as their backend. .. note:: @@ -21,7 +21,7 @@ backend. By avoiding the use of octrees as a base mesh, yt is able to create *much* more accurate SPH visualizations. We have a `gallery demonstrating -this`_ but even in this +this `_ but even in this side-by-side comparison the differences can be seen quite easily, with the left image being from the old, octree-based approach and the right image the new, meshless approach. @@ -39,10 +39,8 @@ load only those particles it needs. .. note:: The theory and implementation of yt's bitmap indexing system is described in - some detail in the `yt 4.0 - paper`_ in the section entitled - `Indexing Discrete-Point - Datasets`_. + some detail in the `yt 4.0 paper `_ + in the section entitled `Indexing Discrete-Point Datasets `_. In brief, however, what this relies on is two numbers, ``index_order1`` and ``index_order2``. These control the "coarse" and "refined" sets of indices, @@ -146,7 +144,7 @@ be read from disk before expensive selection operations are conducted. For those situations that involve particles with regions of influence -- such as smoothed particle hydrodynamics, where particles have associated smoothing -lenghts -- these are taken into account when conducting the indexing system. +lengths -- these are taken into account when conducting the indexing system. Efficiency of Index Orders -------------------------- diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 20b4b725d37..999f0e2be80 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -13,7 +13,9 @@ code. 
code_support command-line api/api + api/modules configuration python_introduction field_list + demeshening changelog diff --git a/doc/source/reference/python_introduction.rst b/doc/source/reference/python_introduction.rst index b12db3ea86b..38e1826c88e 100644 --- a/doc/source/reference/python_introduction.rst +++ b/doc/source/reference/python_introduction.rst @@ -50,22 +50,16 @@ signs. Let's inaugurate the occasion appropriately -- type this:: As you can see, this printed out the string "Hello, world." just as we expected. Now let's try a more advanced string, one with a number in it. For -this we'll use the percent (``%``) operator, which is the manner by which -values are fed into a formatted string. We'll print pi, but only with three -digits of accuracy.:: +this we'll use an "f-string", which is the preferred way to format strings in modern Python. +We'll print pi, but only with three digits of accuracy.:: - >>> print("Pi is precisely %0.2f" % (3.1415926)) + >>> print(f"Pi is precisely {3.1415926:0.2f}") This took the number we fed it (3.1415926) and printed it out as a floating -point number with two decimal places. Now let's try something a bit different +point number with two decimal places. Now let's try something a bit different -- let's print out both the name of the number and its value.:: - >>> print("%s is precisely %0.2f" % ("pi", 3.1415926)) - -As you can see, we used ``%s`` to say that the string should print a value as a -string (the supplied value does not have to be a string -- ``"pi"`` could be -replaced with, for instance, another number!) and then supplied the string to -print, as well. + >>> print(f"{'pi'} is precisely {3.1415926:0.2f}") And there you have it -- the very basics of starting up Python, and some very simple mechanisms for printing values out. Now let's explore a few types of @@ -128,9 +122,6 @@ This works for floating points as well. Now we can do math on these numbers:: >>> print(a + 5.1) >>> print(a / 2.0) -Because of a historical aversion to floating point division in Python (which is -now changing) it's always safest to ensure that either the numerator or the -denominator is a floating point number. Now that we have a couple primitive datatypes, we can move on to sequences -- lists and tuples. These two objects are very similar, in that they are @@ -643,7 +634,7 @@ covers mechanisms for combining arrays of different shapes and sizes, which can be tricky but also extremely powerful. We won't discuss the idea of broadcasting here, simply because I don't know that I could do it justice! The NumPy Docs have a great `section on broadcasting -`_. +`_. Scripted Usage ++++++++++++++ @@ -736,8 +727,8 @@ perspective and interactively. Python and Related References +++++++++++++++++++++++++++++ * `Python quickstart `_ - * `Learn Python the Hard Way `_ + * `Learn Python the Hard Way `_ * `Byte of Python `_ - * `Dive Into Python `_ - * `Numpy docs `_ + * `Dive Into Python `_ + * `Numpy docs `_ * `Matplotlib docs `_ diff --git a/doc/source/sharing_data.rst b/doc/source/sharing_data.rst deleted file mode 100644 index 05d300549df..00000000000 --- a/doc/source/sharing_data.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _sharing-data: - -The yt Hub -========== - -.. contents:: - :depth: 2 - :local: - :backlinks: none - -What is the yt Hub? -------------------- - -The yt data Hub is a mechanism by which images, data objects and projects can be -shared with other people. 
For instance, one can upload a dataset and allow other -people to remotely analyze it with a jupyter notebook or upload notebooks and -view them from any web browser. - -.. note:: All items posted on the hub are public! - -Over time, more widgets will be added, and more datatypes will be able to be -uploaded. If you are interested in adding more ways of sharing data, please -email the developers' list. We would like to add support for 3D widgets such -as isocontours as well as interactive binning and rebinning of data from yt -data objects, to be displayed as phase plots and profiles. - -.. note:: Working with the Hub requires additional dependencies to be installed. - You can obtain them by running: ``pip install yt[hub]``. - -.. _hub-APIkey: - -Obtaining an API key --------------------- - -In order to interact with the yt Hub, you need to obtain API key, which is -available only for authenticated users. You can `log into -`_ the Hub using your Google, GitHub or -Bitbucket account. After you log in, an API key can be generated under the *My -account* page, which can be accessed through the dropdown menu in the upper -right corner. - -.. image:: _static/apiKey01.jpg - :width: 50 % - -Select the *API keys* tab and press *Create new key* button: - -.. image:: _static/apiKey02.jpg - :width: 50 % - -By convention, the *Name* field of API keys can be used to specify what -application is making use of the key in a human-readable way e.g. ``yt -command``, although you may name your key however you want. - -.. image:: _static/apiKey03.jpg - :width: 50 % - -After the API Key is created you can obtain it by clicking *show* link: - -.. image:: _static/apiKey04.jpg - :width: 50 % - -For more information about API keys please see `this document -`__. - -After you have gotten your API key, update your config file: - -.. code-block:: none - - $ yt config set yt hub_api_key 3fd1de56c2114c13a2de4dd51g10974b - -Replace ``3fd1de56c2114c13a2de4dd51g10974b`` with your API key. - -Registering a User -^^^^^^^^^^^^^^^^^^ - -If you do not wish to use OAuth authentication, you can create a Hub account -using ``yt`` command. To register a user: - -.. code-block:: bash - - $ yt hub register - -This will walk you through the process of registering. You will need to supply -a name, a username, a password and an email address. Apart from creating a new -user account, it will also generate an API key and append it to the yt's config -file. At this point, you're ready to go! - -What Can Be Uploaded --------------------- - -Currently, the yt hub can accept these types of data: - - * Raw data files, scripts. - * IPython notebooks: these are stored on the hub and are made available for - download and via the IPython `nbviewer `_ - service. - -How to Upload Data ------------------- - -Uploading data can be performed using the ``girder-cli`` command tool or -directly via the web interface. Please refer to ``girder-cli`` `documentation page -`_ for additional -information. - -Uploading Notebooks -^^^^^^^^^^^^^^^^^^^ - -Notebooks can be uploaded from the bash command line: - -.. code-block:: bash - - yt upload_notebook notebook_file.ipynb - -After the notebook is finished uploading, yt will print a link to the raw -notebook as well as an nbviewer link to the same notebook. Your notebooks will -be stored under your hub Public directory. 
diff --git a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb index 62766865fea..12ab3e9a24b 100644 --- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb +++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb @@ -178,7 +178,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For the change to the camera to take affect, we have to explictly render again: " + "For the change to the camera to take affect, we have to explicitly render again: " ] }, { diff --git a/doc/source/visualizing/callbacks.rst b/doc/source/visualizing/callbacks.rst index 76601767ac8..ec5898cafbb 100644 --- a/doc/source/visualizing/callbacks.rst +++ b/doc/source/visualizing/callbacks.rst @@ -22,7 +22,7 @@ of available callbacks. For example: .. code-block:: python - slc = SlicePlot(ds, 0, "density") + slc = SlicePlot(ds, "x", ("gas", "density")) slc.annotate_title("This is a Density plot") would add the :func:`~yt.visualization.plot_modifications.TitleCallback` to @@ -30,7 +30,7 @@ the plot object. All of the callbacks listed below are available via similar ``annotate_`` functions. To clear one or more annotations from an existing plot, see the -:ref:`clear_annotations() function `. +:ref:`clear_annotations function `. For a brief demonstration of a few of these callbacks in action together, see the cookbook recipe: :ref:`annotations-recipe`. @@ -62,7 +62,7 @@ of the x-plane (i.e. with axes in the y and z directions): import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - s = yt.SlicePlot(ds, "x", "density") + s = yt.SlicePlot(ds, "x", ("gas", "density")) s.set_axes_unit("kpc") # Plot marker and text in data coords @@ -97,7 +97,7 @@ dataset from AMRVAC : import yt ds = yt.load("amrvac/bw_polar_2D0000.dat") - s = yt.plot_2d(ds, "density") + s = yt.plot_2d(ds, ("gas", "density")) s.set_background_color("density", "black") # Plot marker and text in data coords @@ -136,12 +136,15 @@ Clear Callbacks (Some or All) to the plot. Note that the index goes from 0..N, and you can specify the index of the last added annotation as -1. + (This is a proxy for + :func:`~yt.visualization.plot_window.clear_annotations`.) + .. python-script:: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_scale() p.annotate_timestamp() @@ -158,15 +161,18 @@ List Currently Applied Callbacks This function will print a list of each of the currently applied callbacks together with their index. The index can be used with - :ref:`clear_annotations() function ` to remove a + :ref:`clear_annotations() function ` to remove a specific callback. + (This is a proxy for + :func:`~yt.visualization.plot_window.list_annotations`.) + .. 
python-script:: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_scale() p.annotate_timestamp() p.list_annotations() @@ -191,7 +197,7 @@ Overplot Arrow import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc"), center="c") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc"), center="c") slc.annotate_arrow((0.5, 0.5, 0.5), length=0.06, plot_args={"color": "blue"}) slc.save() @@ -218,16 +224,16 @@ Clump Finder Callback ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") data_source = ds.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], (8.0, "kpc"), (1.0, "kpc")) - c_min = 10 ** np.floor(np.log10(data_source["density"]).min()) - c_max = 10 ** np.floor(np.log10(data_source["density"]).max() + 1) + c_min = 10 ** np.floor(np.log10(data_source[("gas", "density")]).min()) + c_max = 10 ** np.floor(np.log10(data_source[("gas", "density")]).max() + 1) - master_clump = Clump(data_source, "density") + master_clump = Clump(data_source, ("gas", "density")) master_clump.add_validator("min_cells", 20) find_clumps(master_clump, c_min, c_max, 2.0) leaf_clumps = master_clump.leaves - prj = yt.ProjectionPlot(ds, 2, "density", center="c", width=(20, "kpc")) + prj = yt.ProjectionPlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) prj.annotate_clumps(leaf_clumps) prj.save("clumps") @@ -253,8 +259,8 @@ Overplot Contours import yt ds = yt.load("Enzo_64/DD0043/data0043") - s = yt.SlicePlot(ds, "x", "density", center="max") - s.annotate_contour("temperature") + s = yt.SlicePlot(ds, "x", ("gas", "density"), center="max") + s.annotate_contour(("gas", "temperature")) s.save() .. 
_annotate-quivers: @@ -288,12 +294,13 @@ Axis-Aligned Data Sources p = yt.ProjectionPlot( ds, "z", - "density", + ("gas", "density"), center=[0.5, 0.5, 0.5], weight_field="density", width=(20, "kpc"), ) - p.annotate_quiver("velocity_x", "velocity_y", factor=16, plot_args={"color": "purple"}) + p.annotate_quiver(("gas", "velocity_x"), ("gas", "velocity_y"), factor=16, + plot_args={"color": "purple"}) p.save() Off-Axis Data Sources @@ -319,10 +326,10 @@ Off-Axis Data Sources import yt ds = yt.load("Enzo_64/DD0043/data0043") - s = yt.OffAxisSlicePlot(ds, [1, 1, 0], ["density"], center="c") + s = yt.OffAxisSlicePlot(ds, [1, 1, 0], [("gas", "density")], center="c") s.annotate_cquiver( - "cutting_plane_velocity_x", - "cutting_plane_velocity_y", + ("gas", "cutting_plane_velocity_x"), + ("gas", "cutting_plane_velocity_y"), factor=10, plot_args={"color": "orange"}, ) @@ -353,7 +360,7 @@ Overplot Grids import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc"), center="max") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc"), center="max") slc.annotate_grids() slc.save() @@ -379,7 +386,7 @@ Overplot Cell Edges import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc"), center="max") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc"), center="max") slc.annotate_cell_edges() slc.save() @@ -434,7 +441,7 @@ Overplot Halo Annotations data_ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") halos_ds = yt.load("rockstar_halos/halos_0.0.bin") - prj = yt.ProjectionPlot(data_ds, "z", "density") + prj = yt.ProjectionPlot(data_ds, "z", ("gas", "density")) prj.annotate_halos(halos_ds, annotate_field="particle_identifier") prj.save() @@ -457,7 +464,7 @@ Overplot a Straight Line import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ProjectionPlot(ds, "z", "density", center="m", width=(10, "kpc")) + p = yt.ProjectionPlot(ds, "z", ("gas", "density"), center="m", width=(10, "kpc")) p.annotate_line((0.3, 0.4), (0.8, 0.9), coord_system="axis") p.save() @@ -493,7 +500,7 @@ Overplot Magnetic Field Quivers "mass_unit": (1e17, "Msun"), }, ) - p = yt.ProjectionPlot(ds, "z", "density", center="c", width=(300, "kpc")) + p = yt.ProjectionPlot(ds, "z", ("gas", "density"), center="c", width=(300, "kpc")) p.annotate_magnetic_field(plot_args={"headlength": 3}) p.save() @@ -515,7 +522,7 @@ Annotate a Point With a Marker import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - s = yt.SlicePlot(ds, "z", "density", center="c", width=(10, "kpc")) + s = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(10, "kpc")) s.annotate_marker((-2, -2), coord_system="plot", plot_args={"color": "blue", "s": 500}) s.save() @@ -544,7 +551,7 @@ Overplotting Particle Positions import yt ds = yt.load("Enzo_64/DD0043/data0043") - p = yt.ProjectionPlot(ds, "x", "density", center="m", width=(10, "Mpc")) + p = yt.ProjectionPlot(ds, "x", ("gas", "density"), center="m", width=(10, "Mpc")) p.annotate_particles((10, "Mpc")) p.save() @@ -555,7 +562,7 @@ To plot only the central particles import yt ds = yt.load("Enzo_64/DD0043/data0043") - p = yt.ProjectionPlot(ds, "x", "density", center="m", width=(10, "Mpc")) + p = yt.ProjectionPlot(ds, "x", ("gas", "density"), center="m", width=(10, "Mpc")) sp = ds.sphere([0.5, 0.5, 0.5], ds.quan(1, "Mpc")) p.annotate_particles((10, "Mpc"), data_source=sp) p.save() @@ -578,7 +585,7 @@ Overplot a Circle on a Plot import yt ds = 
yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ProjectionPlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.ProjectionPlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_sphere([0.5, 0.5, 0.5], radius=(2, "kpc"), circle_args={"color": "black"}) p.save() @@ -606,8 +613,8 @@ Overplot Streamlines import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - s = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) - s.annotate_streamlines("velocity_x", "velocity_y") + s = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) + s.annotate_streamlines(("gas", "velocity_x"), ("gas", "velocity_y")) s.save() .. _annotate-line-integral-convolution: @@ -636,8 +643,8 @@ Overplot Line Integral Convolution import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - s = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) - s.annotate_line_integral_convolution("velocity_x", "velocity_y", lim=(0.5, 0.65)) + s = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) + s.annotate_line_integral_convolution(("gas", "velocity_x"), ("gas", "velocity_y"), lim=(0.5, 0.65)) s.save() .. _annotate-text: @@ -660,7 +667,7 @@ Overplot Text import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - s = yt.SlicePlot(ds, "z", "density", center="max", width=(10, "kpc")) + s = yt.SlicePlot(ds, "z", ("gas", "density"), center="max", width=(10, "kpc")) s.annotate_text((2, 2), "Galaxy!", coord_system="plot") s.save() @@ -681,7 +688,7 @@ Add a Title import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ProjectionPlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.ProjectionPlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_title("Density Plot") p.save() @@ -709,7 +716,7 @@ Overplot Quivers for the Velocity Field import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.SlicePlot(ds, "z", "density", center="m", width=(10, "kpc")) + p = yt.SlicePlot(ds, "z", ("gas", "density"), center="m", width=(10, "kpc")) p.annotate_velocity(plot_args={"headwidth": 4}) p.save() @@ -742,7 +749,7 @@ Add the Current Time and/or Redshift import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_timestamp() p.save() @@ -752,7 +759,7 @@ Add a Physical Scale Bar ~~~~~~~~~~~~~~~~~~~~~~~~ .. 
function:: annotate_scale(corner='lower_right', coeff=None, \ - unit=None, pos=None, + unit=None, pos=None, \ scale_text_format="{scale} {units}", \ max_frac=0.16, min_frac=0.015, \ coord_system='axis', text_args=None, \ @@ -781,7 +788,7 @@ Add a Physical Scale Bar import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.SlicePlot(ds, "z", "density", center="c", width=(20, "kpc")) + p = yt.SlicePlot(ds, "z", ("gas", "density"), center="c", width=(20, "kpc")) p.annotate_scale() p.save() @@ -808,10 +815,10 @@ Annotate Triangle Facets Callback import yt # Load data file - pf = yt.load("MoabTest/fng_usrbin22.h5m") + ds = yt.load("MoabTest/fng_usrbin22.h5m") # Create the desired slice plot - s = yt.SlicePlot(pf, "z", ("moab", "TALLY_TAG")) + s = yt.SlicePlot(ds, "z", ("moab", "TALLY_TAG")) # get triangle vertices from file (in this case hdf5) @@ -847,7 +854,7 @@ Annotate Mesh Lines Callback import yt ds = yt.load("MOOSE_sample_data/out.e") - sl = yt.SlicePlot(ds, 2, ("connect1", "nodal_aux")) + sl = yt.SlicePlot(ds, "z", ("connect1", "nodal_aux")) sl.annotate_mesh_lines(plot_args={"color": "black"}) sl.save() @@ -863,9 +870,9 @@ Overplot the Path of a Ray Adds a line representing the projected path of a ray across the plot. The ray can be either a - :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`, - :class:`~yt.data_objects.selection_data_containers.YTRay`, or a - :class:`~trident.light_ray.LightRay` + :class:`~yt.data_objects.selection_objects.ray.YTOrthoRay`, + :class:`~yt.data_objects.selection_objects.ray.YTRay`, or a + Trident :class:`~trident.light_ray.LightRay` object. annotate_ray() will properly account for periodic rays across the volume. @@ -876,7 +883,7 @@ Overplot the Path of a Ray ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") oray = ds.ortho_ray(0, (0.3, 0.4)) ray = ds.ray((0.1, 0.2, 0.3), (0.6, 0.7, 0.8)) - p = yt.ProjectionPlot(ds, "z", "density") + p = yt.ProjectionPlot(ds, "z", ("gas", "density")) p.annotate_ray(oray) p.annotate_ray(ray) p.save() diff --git a/doc/source/visualizing/colormaps/index.rst b/doc/source/visualizing/colormaps/index.rst index f08cfcba265..e4366ef946e 100644 --- a/doc/source/visualizing/colormaps/index.rst +++ b/doc/source/visualizing/colormaps/index.rst @@ -115,7 +115,7 @@ available in a local window: yt.show_colormaps() -or to output just a few colormaps to an image file, try: +or to output the original yt colormaps to an image file, try: .. code-block:: python @@ -144,12 +144,12 @@ callback: .. code-block:: python ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ProjectionPlot(ds, "z", "density") + p = yt.ProjectionPlot(ds, "z", ("gas", "density")) - p.set_cmap(field="density", cmap="jet") + p.set_cmap(field=("gas", "density"), cmap="turbo") p.save("proj_with_jet_cmap.png") - p.set_cmap(field="density", cmap="hot") + p.set_cmap(field=("gas", "density"), cmap="hot") p.save("proj_with_hot_cmap.png") For more information about the callbacks available to Plot Window objects, diff --git a/doc/source/visualizing/geographic_projections_and_transforms.rst b/doc/source/visualizing/geographic_projections_and_transforms.rst index fc4caa76b23..14ca5c3e010 100644 --- a/doc/source/visualizing/geographic_projections_and_transforms.rst +++ b/doc/source/visualizing/geographic_projections_and_transforms.rst @@ -49,9 +49,9 @@ would execute the following commands: .. 
code-block:: bash - brew install proj geos - brew upgrade proj geos - pip install --no-binary :all: shapely cartopy + $ brew install proj geos + $ brew upgrade proj geos + $ python -m pip install --no-binary :all: shapely cartopy On ubuntu you'll need to install the following packages: ``libproj-dev``, diff --git a/doc/source/visualizing/interactive_data_visualization.rst b/doc/source/visualizing/interactive_data_visualization.rst index d04facc4473..1de537d1c9b 100644 --- a/doc/source/visualizing/interactive_data_visualization.rst +++ b/doc/source/visualizing/interactive_data_visualization.rst @@ -3,94 +3,10 @@ Interactive Data Visualization ============================== -In version 3.3 of yt, an experimental, hardware-accelerated interactive volume -renderer was introduced. This interactive renderer is based on OpenGL and -natively understands adaptive mesh refinement data; this enables -(GPU) memory-efficient loading of data. The data is copied from CPU memory -onto the GPU as a series of 3D textures, which are then rendered to an -interactive window. The window itself is the view from a conceptual "camera", -which can be rotated, zoomed, and so on. The color of each displayed pixel is -computed by a "fragment shader" which is executed on each grid that is -displayed. The fragment shaders currently implemented in yt enable computing -(and then mapping to a colormap) the maximum value along each pixel's line of -sight and an unweighted integration of values along each pixel's line of sight -(and subsequent mapping to a colormap.) An experimental transfer function -shader has been implemented, but is not yet functioning correctly. For more -information, see :ref:`projection-types`. +The interactive, OpenGL-based volume rendering system for yt has been exported +into its own package, called ``yt_idv``. -A comprehensive description of the OpenGL volume rendering is beyond the scope -of this document. However, a more detailed explanation can be found in `this -guide `_. - -Much of the Interactive Data Visualization (IDV) interface is designed to -mimic the interface available for software volume rendering (see -:ref:`volume_rendering`) so that in future versions API compatibility may lead -to greater code reuse both for scripts that create visualizations and for -internal visualization objects. - -Installation -^^^^^^^^^^^^ - -In order to use Interactive Data Visualization (IDV) you need to install -`PyOpenGL `_ and `cyglfw3 -`_ along with their respective -dependencies, e.g. `glfw3 `_ is required to be installed -before you can ``pip install cyglfw3``. Please carefully read installation -instructions provided on pypi pages of both packages. - -Using the interactive renderer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can simply pass dataset to :meth:`~yt.interactive_render`. By default -it will load all data and render gas density: - -.. code-block:: python - - import yt - - ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - yt.interactive_render(ds) - -Alternatively you can provide a data object as a first argument to -:meth:`~yt.interactive_render` if your dataset is too big to fit GPU memory: - -.. 
code-block:: python - - import yt - - ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - sp = ds.sphere("max", (0.1, "Mpc")) - - cam_pos = ds.arr([0.1, 0.1, 0.1], "Mpc").in_units("code_length") - yt.interactive_render( - sp, field="pressure", cam_position=cam_pos, window_size=(512, 512) - ) - -A successful call to :meth:`~yt.interactive_render` should create a new window -called *vol_render*. - -.. image:: _images/idv.jpg - :width: 1000 - -By default it renders a Maximum Intensity Projection of the density field (see -:ref:`projection-types` for more information). The rendering can be -dynamically modified using the following keybindings: - -1 - Switch to MIP fragment shader -2 - Switch to integration fragment shader -L - Switch between linear and logarithmic scales -W - Zoom in the camera -S - Zoom out the camera -C - Change the colormap - -Pressing the *h* key will print all the available key bindings in a terminal window. -The camera can be moved around by holding a left mouse button while moving the mouse. - -More advanced initialization of interactive volume renderer can be found in -:ref:`cookbook-opengl_vr`. +Documentation, including installation instructions, can be found at `its +website `_, and the source code is +hosted under the yt-project organization on github at `yt_idv +`_. diff --git a/doc/source/visualizing/manual_plotting.rst b/doc/source/visualizing/manual_plotting.rst index d702bd17959..56dd140c053 100644 --- a/doc/source/visualizing/manual_plotting.rst +++ b/doc/source/visualizing/manual_plotting.rst @@ -44,14 +44,14 @@ of any data two-dimensional data object: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - c = ds.find_max("density")[1] - proj = ds.proj("density", 0) + _, c = ds.find_max(("gas", "density")) + proj = ds.proj(("gas", "density"), 0) width = (10, "kpc") # we want a 1.5 mpc view res = [1000, 1000] # create an image with 1000x1000 pixels frb = proj.to_frb(width, res, center=c) - plt.imshow(np.array(frb["density"])) + plt.imshow(np.array(frb["gas", "density"])) plt.savefig("my_perfect_figure.png") Note that in the above example the axes tick marks indicate pixel indices. If you @@ -93,7 +93,7 @@ using them matters. frb = slc.to_frb((20, "kpc"), 512) frb.apply_gauss_beam(nbeam=30, sigma=2.0) frb.apply_white_noise(5e-23) - plt.imshow(frb["density"].d) + plt.imshow(frb["gas", "density"].d) plt.savefig("frb_filters.png") Currently available filters: @@ -130,7 +130,7 @@ This is perhaps the simplest thing to do. yt provides a number of one dimensional objects, and these return a 1-D numpy array of their contents with direct dictionary access. As a simple example, take a :class:`~yt.data_objects.selection_data_containers.YTOrthoRay` object, which can be -created from a index by calling ``pf.ortho_ray(axis, center)``. +created from a index by calling ``ds.ortho_ray(axis, center)``. .. python-script:: @@ -143,7 +143,7 @@ created from a index by calling ``pf.ortho_ray(axis, center)``. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - c = ds.find_max("density")[1] + _, c = ds.find_max(("gas", "density")) ax = 0 # take a line cut along the x axis # cutting through the y0,z0 such that we hit the max density @@ -151,13 +151,13 @@ created from a index by calling ``pf.ortho_ray(axis, center)``. 
# Sort the ray values by 'x' so there are no discontinuities # in the line plot - srt = np.argsort(ray["x"]) + srt = np.argsort(ray["index", "x"]) plt.subplot(211) - plt.semilogy(np.array(ray["x"][srt]), np.array(ray["density"][srt])) + plt.semilogy(np.array(ray["index", "x"][srt]), np.array(ray["gas", "density"][srt])) plt.ylabel("density") plt.subplot(212) - plt.semilogy(np.array(ray["x"][srt]), np.array(ray["temperature"][srt])) + plt.semilogy(np.array(ray["index", "x"][srt]), np.array(ray["gas", "temperature"][srt])) plt.xlabel("x") plt.ylabel("temperature") diff --git a/doc/source/visualizing/mapserver.rst b/doc/source/visualizing/mapserver.rst index d0eeb50066f..a175c38d028 100644 --- a/doc/source/visualizing/mapserver.rst +++ b/doc/source/visualizing/mapserver.rst @@ -3,13 +3,19 @@ Mapserver - A Google-Maps-like Interface to your Data ----------------------------------------------------- -The mapserver is a new, experimental feature. It's based on `Leaflet +The mapserver is an experimental feature. It's based on `Leaflet `_, a library written to create zoomable, map-tile interfaces. (Similar to Google Maps.) yt provides everything you need to start up a web server that will interactively re-pixelize an adaptive image. This means you can explore your datasets in a fully pan-n-zoom interface. +.. note:: + + Previous versions of yt bundled the necessary dependencies, but with more + recent released you will need to install the package ``bottle`` via pip or + conda. + To start up the mapserver, you can use the command yt (see :ref:`command-line`) with the ``mapserver`` subcommand. It takes several of the same options and arguments as the ``plot`` subcommand. For instance: @@ -30,5 +36,4 @@ dragging drags. :scale: 50% This is also functional on touch-capable devices such as Android Tablets and -iPads/iPhones. In future versions, we hope to add halo-overlays and -markers-of-interest to this. +iPads/iPhones. diff --git a/doc/source/visualizing/plots.rst b/doc/source/visualizing/plots.rst index 73e178f965b..971ca569f37 100644 --- a/doc/source/visualizing/plots.rst +++ b/doc/source/visualizing/plots.rst @@ -4,6 +4,15 @@ How to Make Plots ================= +.. note:: + + In this document, and the rest of the yt documentation, we use field tuples; + for instance, we specify density as ``("gas", "density")`` whereas in + previous versions of this document we typically just used ``"density"``. + While the latter will still work in many or most cases, and may suffice for + your purposes, for ensuring we explicitly avoid ambiguity we use field tuples + here. + In this section we explain how to use yt to create visualizations of simulation data, derived fields, and the data produced by yt analysis objects. For details about the data extraction and @@ -22,7 +31,7 @@ profiles (phase plots), all of which are described below. Viewing Plots ------------- -YT uses an environment neutral plotting mechanism that detects the appropriate +yt uses an environment neutral plotting mechanism that detects the appropriate matplotlib configuration for a given environment, however it defaults to a basic renderer. To utilize interactive plots in matplotlib supported environments (Qt, GTK, WX, etc.) simply call the ``toggle_interactivity()`` function. Below is an @@ -82,7 +91,7 @@ opened and stored in ``ds``: .. 
code-block:: python - slc = yt.SlicePlot(ds, "z", "density") + slc = yt.SlicePlot(ds, "z", ("gas", "density")) slc.save() These two commands will create a slice object and store it in a variable we've @@ -95,7 +104,7 @@ stick around, you can accomplish the same thing in one line: .. code-block:: python - yt.SlicePlot(ds, "z", "density").save() + yt.SlicePlot(ds, "z", ("gas", "density")).save() It's nice to keep the slice object around if you want to modify the plot. By default, the plot width will be set to the size of the simulation box. To zoom @@ -104,7 +113,7 @@ object: .. code-block:: python - slc = yt.SlicePlot(ds, "z", "density") + slc = yt.SlicePlot(ds, "z", ("gas", "density")) slc.zoom(10) slc.save("zoom") @@ -112,13 +121,14 @@ This will save a new plot to disk with a different filename - prepended with 'zoom' instead of the name of the dataset. If you want to set the width manually, you can do that as well. For example, the following sequence of commands will create a slice, set the width of the plot to 10 kiloparsecs, and -save it to disk. +save it to disk, with the filename prefix being ``10kpc`` and the rest determined +by the field, visualization method, etc. .. code-block:: python from yt.units import kpc - slc = yt.SlicePlot(ds, "z", "density") + slc = yt.SlicePlot(ds, "z", ("gas", "density")) slc.set_width(10 * kpc) slc.save("10kpc") @@ -140,7 +150,9 @@ and the width of the plot: .. code-block:: python - yt.SlicePlot(ds, "z", "density", center=[0.2, 0.3, 0.8], width=(10, "kpc")).save() + yt.SlicePlot( + ds, "z", ("gas", "density"), center=[0.2, 0.3, 0.8], width=(10, "kpc") + ).save() Note that, by default, :class:`~yt.visualization.plot_window.SlicePlot` shifts the @@ -158,14 +170,14 @@ the following alternative options for the ``center`` keyword: where for the last two objects any spatial field, such as ``"density"``, ``"velocity_z"``, -etc., may be used, e.g. ``center=("min","temperature")``. +etc., may be used, e.g. ``center=("min", ("gas", "temperature"))``. The effective resolution of the plot (i.e. the number of resolution elements in the image itself) can be controlled with the ``buff_size`` argument: .. code-block:: python - yt.SlicePlot(ds, "z", "density", buff_size=(1000, 1000)) + yt.SlicePlot(ds, "z", ("gas", "density"), buff_size=(1000, 1000)) Here is an example that combines all of the options we just discussed. @@ -179,7 +191,7 @@ Here is an example that combines all of the options we just discussed. slc = yt.SlicePlot( ds, "z", - "density", + ("gas", "density"), center=[0.5, 0.5, 0.5], width=(20, "kpc"), buff_size=(1000, 1000), @@ -200,7 +212,7 @@ into the data. For example: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "pressure", center="c") + slc = yt.SlicePlot(ds, "z", ("gas", "pressure"), center="c") slc.save() slc.zoom(30) slc.save("zoom") @@ -238,8 +250,8 @@ of a three-dimensional one: import yt ds = yt.load("WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030") - p = yt.plot_2d(ds, "density", center=[1.0, 0.4]) - p.set_log("density", False) + p = yt.plot_2d(ds, ("gas", "density"), center=[1.0, 0.4]) + p.set_log(("gas", "density"), False) p.save() See :func:`~yt.visualization.plot_window.plot_2d` for the full description @@ -268,7 +280,7 @@ plane, and the name of the fields to plot. 
Just like an ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") L = [1, 1, 0] # vector normal to cutting plane north_vector = [-1, 1, 0] - cut = yt.SlicePlot(ds, L, "density", width=(25, "kpc"), north_vector=north_vector) + cut = yt.SlicePlot(ds, L, ("gas", "density"), width=(25, "kpc"), north_vector=north_vector) cut.save() In this case, a normal vector for the cutting plane is supplied in the second @@ -295,10 +307,10 @@ example: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") prj = yt.ProjectionPlot( ds, - 2, - "temperature", + "z", + ("gas", "temperature"), width=25 * kpc, - weight_field="density", + weight_field=("gas", "density"), buff_size=(1000, 1000), ) prj.save() @@ -398,7 +410,7 @@ projection through a simulation. W = [0.02, 0.02, 0.02] c = [0.5, 0.5, 0.5] N = 512 - image = yt.off_axis_projection(ds, c, L, W, N, "density") + image = yt.off_axis_projection(ds, c, L, W, N, ("gas", "density")) yt.write_image(np.log10(image), "%s_offaxis_projection.png" % ds) Here, ``W`` is the width of the projection in the x, y, *and* z @@ -418,7 +430,7 @@ to project along, and a field to project. For example: L = [1, 1, 0] # vector normal to cutting plane north_vector = [-1, 1, 0] prj = yt.OffAxisProjectionPlot( - ds, L, "density", width=(25, "kpc"), north_vector=north_vector + ds, L, ("gas", "density"), width=(25, "kpc"), north_vector=north_vector ) prj.save() @@ -496,7 +508,7 @@ a dataset that uses 6-node wedge elements: import yt ds = yt.load("MOOSE_sample_data/wedge_out.e") - sl = yt.SlicePlot(ds, 2, ("connect2", "diffused")) + sl = yt.SlicePlot(ds, "z", ("connect2", "diffused")) sl.save() Slices can also be used to examine 2D unstructured mesh datasets, but the @@ -508,7 +520,7 @@ an example using another MOOSE dataset that uses triangular mesh elements: import yt ds = yt.load("MOOSE_sample_data/out.e") - sl = yt.SlicePlot(ds, 2, ("connect1", "nodal_aux")) + sl = yt.SlicePlot(ds, "z", ("connect1", "nodal_aux")) sl.save() You may run into situations where you have a variable you want to visualize that @@ -534,16 +546,16 @@ Below are some important caveats to note when visualizing particle data. However, axis-aligned slice plots (as described in :ref:`slice-plots`) will work. -2. Off axis projections (as in :ref:`off-axis-projection`) will only work +2. Off axis projections (as in :ref:`off-axis-projections`) will only work for SPH particles, i.e., particles that have a defined smoothing length. Two workaround methods are available for plotting non-SPH particles with off-axis projections. 1. :ref:`smooth-non-sph` - this method involves extracting particle data to be - reloaded with :ref:`~yt.loaders.load_particles` and using the - :ref:`~yt.frontends.stream.data_structures.add_SPH_fields` function to - create smoothing lengths. This works well for relatively small datasets, + reloaded with :class:`~yt.loaders.load_particles` and using the + :class:`~yt.frontends.stream.data_structures.StreamParticlesDataset.add_sph_fields` + function to create smoothing lengths. This works well for relatively small datasets, but is not parallelized and may take too long for larger data. 2. Plot from a saved @@ -554,7 +566,7 @@ projections. This second method is illustrated below. First, construct one of the grid data objects listed above. 
Then, use the -:func:`~yt.data_objects.data_containers.YTDataContainer.save_as_dataset` +:class:`~yt.data_objects.data_containers.YTDataContainer.save_as_dataset` function (see :ref:`saving_data`) to save a deposited particle field (see :ref:`deposited-particle-fields`) as a reloadable dataset. This dataset can then be loaded and visualized using both off-axis projections and slices. @@ -565,7 +577,7 @@ Note, the change in the field name from ``("deposit", "nbody_mass")`` to import yt - ds = yt.load("gadget_cosmology_plus/snap_N128L16_132.hdf5") + ds = yt.load("gizmo_cosmology_plus/snap_N128L16_132.hdf5") # create a 128^3 covering grid over the entire domain L = 7 cg = ds.covering_grid(level=L, left_edge=ds.domain_left_edge, dims=[2**L]*3) @@ -587,7 +599,7 @@ will modify the following plot. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.save() Panning and zooming @@ -604,7 +616,7 @@ deltas. from yt.units import kpc ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.pan((2 * kpc, 2 * kpc)) slc.save() @@ -616,7 +628,7 @@ in units relative to the field of view of the plot. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.pan_rel((0.1, -0.1)) slc.save() @@ -627,7 +639,7 @@ in units relative to the field of view of the plot. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.zoom(2) slc.save() @@ -642,7 +654,7 @@ the axes unit labels. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_axes_unit("Mpc") slc.save() @@ -660,8 +672,8 @@ the customization of the units used for the image and colorbar. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) - slc.set_unit("density", "Msun/pc**3") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) + slc.set_unit(("gas", "density"), "Msun/pc**3") slc.save() If the unit you would like to convert to needs an equivalency, this can be @@ -674,8 +686,8 @@ an energy unit instead of a temperature unit: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "temperature", width=(10, "kpc")) - slc.set_unit("temperature", "keV", equivalency="thermal") + slc = yt.SlicePlot(ds, "z", ("gas", "temperature"), width=(10, "kpc")) + slc.set_unit(("gas", "temperature"), "keV", equivalency="thermal") slc.save() Set the plot center @@ -690,7 +702,7 @@ two element tuples. 
import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_center((0.5, 0.503)) slc.save() @@ -709,7 +721,7 @@ by the user at initialization: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") # slicing with non right-handed coordinates - slc = yt.SlicePlot(ds, "x", "velocity_x", right_handed=False) + slc = yt.SlicePlot(ds, "x", ("gas", "velocity_x"), right_handed=False) slc.annotate_title("Not Right Handed") slc.save("NotRightHanded.png") @@ -732,7 +744,7 @@ minimal plots that focus on the data: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.hide_colorbar() slc.hide_axes() slc.save() @@ -752,7 +764,7 @@ customization. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_font({"family": "sans-serif", "style": "italic", "weight": "bold", "size": 24}) slc.save() @@ -772,8 +784,8 @@ Use any of the colormaps listed in the :ref:`colormaps` section. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) - slc.set_cmap("density", "RdBu_r") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) + slc.set_cmap(("gas", "density"), "RdBu_r") slc.save() The :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_log` function @@ -786,8 +798,8 @@ linear. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) - slc.set_log("density", False) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) + slc.set_log(("gas", "density"), False) slc.save() Specifically, a field containing both positive and negative values can be plotted @@ -812,7 +824,7 @@ As an example, p.save() Symlog is very versatile, and will work with positive or negative dataset ranges. -Here is an example using symlog scaling to plot a postive field with a linear range of +Here is an example using symlog scaling to plot a positive field with a linear range of ``(0, linthresh)``. .. python-script:: @@ -820,8 +832,8 @@ Here is an example using symlog scaling to plot a postive field with a linear ra import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "x-velocity", width=(30, "kpc")) - slc.set_log("x-velocity", True, linthresh=1.0e1) + slc = yt.SlicePlot(ds, "z", ("gas", "velocity_x"), width=(30, "kpc")) + slc.set_log(("gas", "velocity_x"), True, linthresh=1.0e1) slc.save() The :meth:`~yt.visualization.plot_container.ImagePlotContainer.set_background_color` @@ -834,10 +846,10 @@ value of the color map. 
import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(1.5, "Mpc")) - slc.set_background_color("density") + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(1.5, "Mpc")) + slc.set_background_color(("gas", "density")) slc.save("bottom_colormap_background") - slc.set_background_color("density", color="black") + slc.set_background_color(("gas", "density"), color="black") slc.save("black_background") If you would like to change the background for a plot and also hide the axes, @@ -864,8 +876,8 @@ function makes it possible to set a custom colormap range. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) - slc.set_zlim("density", 1e-30, 1e-25) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) + slc.set_zlim(("gas", "density"), 1e-30, 1e-25) slc.save() Annotations @@ -881,7 +893,7 @@ For example: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.annotate_grids() slc.save() @@ -906,7 +918,7 @@ image to see the difference more clearly. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_figure_size(10) slc.save() @@ -918,7 +930,7 @@ To change the resolution of the image, call the import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_buff_size(1600) slc.save() @@ -941,7 +953,7 @@ function for the colorbar axis. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - slc = yt.SlicePlot(ds, "z", "density", width=(10, "kpc")) + slc = yt.SlicePlot(ds, "z", ("gas", "density"), width=(10, "kpc")) slc.set_minorticks("all", False) slc.set_colorbar_minorticks("all", False) slc.save() @@ -960,8 +972,8 @@ accessed via the ``plots`` dictionary attached to each .. code-block:: python - slc = SlicePlot(ds, 2, ["density", "temperature"]) - dens_plot = slc.plots["density"] + slc = SlicePlot(ds, 2, [("gas", "density"), ("gas", "temperature")]) + dens_plot = slc.plots["gas", "density"] In this example ``dens_plot`` is an instance of :class:`~yt.visualization.plot_window.WindowPlotMPL`, an object that wraps the @@ -1010,7 +1022,7 @@ to be profiled. 
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") my_galaxy = ds.disk(ds.domain_center, [0.0, 0.0, 1.0], 10 * kpc, 3 * kpc) - plot = yt.ProfilePlot(my_galaxy, "density", ["temperature"]) + plot = yt.ProfilePlot(my_galaxy, ("gas", "density"), [("gas", "temperature")]) plot.save() This will create a :class:`~yt.data_objects.selection_data_containers.YTDisk` @@ -1028,7 +1040,7 @@ For instance: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") my_sphere = ds.sphere([0.5, 0.5, 0.5], (100, "kpc")) - plot = yt.ProfilePlot(my_sphere, "temperature", ["mass"], weight_field=None) + plot = yt.ProfilePlot(my_sphere, ("gas", "temperature"), [("gas", "mass")], weight_field=None) plot.save() Note that because we have specified the weighting field to be ``None``, the @@ -1050,10 +1062,16 @@ generate a plot of the enclosed mass in a sphere: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") my_sphere = ds.sphere([0.5, 0.5, 0.5], (100, "kpc")) plot = yt.ProfilePlot( - my_sphere, "radius", ["mass"], weight_field=None, accumulation=True + my_sphere, "radius", [("gas", "mass")], weight_field=None, accumulation=True ) plot.save() +Notably, above we have specified the field tuple for the mass, but not for the +``radius`` field. The ``radius`` field will not be ambiguous, but if you want +to ensure that it refers to the radius of the cells on which the "gas" field +type is defined, you can specify it using the field tuple ``("index", +"radius")``. + You can also access the data generated by profiles directly, which can be useful for overplotting average quantities on top of phase plots, or for exporting and plotting multiple profiles simultaneously from a time series. @@ -1063,12 +1081,14 @@ The profiled fields can be accessed from the dictionary ``field_data``. .. code-block:: python - plot = ProfilePlot(my_sphere, "temperature", ["mass"], weight_field=None) + plot = ProfilePlot( + my_sphere, ("gas", "temperature"), [("gas", "mass")], weight_field=None + ) profile = plot.profiles[0] # print the bin field, in this case temperature print(profile.x) # print the profiled mass field - print(profile["mass"]) + print(profile["gas", "mass"]) Other options, such as the number of bins, are also configurable. See the documentation for :class:`~yt.visualization.profile_plotter.ProfilePlot` for @@ -1102,8 +1122,8 @@ method and then given to the ProfilePlot object. profiles.append( yt.create_profile( ad, - ["temperature"], - fields=["mass"], + [("gas", "temperature")], + fields=[("gas", "mass")], weight_field=None, accumulation=True, ) @@ -1139,10 +1159,10 @@ This function accepts a dictionary of ``(max, min)`` tuples keyed to field names sp = ds.sphere("m", 10 * u.kpc) profiles = yt.create_profile( sp, - "temperature", - "density", + ("gas", "temperature"), + ("gas", "density"), weight_field=None, - extrema={"temperature": (1e3, 1e7), "density": (1e-26, 1e-22)}, + extrema={("gas", "temperature"): (1e3, 1e7), ("gas", "density"): (1e-26, 1e-22)}, ) plot = yt.ProfilePlot.from_profiles(profiles) plot.save() @@ -1165,9 +1185,9 @@ does not accept a field name as the first argument. 
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sp = ds.sphere("m", 10 * u.kpc) - plot = yt.ProfilePlot(sp, "temperature", "density", weight_field=None) + plot = yt.ProfilePlot(sp, ("gas", "temperature"), ("gas", "density"), weight_field=None) plot.set_xlim(1e3, 1e7) - plot.set_ylim("density", 1e-26, 1e-22) + plot.set_ylim(("gas", "density"), 1e-26, 1e-22) plot.save() @@ -1189,8 +1209,8 @@ masses per cubic parsec as a function of radius in kiloparsecs. ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sp = ds.sphere("m", 10 * u.kpc) - plot = yt.ProfilePlot(sp, "radius", "density", weight_field=None) - plot.set_unit("density", "msun/pc**3") + plot = yt.ProfilePlot(sp, "radius", ("gas", "density"), weight_field=None) + plot.set_unit(("gas", "density"), "msun/pc**3") plot.set_unit("radius", "kpc") plot.save() @@ -1214,8 +1234,8 @@ negative, we set the scaling to be linear for this field. ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sp = ds.sphere("m", 10 * u.kpc) - plot = yt.ProfilePlot(sp, "radius", "x-velocity", weight_field=None) - plot.set_log("x-velocity", False) + plot = yt.ProfilePlot(sp, "radius", ("gas", "velocity_x"), weight_field=None) + plot.set_log(("gas", "velocity_x"), False) plot.save() Setting axis labels @@ -1238,9 +1258,9 @@ function of radius. The xlabel is set to "Radius", for all plots, and the ylabel ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") ad = ds.all_data() - plot = yt.ProfilePlot(ad, "density", ["temperature", "velocity_x"], weight_field=None) + plot = yt.ProfilePlot(ad, "radius", [("gas", "temperature"), ("gas", "velocity_x")], weight_field=None) plot.set_xlabel("Radius") - plot.set_ylabel("velocity_x", "velocity in x direction") + plot.set_ylabel(("gas", "velocity_x"), "velocity in x direction") plot.save() Adding plot title @@ -1260,7 +1280,7 @@ In the following example we create a plot and set the plot title. ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") ad = ds.all_data() - plot = yt.ProfilePlot(ad, "density", ["temperature"], weight_field=None) + plot = yt.ProfilePlot(ad, ("gas", "density"), [("gas", "temperature")], weight_field=None) plot.annotate_title("Temperature vs Density Plot") plot.save() @@ -1274,23 +1294,23 @@ specific plot. ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") sphere = ds.sphere("max", (1.0, "Mpc")) profiles = [] - profiles.append(yt.create_profile(sphere, ["radius"], fields=["density"], n_bins=64)) + profiles.append(yt.create_profile(sphere, ["radius"], fields=[("gas", "density")], n_bins=64)) profiles.append( yt.create_profile(sphere, ["radius"], fields=["dark_matter_density"], n_bins=64) ) plot = yt.ProfilePlot.from_profiles(profiles) - plot.annotate_title("Plot Title: Density", "density") + plot.annotate_title("Plot Title: Density", ("gas", "density")) plot.annotate_title("Plot Title: Dark Matter Density", "dark_matter_density") plot.save() -Here, ``plot.annotate_title("Plot Title: Density", "density")`` will only set the plot title for the ``"density"`` +Here, ``plot.annotate_title("Plot Title: Density", ("gas", "density"))`` will only set the plot title for the ``"density"`` field. Thus, allowing us the option to have different plot titles for different fields. Annotating plot with text ~~~~~~~~~~~~~~~~~~~~~~~~~ -Plots can be annotated at a desired (x,y) co-ordinate using :meth:`~yt.visualization.profile_plotter.ProfilePlot.annotate_text` function. +Plots can be annotated at a desired (x,y) coordinate using :meth:`~yt.visualization.profile_plotter.ProfilePlot.annotate_text` function. 
This function accepts the x-position, y-position, a text string to be annotated in the plot area, and an optional list of fields for annotating plots with the specified field. Furthermore, any keyword argument accepted by the matplotlib ``axes.text`` function could also be passed which will can be useful to change fontsize, text-alignment, text-color or other such properties of annotated text. @@ -1303,15 +1323,18 @@ In the following example we create a plot and add a simple annotation. ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046") ad = ds.all_data() - plot = yt.ProfilePlot(ad, "density", ["temperature"], weight_field=None) + plot = yt.ProfilePlot(ad, ("gas", "density"), [("gas", "temperature")], weight_field=None) plot.annotate_text(1e-30, 1e7, "Annotated Text") plot.save() -To add annotations to a particular set of fields we need to pass in the list of fields as follows: +To add annotations to a particular set of fields we need to pass in the list of fields as follows, +where ``"ftype1"`` and ``"ftype2"`` are the field types (and may be the same): .. code-block:: python - plot.annotate_text(1e-30, 1e7, "Annotation", ["field1", "field2"]) + plot.annotate_text( + 1e-30, 1e7, "Annotation", [("ftype1", "field1"), ("ftype2", "field2")] + ) To change the text annotated text properties, we need to pass the matplotlib ``axes.text`` arguments as follows: @@ -1397,10 +1420,10 @@ respectively. The below code snippet combines all the features we've discussed: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - plot = yt.LinePlot(ds, "density", [0, 0, 0], [1, 1, 1], 512) - plot.annotate_legend("density") + plot = yt.LinePlot(ds, ("gas", "density"), [0, 0, 0], [1, 1, 1], 512) + plot.annotate_legend(("gas", "density")) plot.set_x_unit("cm") - plot.set_unit("density", "kg/cm**3") + plot.set_unit(("gas", "density"), "kg/cm**3") plot.save() If a list of fields is passed to ``LinePlot``, yt will create a number of @@ -1459,7 +1482,7 @@ temperature bins, you can do: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") my_sphere = ds.sphere("c", (50, "kpc")) plot = yt.PhasePlot( - my_sphere, "density", "temperature", ["mass"], weight_field=None + my_sphere, ("gas", "density"), ("gas", "temperature"), [("gas", "mass")], weight_field=None ) plot.save() @@ -1474,7 +1497,7 @@ something like: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") my_sphere = ds.sphere("c", (50, "kpc")) - plot = yt.PhasePlot(my_sphere, "density", "temperature", ["H_fraction"]) + plot = yt.PhasePlot(my_sphere, ("gas", "density"), ("gas", "temperature"), [("gas", "H_p0_fraction")]) plot.save() Customizing Phase Plots @@ -1497,9 +1520,9 @@ and ``show_colorbar``. rvir = ds.quan(1e-1, "Mpccm/h") sph = ds.sphere(center, rvir) - plot = yt.PhasePlot(sph, "density", "temperature", "mass", weight_field=None) - plot.set_unit("density", "Msun/pc**3") - plot.set_unit("mass", "Msun") + plot = yt.PhasePlot(sph, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"), weight_field=None) + plot.set_unit(("gas", "density"), "Msun/pc**3") + plot.set_unit(("gas", "mass"), "Msun") plot.set_xlim(1e-5, 1e1) plot.set_ylim(1, 1e7) plot.save() @@ -1518,14 +1541,14 @@ limits. 
The following example illustrates this workflow: center = ds.arr([64.0, 64.0, 64.0], "code_length") rvir = ds.quan(1e-1, "Mpccm/h") sph = ds.sphere(center, rvir) - units = dict(density="Msun/pc**3", cell_mass="Msun") - extrema = dict(density=(1e-5, 1e1), temperature=(1, 1e7)) + units = {("gas", "density"): "Msun/pc**3", ("gas", "mass"): "Msun"} + extrema = {("gas", "density"): (1e-5, 1e1), ("gas", "temperature"): (1, 1e7)} profile = yt.create_profile( sph, - ["density", "temperature"], + [("gas", "density"), ("gas", "temperature")], n_bins=[128, 128], - fields=["mass"], + fields=[("gas", "mass")], weight_field=None, units=units, extrema=extrema, @@ -1569,7 +1592,7 @@ is to use the convenience routine. This has the syntax: .. code-block:: python - p = yt.ParticlePlot(ds, "particle_position_x", "particle_position_y") + p = yt.ParticlePlot(ds, ("all", "particle_position_x"), ("all", "particle_position_y")) p.save() Here, ``ds`` is a dataset we've previously opened. The commands create a particle @@ -1590,7 +1613,7 @@ For instance, we can zoom in: .. code-block:: python - p = yt.ParticlePlot(ds, "particle_position_x", "particle_position_y") + p = yt.ParticlePlot(ds, ("all", "particle_position_x"), ("all", "particle_position_y")) p.zoom(10) p.save("zoom") @@ -1604,7 +1627,7 @@ or change the axis units: .. code-block:: python - p.set_unit("particle_position_x", "Mpc") + p.set_unit(("all", "particle_position_x"), "Mpc") Here is a full example that shows the simplest way to use :class:`~yt.visualization.particle_plots.ParticlePlot`: @@ -1614,7 +1637,7 @@ Here is a full example that shows the simplest way to use import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticlePlot(ds, "particle_position_x", "particle_position_y") + p = yt.ParticlePlot(ds, ("all", "particle_position_x"), ("all", "particle_position_y")) p.save() In the above examples, we are simply splatting particle x and y positions onto @@ -1627,8 +1650,8 @@ similar to a projection. import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticlePlot(ds, "particle_position_x", "particle_position_y", "particle_mass") - p.set_unit("particle_mass", "Msun") + p = yt.ParticlePlot(ds, ("all", "particle_position_x"), ("all", "particle_position_y"), ("all", "particle_mass")) + p.set_unit(("all", "particle_mass"), "Msun") p.zoom(32) p.save() @@ -1642,12 +1665,12 @@ pixel is the weighted average along the line of sight. 
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") p = yt.ParticlePlot( ds, - "particle_position_x", - "particle_position_y", - "particle_mass", - weight_field="particle_ones", + ("all", "particle_position_x"), + ("all", "particle_position_y"), + ("all", "particle_mass"), + weight_field=("all", "particle_ones"), ) - p.set_unit("particle_mass", "Msun") + p.set_unit(("all", "particle_mass"), "Msun") p.zoom(32) p.save() @@ -1681,12 +1704,12 @@ to set the colorbar and shows off some of the modification functions for ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") p = yt.ParticlePlot( ds, - "particle_position_x", - "particle_position_y", - "particle_mass", + ("all", "particle_position_x"), + ("all", "particle_position_y"), + ("all", "particle_mass"), width=(0.5, 0.5), ) - p.set_unit("particle_mass", "Msun") + p.set_unit(("all", "particle_mass"), "Msun") p.zoom(32) p.annotate_title("Zoomed-in Particle Plot") p.save() @@ -1710,10 +1733,10 @@ of ``particle_position_x`` versus ``particle_velocity_z``, with the ``particle_m import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticlePlot(ds, "particle_position_x", "particle_velocity_z", ["particle_mass"]) - p.set_unit("particle_position_x", "Mpc") - p.set_unit("particle_velocity_z", "km/s") - p.set_unit("particle_mass", "Msun") + p = yt.ParticlePlot(ds, ("all", "particle_position_x"), ("all", "particle_velocity_z"), ("all", "particle_mass")) + p.set_unit(("all", "particle_position_x"), "Mpc") + p.set_unit(("all", "particle_velocity_z"), "km/s") + p.set_unit(("all", "particle_mass"), "Msun") p.save() and here is one with the particle x and y velocities on the plot axes: @@ -1723,10 +1746,10 @@ and here is one with the particle x and y velocities on the plot axes: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticlePlot(ds, "particle_velocity_x", "particle_velocity_y", "particle_mass") - p.set_unit("particle_velocity_x", "km/s") - p.set_unit("particle_velocity_y", "km/s") - p.set_unit("particle_mass", "Msun") + p = yt.ParticlePlot(ds, ("all", "particle_velocity_x"), ("all", "particle_velocity_y"), ("all", "particle_mass")) + p.set_unit(("all", "particle_velocity_x"), "km/s") + p.set_unit(("all", "particle_velocity_y"), "km/s") + p.set_unit(("all", "particle_mass"), "Msun") p.set_ylim(-400, 400) p.set_xlim(-400, 400) p.save() @@ -1743,8 +1766,8 @@ domain: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticleProjectionPlot(ds, 2, ["particle_mass"], width=(0.5, 0.5), depth=0.01) - p.set_unit("particle_mass", "Msun") + p = yt.ParticleProjectionPlot(ds, 2, [("all", "particle_mass")], width=(0.5, 0.5), depth=0.01) + p.set_unit(("all", "particle_mass"), "Msun") p.save() and here is an example of using the ``data_source`` argument to :class:`~yt.visualization.particle_plots.ParticlePhasePlot` @@ -1759,11 +1782,14 @@ to only consider the particles that lie within a 50 kpc sphere around the domain my_sphere = ds.sphere("c", (50.0, "kpc")) p = yt.ParticlePhasePlot( - my_sphere, "particle_velocity_x", "particle_velocity_y", "particle_mass" + my_sphere, + ("all", "particle_velocity_x"), + ("all", "particle_velocity_y"), + ("all", "particle_mass") ) - p.set_unit("particle_velocity_x", "km/s") - p.set_unit("particle_velocity_y", "km/s") - p.set_unit("particle_mass", "Msun") + p.set_unit(("all", "particle_velocity_x"), "km/s") + p.set_unit(("all", "particle_velocity_y"), "km/s") + p.set_unit(("all", "particle_mass"), "Msun") p.set_ylim(-400, 400) p.set_xlim(-400, 400) @@ -1782,8 +1808,8 @@ mass 
density: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ParticleProjectionPlot(ds, 2, ["particle_mass"], width=(0.5, 0.5), density=True) - p.set_unit("particle_mass", "Msun/kpc**2") # Note that the dimensions reflect the density flag + p = yt.ParticleProjectionPlot(ds, 2, [("all", "particle_mass")], width=(0.5, 0.5), density=True) + p.set_unit(("all", "particle_mass"), "Msun/kpc**2") # Note that the dimensions reflect the density flag p.save() Finally, with 1D and 2D Profiles, you can create a :class:`~yt.data_objects.profiles.ParticleProfile` @@ -1803,16 +1829,16 @@ pixel, instead of the total: profile = yt.create_profile( ad, - ["particle_velocity_x", "particle_velocity_y"], - ["particle_mass"], + [("all", "particle_velocity_x"), ("all", "particle_velocity_y")], + [("all", "particle_mass")], n_bins=800, - weight_field="particle_ones", + weight_field=("all", "particle_ones"), ) p = yt.ParticlePhasePlot.from_profile(profile) - p.set_unit("particle_velocity_x", "km/s") - p.set_unit("particle_velocity_y", "km/s") - p.set_unit("particle_mass", "Msun") + p.set_unit(("all", "particle_velocity_x"), "km/s") + p.set_unit(("all", "particle_velocity_y"), "km/s") + p.set_unit(("all", "particle_mass"), "Msun") p.set_ylim(-400, 400) p.set_xlim(-400, 400) p.save() @@ -1851,8 +1877,8 @@ plot and then call ``.show()`` and the image will appear inline: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p = yt.ProjectionPlot(ds, "z", "density", center='m', width=(10,'kpc'), - weight_field='density') + p = yt.ProjectionPlot(ds, "z", ("gas", "density"), center='m', width=(10,'kpc'), + weight_field=("gas", "density")) p.set_figure_size(5) p.show() @@ -1870,7 +1896,7 @@ the plot filenames. If you don't care what the filenames are, just calling the import yt ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100") - slc = yt.SlicePlot(ds, "z", ["kT", "density"], width=(500.0, "kpc")) + slc = yt.SlicePlot(ds, "z", [("gas", "kT"), ("gas", "density")], width=(500.0, "kpc")) slc.save() which will yield PNG plots with the filenames @@ -1947,14 +1973,14 @@ be separated from the easy part (generating images). The intermediate slice, projection, and profile objects can be saved as reloadable datasets, then handed back to the plotting machinery discussed here. -For slices and projections, the savable object is associated with the +For slices and projections, the saveable object is associated with the plot object as ``data_source``. This can be saved with the -:func:`~yt.data_objects.data_containers.save_as_dataset`` function. For +:func:`~yt.data_objects.data_containers.YTDataContainer.save_as_dataset` function. For more information, see :ref:`saving_data`. .. code-block:: python - p = yt.ProjectionPlot(ds, "x", "density", weight_field="density") + p = yt.ProjectionPlot(ds, "x", ("gas", "density"), weight_field=("gas", "density")) fn = p.data_source.save_as_dataset() This function will optionally take a ``filename`` keyword that follows @@ -1968,7 +1994,9 @@ arguments. One can now continue to tweak the figure to one's liking. .. code-block:: python new_ds = yt.load(fn) - new_p = yt.ProjectionPlot(new_ds, "x", "density", weight_field="density") + new_p = yt.ProjectionPlot( + new_ds, "x", ("gas", "density"), weight_field=("gas", "density") + ) new_p.save() The same functionality is available for profile and phase plots. In @@ -1980,13 +2008,20 @@ For ``ProfilePlot``: .. 
code-block:: python ad = ds.all_data() - p1 = yt.ProfilePlot(ad, "density", "temperature", weight_field="mass") + p1 = yt.ProfilePlot( + ad, ("gas", "density"), ("gas", "temperature"), weight_field=("gas", "mass") + ) # note that ProfilePlots can hold a list of profiles fn = p1.profiles[0].save_as_dataset() new_ds = yt.load(fn) - p2 = yt.ProfilePlot(new_ds.data, "density", "temperature", weight_field="mass") + p2 = yt.ProfilePlot( + new_ds.data, + ("gas", "density"), + ("gas", "temperature"), + weight_field=("gas", "mass"), + ) p2.save() For ``PhasePlot``: @@ -1994,11 +2029,19 @@ For ``PhasePlot``: .. code-block:: python ad = ds.all_data() - p1 = yt.PhasePlot(ad, "density", "temperature", "mass", weight_field=None) + p1 = yt.PhasePlot( + ad, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"), weight_field=None + ) fn = p1.profile.save_as_dataset() new_ds = yt.load(fn) - p2 = yt.PhasePlot(new_ds.data, "density", "temperature", "mass", weight_field=None) + p2 = yt.PhasePlot( + new_ds.data, + ("gas", "density"), + ("gas", "temperature"), + ("gas", "mass"), + weight_field=None, + ) p2.save() .. _eps-writer: @@ -2020,10 +2063,7 @@ filesize. .. note:: PyX must be installed, which can be accomplished either manually - with ``pip install pyx`` or with the install script by setting - ``INST_PYX=1``. If you are using python2, you must install pyx - version 0.12.1 with ``pip install pyx==0.12.1``, since that is - the last version with python2 support. + with ``python -m pip install pyx``. This module can take any of the plots mentioned above and create an EPS or PDF figure. For example, @@ -2032,7 +2072,7 @@ EPS or PDF figure. For example, import yt.visualization.eps_writer as eps - slc = yt.SlicePlot(ds, "z", "density") + slc = yt.SlicePlot(ds, "z", ("gas", "density")) slc.set_width(25, "kpc") eps_fig = eps.single_plot(slc) eps_fig.save_fig("zoom", format="eps") @@ -2060,7 +2100,14 @@ from a PlotWindow. For example, import yt.visualization.eps_writer as eps slc = yt.SlicePlot( - ds, "z", ["density", "temperature", "pressure", "velocity_magnitude"] + ds, + "z", + [ + ("gas", "density"), + ("gas", "temperature"), + ("gas", "pressure"), + ("gas", "velocity_magnitude"), + ], ) slc.set_width(25, "kpc") eps_fig = eps.multiplot_yt(2, 2, slc, bare_axes=True) @@ -2083,17 +2130,25 @@ an example that includes slices and phase plots: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - p1 = SlicePlot(ds, 0, "density") + p1 = SlicePlot(ds, "x", ("gas", "density")) p1.set_width(10, "kpc") - p2 = SlicePlot(ds, 0, "temperature") + p2 = SlicePlot(ds, "x", ("gas", "temperature")) p2.set_width(10, "kpc") - p2.set_cmap("temperature", "hot") + p2.set_cmap(("gas", "temperature"), "hot") sph = ds.sphere(ds.domain_center, (10, "kpc")) - p3 = PhasePlot(sph, "radius", "density", "temperature", weight_field="mass") + p3 = PhasePlot( + sph, + "radius", + ("gas", "density"), + ("gas", "temperature"), + weight_field=("gas", "mass"), + ) - p4 = PhasePlot(sph, "radius", "density", "pressure", "mass") + p4 = PhasePlot( + sph, "radius", ("gas", "density"), ("gas", "pressure"), weight_field=("gas", "mass") + ) mp = multiplot_yt( 2, diff --git a/doc/source/visualizing/sketchfab.rst b/doc/source/visualizing/sketchfab.rst index 15d16ae529b..2218a7d7b5c 100644 --- a/doc/source/visualizing/sketchfab.rst +++ b/doc/source/visualizing/sketchfab.rst @@ -57,14 +57,14 @@ value. 
For example: ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sphere = ds.sphere("max", (1.0, "Mpc")) - surface = ds.surface(sphere, "density", 1e-27) + surface = ds.surface(sphere, ("gas", "density"), 1e-27) This object, ``surface``, can be queried for values on the surface. For instance: .. code-block:: python - print(surface["temperature"].min(), surface["temperature"].max()) + print(surface["gas", "temperature"].min(), surface["gas", "temperature"].max()) will return the values 11850.7476943 and 13641.0663899. These values are interpolated to the face centers of every triangle that constitutes a portion @@ -122,12 +122,12 @@ Now you can run a script like this: bounds = [[dd.center[i] - 250 * kpc, dd.center[i] + 250 * kpc] for i in range(3)] - surf = ds.surface(dd, "density", rho) + surf = ds.surface(dd, ("gas", "density"), rho) upload_id = surf.export_sketchfab( title="galaxy0030 - 1e-28", description="Extraction of Density (colored by temperature) at 1e-28 g/cc", - color_field="temperature", + color_field=("gas", "temperature"), color_map="hot", color_log=True, bounds=bounds, @@ -172,9 +172,12 @@ galaxy simulation: sphere = ds.sphere("max", (1.0, "Mpc")) for i, r in enumerate(rho): - surf = ds.surface(sphere, "density", r) + surf = ds.surface(sphere, ("gas", "density"), r) surf.export_obj( - filename, transparency=trans[i], color_field="temperature", plot_index=i + filename, + transparency=trans[i], + color_field=("gas", "temperature"), + plot_index=i, ) The calling sequence is fairly similar to the ``export_ply`` function @@ -246,18 +249,18 @@ to output one more type of variable on your surfaces. For example: def emissivity(field, data): - return data["density"] * data["density"] * np.sqrt(data["temperature"]) + return data["gas", "density"] ** 2 * np.sqrt(data["gas", "temperature"]) add_field("emissivity", function=_Emissivity, sampling_type="cell", units=r"g*K/cm**6") sphere = ds.sphere("max", (1.0, "Mpc")) for i, r in enumerate(rho): - surf = ds.surface(sphere, "density", r) + surf = ds.surface(sphere, ("gas", "density"), r) surf.export_obj( filename, transparency=trans[i], - color_field="temperature", + color_field=("gas", "temperature"), emit_field="emissivity", plot_index=i, ) @@ -274,7 +277,7 @@ scripts in Blender. For example, on a Mac, you would modify the file "/Applications/Blender/blender.app/Contents/MacOS/2.65/scripts/addons/io_scene_obj/import_obj.py", in the function "create_materials" with: -.. code-block:: patch +.. code-block:: diff # ... diff --git a/doc/source/visualizing/streamlines.rst b/doc/source/visualizing/streamlines.rst index ffc3f19b8a0..5c81ce94e90 100644 --- a/doc/source/visualizing/streamlines.rst +++ b/doc/source/visualizing/streamlines.rst @@ -78,9 +78,9 @@ Example Script streamlines = Streamlines( ds, pos, - "velocity_x", - "velocity_y", - "velocity_z", + ("gas", "velocity_x"), + ("gas", "velocity_y"), + ("gas", "velocity_z"), length=1.0 * Mpc, get_magnitude=True, ) @@ -88,7 +88,8 @@ Example Script # Create a 3D plot, trace the streamlines through the 3D volume of the plot fig = plt.figure() - ax = Axes3D(fig) + ax = Axes3D(fig, auto_add_to_figure=False) + fig.add_axes(ax) for stream in streamlines.streamlines: stream = stream[np.all(stream != 0.0, axis=1)] ax.plot3D(stream[:, 0], stream[:, 1], stream[:, 2], alpha=0.1) @@ -121,6 +122,7 @@ Example Script .. 
code-block:: python + import matplotlib.pyplot as plt import yt from yt.visualization.api import Streamlines @@ -128,7 +130,7 @@ Example Script streamlines = Streamlines(ds, ds.domain_center) streamlines.integrate_through_volume() stream = streamlines.path(0) - matplotlib.pylab.semilogy(stream["t"], stream["density"], "-x") + plt.semilogy(stream["t"], stream["gas", "density"], "-x") Running in Parallel diff --git a/doc/source/visualizing/unstructured_mesh_rendering.rst b/doc/source/visualizing/unstructured_mesh_rendering.rst index 17ee373046e..1efe80f8e83 100644 --- a/doc/source/visualizing/unstructured_mesh_rendering.rst +++ b/doc/source/visualizing/unstructured_mesh_rendering.rst @@ -15,29 +15,17 @@ below, or you can skip to the examples. Optional Embree Installation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To install yt with Embree support, you can install yt from source using the -``install_script.sh`` script. Be sure to set the ``INST_YT_SOURCE``, -``INST_EMBREE``, and ``INST_NETCDF4`` flags to 1 at the top of the script. The -``install_script.sh`` script can be downloaded by doing: +You'll need to `install Python bindings for netCDF4 `_. +Then you'll need to get Embree itself and its corresponding Python bindings (pyembree). +For conda-based systems, this is trivial, see +`pyembree's doc `_ -.. code-block:: bash - - wget https://raw.githubusercontent.com/yt-project/yt/main/doc/install_script.sh - -and then run like so: - -.. code-block:: bash - - bash install_script.sh - -Alternatively, you can install the additional dependencies by hand. -First, you will need to install Embree, either by compiling from source +For systems other than conda, you will need to install Embree first, either by +`compiling from source `_ or by using one of the pre-built binaries available at Embree's `downloads `_ page. -Second, the python bindings for Embree (called -`pyembree `_) must also be installed. To -do so, first obtain a copy, by .e.g. cloning the repo: +Then you'll want to install pyembree from source as follows. .. code-block:: bash @@ -53,11 +41,12 @@ usr/local. To account for this, you would do: CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install -Once Embree and pyembree are installed, you must rebuild yt from source in order to use -the unstructured mesh rendering capability. Once again, if embree is installed in a -location that is not part of your default search path, you must tell yt where to find it. -There are a number of ways to do this. One way is to again manually pass in the flags -when running the setup script in the yt-git directory: +Once Embree and pyembree are installed, a,d in order to use the unstructured +mesh rendering capability, you must :ref:`rebuild yt from source +`, . Once again, if embree is installed in a location that +is not part of your default search path, you must tell yt where to find it. +There are a number of ways to do this. One way is to again manually pass in the +flags when running the setup script in the yt-git directory: .. code-block:: bash @@ -461,7 +450,7 @@ file with a fixed camera position: .. 
code-block:: python - import pylab as plt + import matplotlib.pyplot as plt import yt from yt.visualization.volume_rendering.api import MeshSource diff --git a/doc/source/visualizing/volume_rendering.rst b/doc/source/visualizing/volume_rendering.rst index 67b3673a722..ebf5c40d134 100644 --- a/doc/source/visualizing/volume_rendering.rst +++ b/doc/source/visualizing/volume_rendering.rst @@ -209,7 +209,7 @@ simulation: # Plot the transfer function, along with the CDF of the density field to # see how the transfer function corresponds to structure in the CDF - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) # save the image, flooring especially bright pixels for better contrast sc.save("rendering.png", sigma_clip=6.0) @@ -232,7 +232,7 @@ For fun, let's make the same volume_rendering, but this time setting source.tfh.set_log(True) source.tfh.grey_opacity = False - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) sc.save("rendering.png", sigma_clip=4.0) @@ -278,7 +278,7 @@ colormap to determine the colors of the layers. source = sc[0] - source.set_field("density") + source.set_field(("gas", "density")) source.set_log(True) bounds = (3e-31, 5e-27) @@ -292,7 +292,7 @@ colormap to determine the colors of the layers. source.tfh.tf = tf source.tfh.bounds = bounds - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) sc.save("rendering.png", sigma_clip=6) @@ -314,7 +314,7 @@ To add a single gaussian layer with a color determined by a colormap value, use source = sc[0] - source.set_field("density") + source.set_field(("gas", "density")) source.set_log(True) bounds = (3e-31, 5e-27) @@ -328,7 +328,7 @@ To add a single gaussian layer with a color determined by a colormap value, use source.tfh.tf = tf source.tfh.bounds = bounds - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) sc.save("rendering.png", sigma_clip=6) @@ -351,7 +351,7 @@ If you would like to add a gaussian with a customized color or no color, use source = sc[0] - source.set_field("density") + source.set_field(("gas", "density")) source.set_log(True) bounds = (3e-31, 5e-27) @@ -365,7 +365,7 @@ If you would like to add a gaussian with a customized color or no color, use source.tfh.tf = tf source.tfh.bounds = bounds - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) sc.save("rendering.png", sigma_clip=6) @@ -392,7 +392,7 @@ the volume rendering. source = sc[0] - source.set_field("density") + source.set_field(("gas", "density")) source.set_log(True) bounds = (3e-31, 5e-27) @@ -413,7 +413,7 @@ the volume rendering. 
source.tfh.tf = tf source.tfh.bounds = bounds - source.tfh.plot("transfer_function.png", profile_field="density") + source.tfh.plot("transfer_function.png", profile_field=("gas", "density")) sc.save("rendering.png", sigma_clip=6) @@ -635,8 +635,8 @@ function to quickly set up defaults is: # load the data ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - # volume render the 'density' field, and save the resulting image - im, sc = yt.volume_render(ds, "density", fname="rendering.png") + # volume render the ("gas", "density") field, and save the resulting image + im, sc = yt.volume_render(ds, ("gas", "density"), fname="rendering.png") # im is the image array generated. it is also saved to 'rendering.png'. # sc is an instance of a Scene object, which allows you to further refine @@ -664,7 +664,7 @@ function. Example: import yt ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - sc = yt.create_scene(ds, "density") + sc = yt.create_scene(ds, ("gas", "density")) source = sc[0] diff --git a/doc/source/yt3differences.rst b/doc/source/yt3differences.rst index a6a5b904b1b..a1bf41b8304 100644 --- a/doc/source/yt3differences.rst +++ b/doc/source/yt3differences.rst @@ -18,7 +18,7 @@ Updating to yt 3.0 from Old Versions (and going back) ----------------------------------------------------- First off, you need to update your version of yt to yt 3.0. If you're -installing yt for the first time, please visit :ref:`getting-and-installing-yt`. +installing yt for the first time, please visit :ref:`installing-yt`. If you already have a version of yt installed, you should just need one command: @@ -29,17 +29,12 @@ command: This will update yt to the most recent version and rebuild the source base. If you installed using the installer script, it will assure you have all of the latest dependencies as well. This step may take a few minutes. To test -to make sure yt is running, try: +that yt is correctly installed, try: .. code-block:: bash - $ yt --help + $ python -c "import yt" -If you receive no errors, then you are ready to go. If you have -an error, then consult :ref:`update-errors` for solutions. - -If you want to switch back to an old version of yt (2.x), see -:ref:`switching-between-yt-versions`. .. _transitioning-to-3.0: diff --git a/doc/source/yt4differences.rst b/doc/source/yt4differences.rst index 7b0547d9491..b9e4309fdf4 100644 --- a/doc/source/yt4differences.rst +++ b/doc/source/yt4differences.rst @@ -36,7 +36,7 @@ The list below is arranged in order of most to least important changes. In the past, you could specify fields as strings like ``"density"``, but with the growth of yt and its many derived fields, there can be sometimes be overlapping field names (e.g., ``("gas", "density")`` and - ``("PartType0", "density")``, where yt doesn't know which to use. To remove + ``("PartType0", "density")``), where yt doesn't know which to use. To remove any ambiguity, it is now strongly recommended to explicitly specify the full tuple form of all fields. Just search for all field accesses in your scripts, and replace strings with tuples (e.g. replace ``"a"`` with @@ -70,9 +70,9 @@ The list below is arranged in order of most to least important changes. Fields representing energy and momentum quantities are now given names which reflect their dimensionality. For example, the ``("gas", "kinetic_energy")`` field was actually a field for kinetic energy density, and so it has been - renamed to ``"gas", "kinetic_energy_density"``. The old name still exists + renamed to ``("gas", "kinetic_energy_density")``. 
The old name still exists as an alias as of yt v4.0.0, but it will be removed in yt v4.1.0. See - :ref:`deprecated_field_names` below for more information. + next item below for more information. Other examples include ``"gas", "specific_thermal_energy"`` for thermal energy per unit mass, and ``("gas", "momentum_density_x")`` for the x-axis component of momentum density. See :ref:`efields` for more information. @@ -150,7 +150,7 @@ As mentioned, previously operations such as slice, projection and arbitrary grids would smooth the particle data onto the global octree. As this is no longer used, a different approach was required to visualize the SPH data. Using SPLASH as inspiration, SPH smoothing pixelization operations were created using -smooting operations via "scatter" and "gather" approaches. We estimate the +smoothing operations via "scatter" and "gather" approaches. We estimate the contributions of a particle to a single pixel by considering the point at the centre of the pixel and using the standard SPH smoothing formula. The heavy lifting in these functions is undertaken by cython functions. @@ -180,7 +180,7 @@ method: In the above example the ``covering_grid`` and the ``arbitrary_grid`` will return the same data. In fact, these containers are very similar but provide a -slighlty different API. +slightly different API. The above code can be modified to use the gather approach by changing a global setting for the dataset. This can be achieved with @@ -194,7 +194,7 @@ disable the normalization for all future interpolations. The gather approach requires finding nearest neighbors using the KDTree. The first call will generate a KDTree for the entire dataset which will be stored in -a sidecar file. This will be loaded whenever neccesary. +a sidecar file. This will be loaded whenever necessary. Off-Axis Projection for SPH Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -225,7 +225,7 @@ Smoothing Data onto an Octree ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Whilst the move away from the global octree is a promising one in terms of -perfomance and dealing with SPH data in a more intuitive manner, it does remove +performance and dealing with SPH data in a more intuitive manner, it does remove a useful feature. We are aware that many users will have older scripts which take advantage of the global octree. @@ -266,6 +266,8 @@ search (DFS) means that tree starts refining at the root node (this is the largest node which contains every particles) and refines as far as possible along each branch before backtracking. +.. _yt-units-is-now-unyt: + ``yt.units`` Is Now a Wrapper for ``unyt`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -361,8 +363,18 @@ You can also use alternate unit names in more complex algebraic unit expressions In this example the common british spelling ``"kilometre"`` is resolved to ``"km"`` and ``"hour"`` is resolved to ``"hr"``. +Field-Specific Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can now set configuration values on a per-field basis. For instance, this +means that if you always want a particular colormap associated with a particular +field, you can do so! + +This is documented under :ref:`per-field-config`, and was added in `PR +1931 `_. + New Method for Accessing Sample Datasets -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There is now a function entitled ``load_sample()`` that allows the user to automatically load sample data from the yt hub in a local yt session. 
@@ -372,5 +384,24 @@ and load them into a yt session, but now this occurs from within a python session. For more information see: :ref:`Loading Sample Data ` -API Changes ------------ +Some Widgets +^^^^^^^^^^^^ + +In yt, we now have some simple display wrappers for objects if you are running +in a Jupyter environment with the `ipywidgets +`_ package installed. For instance, the +``ds.fields`` object will now display field information in an interactive +widget, and three-element unyt arrays (such as ``ds.domain_left_edge``) will be +displayed interactively as well. + +The package `widgyts `_ provides interactive, +yt-specific visualization of slices, projections, and additional dataset display +information. + +New External Packages +^^^^^^^^^^^^^^^^^^^^^ + +As noted above (:ref:`yt-units-is-now-unyt`), unyt has been extracted from +yt, and we now use it as an external library. In addition, other parts of yt +such as :ref:`interactive_data_visualization` have been extracted, and we are +working toward a more modular approach for things such as Jupyter widgets and other "value-added" integrations. diff --git a/pyproject.toml b/pyproject.toml index 926ebb1d6fa..a870568cac5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,23 +1,26 @@ [build-system] -# See https://github.com/scipy/scipy/pull/10431 for the AIX issue. requires = [ "setuptools>=19.6", - "wheel", - - # keep in sync with travis.yml "minimal" specs (Cython and numpy for py36) + # see https://github.com/numpy/numpy/pull/18389 + "wheel>=0.36.2", # cython version is imposed by that of numpy, see release notes # https://github.com/numpy/numpy/releases/tag/v1.19.2 - "Cython>=0.26.1; python_version=='3.6'", - "Cython>=0.29.21; python_version>='3.7'", - "numpy==1.13.3; python_version=='3.6' and platform_system!='AIX'", - "numpy>=1.19.2; python_version>='3.7' and platform_system!='AIX'", + # Cython 3.0 is the next version after 0.29, and a major change, + # we forbid it until we can properly test against it + "Cython>=0.26.1,<3.0; python_version=='3.6'", + "Cython>=0.29.21,<3.0; python_version>='3.7'", + "oldest-supported-numpy", ] [tool.black] line-length = 88 -# note : 'py39' is not an available option as of black 19.10b0 -target-version = ['py36', 'py37', 'py38'] +target-version = [ + 'py36', + 'py37', + 'py38', + 'py39', +] include = '\.pyi?$' exclude = ''' /( diff --git a/scripts/iyt b/scripts/iyt index d00b60c7dd7..de0a1700d09 100755 --- a/scripts/iyt +++ b/scripts/iyt @@ -8,7 +8,12 @@ from IPython.terminal.interactiveshell import TerminalInteractiveShell from yt.data_objects.data_containers import YTDataContainer from yt.mods import * +from yt._maintenance.deprecation import issue_deprecation_warning +issue_deprecation_warning( + "The iyt script is no longer maintained and targeted for removal.", + since="4.1.0", removal="4.2.0", +) namespace = locals().copy() namespace.pop("__builtins__", None) @@ -32,7 +37,7 @@ except ImportError: ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc, display_banner = True) if "DISPLAY" in os.environ: - ip_shell.enable_pylab(import_all=False) + ip_shell.enable_matplotlib(import_all=False) # The rest is a modified version of the IPython default profile code diff --git a/setup.cfg b/setup.cfg index c7bea81087e..f1ec9d0025c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = yt -version = 4.0.dev0 +version = 4.1.dev0 description = An analysis and visualization toolkit for volumetric data long_description = file: README.md long_description_content_type = 
text/markdown @@ -38,16 +38,18 @@ project_urls = packages = find: install_requires = ewah-bool-utils>=0.1.0 - IPython>=1.0 - matplotlib!=3.4.2,>=2.0.2,<3.5 + ipython>=1.0 + matplotlib!=3.4.2,>=2.0.2,<3.6 more-itertools>=8.4 - numpy>=1.10.4 + numpy>=1.13.3 + packaging>=20.9 + pyyaml>=4.2b1 setuptools>=19.6 sympy>=1.2 toml>=0.10.2 tqdm>=3.4.0 - unyt>=2.7.2 -python_requires = >=3.6 + unyt>=2.8.0 +python_requires = >=3.6,<3.12 include_package_data = True scripts = scripts/iyt zip_safe = False @@ -59,10 +61,52 @@ nose.plugins.0.10 = answer-testing = yt.utilities.answer_testing.framework:AnswerTesting [options.extras_require] -hub = - girder_client +doc = + alabaster + bottle + nbconvert==5.6.1 + pyregion + pyx>=0.15 + runnotebook + sphinx==3.1.2 + sphinx-bootstrap-theme + sphinx-rtd-theme +full = + astropy>=4.0.1,<5.0.0 + f90nml>=1.1.2 + fastcache~=1.0.2 + glueviz~=0.13.3 + h5py>=3.1.0,<4.0.0 + libconf~=1.0.1 + miniballcpp>=0.2.1 + mpi4py~=3.0.3 + netCDF4~=1.5.3 + pandas~=1.1.2 + pooch>=0.7.0 + pyaml~=17.10.0 + pykdtree~=1.3.1 + pyqt5~=5.15.2 + pyx~=0.15 + requests~=2.20.0 + scipy~=1.5.0 + xarray~=0.16.1 mapserver = bottle +minimal = + ipython==1.0.0 + matplotlib==2.0.2 + more-itertools==8.4 + numpy==1.13.3 + sympy==1.2 + unyt==2.8.0 +test = + codecov~=2.0.15 + coverage~=4.5.1 + nose~=1.3.7 + nose-exclude + nose-timer~=1.0.0 + pytest>=6.1 + pytest-xdist~=2.1.0 [flake8] max-line-length = 88 diff --git a/setup.py b/setup.py index 417be462e0b..803d6699cbe 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ install_ccompiler() -VERSION = "4.0.dev0" +VERSION = "4.1.dev0" if os.path.exists("MANIFEST"): os.remove("MANIFEST") diff --git a/setupext.py b/setupext.py index 2378bc76731..5824fec6c20 100644 --- a/setupext.py +++ b/setupext.py @@ -12,7 +12,6 @@ from distutils.ccompiler import CCompiler, new_compiler from distutils.errors import CompileError, LinkError from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion from subprocess import PIPE, Popen from sys import platform as _platform @@ -374,32 +373,10 @@ def create_build_ext(lib_exts, cythonize_aliases): class build_ext(_build_ext): # subclass setuptools extension builder to avoid importing cython and numpy # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869 + # NOTE: this is likely not necessary anymore since + # pyproject.toml was introduced in the project + def finalize_options(self): - try: - import cython - import numpy - except ImportError as e: - raise ImportError( - """Could not import cython or numpy. Building yt from source requires - cython and numpy to be installed. Please install these packages using - the appropriate package manager for your python environment.""" - ) from e - if LooseVersion(cython.__version__) < LooseVersion("0.26.1"): - # keep in sync with pyproject.toml [build-system] - raise RuntimeError( - """Building yt from source requires Cython 0.26.1 or newer but - Cython %s is installed. Please update Cython using the appropriate - package manager for your python environment.""" - % cython.__version__ - ) - if LooseVersion(numpy.__version__) < LooseVersion("1.13.3"): - # keep in sync with pyproject.toml [build-system] - raise RuntimeError( - """Building yt from source requires NumPy 1.13.3 or newer but - NumPy %s is installed.
Please update NumPy using the appropriate - package manager for your python environment.""" - % numpy.__version__ - ) from Cython.Build import cythonize # Override the list of extension modules diff --git a/tests/cartopy_requirements.txt b/tests/cartopy_requirements.txt new file mode 100644 index 00000000000..bddb74fdffe --- /dev/null +++ b/tests/cartopy_requirements.txt @@ -0,0 +1,5 @@ +# cartopy and its dependencies are non-trivial to install +# so we can't easily put these specs into setup.cfg +shapely +--no-binary=shapely +cartopy~=0.18.0 diff --git a/tests/ci_install.sh b/tests/ci_install.sh index 3c9b51d12e4..cc97035027e 100644 --- a/tests/ci_install.sh +++ b/tests/ci_install.sh @@ -36,43 +36,34 @@ cp tests/matplotlibrc . # Step 1: pre-install required packages if [[ "${RUNNER_OS}" == "Windows" ]] && [[ ${dependencies} != "minimal" ]]; then - # Install some dependencies using conda (if not doing a minimal run) - CYTHON=$(grep cython tests/test_prerequirements.txt) - NUMPY=$(grep numpy tests/test_prerequirements.txt) - - CARTOPY=$(grep cartopy tests/test_requirements.txt) - H5PY=$(grep h5py tests/test_requirements.txt) - MATPLOTLIB=$(grep matplotlib tests/test_requirements.txt) - SCIPY=$(grep scipy tests/test_requirements.txt) - conda config --set always_yes yes - conda info -a - conda install --quiet --yes -c conda-forge \ - $CYTHON $NUMPY $CARTOPY $H5PY $MATPLOTLIB $SCIPY + # windows_conda_requirements.txt is a holdover from test_requirements.txt + # keep in sync: setup.cfg + while read requirement; do conda install --yes $requirement; done < tests/windows_conda_requirements.txt else python -m pip install --upgrade pip python -m pip install --upgrade wheel python -m pip install --upgrade setuptools fi -# Step 2: install required packages (depending on whether the build is minimal) +# Step 2: install deps and yt if [[ ${dependencies} == "minimal" ]]; then - # Ensure numpy and cython are installed so dependencies that need to be built - # don't error out - # The first numpy to support py3.6 is 1.12, but numpy 1.13 matches - # unyt so we'll match it here. - python -m pip install numpy==1.13.3 cython==0.26.1 - python -m pip install -r tests/test_minimal_requirements.txt + python -m pip install -e .[test,minimal] else - # Getting cartopy installed requires getting cython and numpy installed - # first; this is potentially going to be fixed with the inclusion of - # pyproject.toml in cartopy. - # These versions are pinned, so we will need to update/remove them when - # the hack is no longer necessary. - python -m pip install -r tests/test_prerequirements.txt - CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" python -m pip install -r tests/test_requirements.txt -fi + # Cython and numpy are build-time requirements for the following optional deps in yt + # - cartopy + # - netcdf4 + # - pyqt5 + # However, the build system is not properly specified in these projects at the moment, + # which means we have to install the build-time requirements first. + # It is possible that these problems will be fixed in the future if upstream projects + # include a pyproject.toml file or use any pip-compatible solution to remedy this. + python -m pip install "numpy>=1.19.4" "cython~=0.29.21" -# Step 3: install yt -python -m pip install -e . + # this is required for cartopy.
It should normally be specified in our setup.cfg as + # cartopy[plotting] + # However it doesn't work on Ubuntu 18.04 (used in CI at the time of writing) + python -m pip install shapely --no-binary=shapely + CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" python -m pip install -e .[test,full] +fi set +x diff --git a/tests/pytest_runner.py b/tests/pytest_runner.py index ea9620f2cf2..bf9b0f1b7ac 100644 --- a/tests/pytest_runner.py +++ b/tests/pytest_runner.py @@ -17,7 +17,7 @@ pytest_args = [ "-s", "-v", - "-rsfE", # it means -r "sfE" (show skiped, failed, errors), no -r -s -f -E + "-rsfE", # it means -r "sfE" (show skipped, failed, errors), no -r -s -f -E "--with-answer-testing", "-m answer_test", f"-n {int(os.environ.get('NUM_WORKERS', 1))}", diff --git a/tests/report_failed_answers.py b/tests/report_failed_answers.py index 433756544d8..8cbfa6c7f55 100644 --- a/tests/report_failed_answers.py +++ b/tests/report_failed_answers.py @@ -63,7 +63,7 @@ def generate_failed_answers_html(failed_answers): the answer tests.

- Acutal Image: plot generated while running the test
+ Actual Image: plot generated while running the test
Expected Image: golden answer image
Difference Image: difference in the "actual" and "expected" image diff --git a/tests/test_minimal_requirements.txt b/tests/test_minimal_requirements.txt deleted file mode 100644 index af4e85d5f42..00000000000 --- a/tests/test_minimal_requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -ipython~=1.0.0 -matplotlib==2.0.2 # 2.0.0 is the first version that came out after Python 3.6, using the most recent patch in this branch as minimal -sympy~=1.2 -nose~=1.3.7 -nose-timer~=0.7.3 -pyyaml>=4.2b1 -coverage~=4.5.1 -codecov~=2.0.15 -unyt~=2.8.0 -more-itertools==8.4 -pytest~=6.1 -nose-exclude diff --git a/tests/test_prerequirements.txt b/tests/test_prerequirements.txt deleted file mode 100644 index cf5bb8782e5..00000000000 --- a/tests/test_prerequirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -# We need this file mostly because of Cartopy.. -numpy>=1.19.4 -cython>=0.29.21 - -# this is sometimes useful to avoid CI breakage when a dependency release comes out -# but some wheels (typically windows) are missing for a few hours/days -wheel diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt deleted file mode 100644 index 98b64655777..00000000000 --- a/tests/test_requirements.txt +++ /dev/null @@ -1,36 +0,0 @@ -astropy~=4.0.1 -codecov~=2.0.15 -coverage~=4.5.4 -fastcache~=1.0.2 -glueviz~=0.13.3 -h5py~=3.1.0 -ipython~=7.6.1 -matplotlib<3.5,!=3.4.2 -nose-timer~=1.0.0 -nose~=1.3.7 -pandas~=1.1.2 -requests~=2.20.0 -scipy~=1.5.0 -sympy~=1.5 -pyqt5~=5.15.2 -netCDF4~=1.5.3 -libconf~=1.0.1 -shapely ---no-binary=shapely -cartopy~=0.18.0 -pyaml~=17.10.0 -mpi4py~=3.0.3 -unyt~=2.8.0 -pyyaml>=4.2b1 -xarray~=0.16.1 -firefly_api>=0.0.2 -f90nml>=1.1.2 -MiniballCpp>=0.2.1 -pooch>=0.7.0 -pykdtree~=1.3.1 -nose-exclude -more-itertools>=8.4 -tqdm>=3.4.0 -toml>=0.10.2 -pytest-xdist~=2.1.0 -pytest~=6.1 diff --git a/tests/tests.yaml b/tests/tests.yaml index de7bbeae5ac..5ffa437ed37 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -81,9 +81,10 @@ answer_tests: local_gizmo_007: # PR 2909 - yt/frontends/gizmo/tests/test_outputs.py:test_gizmo_64 - local_halos_010: + local_halos_011: # PR 3325 - yt/frontends/ahf/tests/test_outputs.py:test_fields_ahf_halos - # - yt/frontends/owls_subfind/tests/test_outputs.py + - yt/frontends/owls_subfind/tests/test_outputs.py:test_fields_g1 + - yt/frontends/owls_subfind/tests/test_outputs.py:test_fields_g8 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42 @@ -91,9 +92,8 @@ answer_tests: - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter - local_pw_036: # PR 3272 + local_pw_037: # PR 3373 - yt/visualization/tests/test_plotwindow.py:test_attributes - - yt/visualization/tests/test_plotwindow.py:test_attributes_wt - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers diff --git a/tests/windows_conda_requirements.txt b/tests/windows_conda_requirements.txt new file mode 100644 index 00000000000..ac2465128d2 --- /dev/null +++ b/tests/windows_conda_requirements.txt @@ -0,0 +1,6 @@ +numpy>=1.19.4 +cython>=0.29.21,<3.0 +cartopy~=0.18.0 +h5py~=3.1.0 +matplotlib>=2.0.2,<3.6 +scipy~=1.5.0 diff --git a/yt/__init__.py b/yt/__init__.py index fc1c97eaf12..bd9aa84f496 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -7,14 +7,7 @@ * Contribute: 
https://github.com/yt-project/yt """ -import sys - -if sys.version_info[0] < 3: - raise Exception( - "Python 2 is no longer supported. Please install Python 3 for use with yt." - ) - -__version__ = "4.0.dev0" +__version__ = "4.1.dev0" import yt.units as units import yt.utilities.physical_constants as physical_constants diff --git a/yt/_maintenance/deprecation.py b/yt/_maintenance/deprecation.py index a0f240e8119..b33dbc7ecf8 100644 --- a/yt/_maintenance/deprecation.py +++ b/yt/_maintenance/deprecation.py @@ -28,7 +28,7 @@ def issue_deprecation_warning(msg, *, removal, since=None, stacklevel=3): beware that `removal` is required (it doesn't have a default value). This is vital since deprecated code is typically untested and not specifying a required keyword argument will turn the warning into a TypeError. - What it gets us however is that the release manager will know for a fact wether it + What it gets us however is that the release manager will know for a fact whether it is safe to remove a feature at any given point, and users have a better idea when their code will become incompatible. diff --git a/yt/config.py b/yt/config.py index 65e8fbbd59a..c5bd80212c0 100644 --- a/yt/config.py +++ b/yt/config.py @@ -31,9 +31,6 @@ test_storage_dir="/does/not/exist", test_data_dir="/does/not/exist", enzo_db="", - hub_url="https://girder.hub.yt/api/v1", - hub_api_key="", - hub_sandbox="/collection/yt_sandbox/data", notebook_password="", answer_testing_tolerance=3, answer_testing_bitwise=False, @@ -42,7 +39,7 @@ answer_tests_url="http://answers.yt-project.org/{1}_{2}", sketchfab_api_key="None", imagebin_api_key="e1977d9195fe39e", - imagebin_upload_url="https://api.imgur.com/3/upload", + imagebin_upload_url="https://api.imgur.com/3/image", imagebin_delete_url="https://api.imgur.com/3/image/{delete_hash}", curldrop_upload_url="http://use.yt/upload", thread_field_detection=False, diff --git a/yt/convenience.py b/yt/convenience.py deleted file mode 100644 index 3bbd821988c..00000000000 --- a/yt/convenience.py +++ /dev/null @@ -1,3 +0,0 @@ -from yt.utilities.exceptions import YTModuleRemoved - -raise YTModuleRemoved("yt.convenience", "yt.loaders") diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 76c50f2fc63..cfc544394f2 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -8,6 +8,7 @@ from tempfile import NamedTemporaryFile, TemporaryFile import numpy as np +from more_itertools import always_iterable from tqdm import tqdm from yt.config import ytcfg @@ -638,7 +639,10 @@ def __init__( self.ActiveDimensions = self._sanitize_dims(dims) rdx = self.ds.domain_dimensions * self.ds.relative_refinement(0, level) - rdx[np.where(np.array(dims) - 2 * num_ghost_zones <= 1)] = 1 # issue 602 + + # normalize dims as a non-zero dim array + dims = np.array(list(always_iterable(dims))) + rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1 # issue 602 self.base_dds = self.ds.domain_width / self.ds.domain_dimensions self.dds = self.ds.domain_width / rdx.astype("float64") self.right_edge = self.left_edge + self.ActiveDimensions * self.dds @@ -1793,7 +1797,7 @@ def export_obj( filename : string The file this will be exported to. This cannot be a file-like - object. If there are no file extentions included - both obj & mtl + object. If there are no file extensions included - both obj & mtl files are created. transparency : float This gives the transparency of the output surface plot. 
Values @@ -1942,7 +1946,7 @@ def _color_samples_obj( cs = (cs - mi) / (ma - mi) else: cs[:] = 1.0 - # to get color indicies for OBJ formatting + # to get color indices for OBJ formatting from yt.visualization._colormap_data import color_map_luts lut = color_map_luts[color_map] @@ -2598,7 +2602,7 @@ def _upload_to_sketchfab(self, data, files): try: r = requests.post(SKETCHFAB_API_URL, data=data, files=files, verify=False) except requests.exceptions.RequestException: - mylog.exception("An error has occured") + mylog.exception("An error has occurred") return result = r.json() diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index d875f5722ef..6133a8b4bcb 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -794,7 +794,7 @@ def create_firefly_object( raise ImportError( "Can't find firefly_api, ensure it " "is in your python path or install it with " - "'$ pip install firefly_api'. It is also available " + "`python -m pip install firefly_api`. It is also available " "on github at github.com/agurvich/firefly_api" ) from e diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index b310ed76d65..9643afdaa15 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -497,17 +497,18 @@ class AngularMomentumVector(DerivedQuantity): Examples -------- - # Find angular momentum vector of galaxy in grid-based isolated galaxy dataset - >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") - >>> ad = ds.all_data() - >>> print(ad.quantities.angular_momentum_vector()) - - # Find angular momentum vector of gas disk in particle-based dataset - >>> ds = load("FIRE_M12i_ref11/snapshot_600.hdf5") - >>> _, c = ds.find_max(("gas", "density")) - >>> sp = ds.sphere(c, (10, "kpc")) - >>> search_args = dict(use_gas=False, use_particles=True, particle_type="PartType0") - >>> print(sp.quantities.angular_momentum_vector(**search_args)) + Find angular momentum vector of galaxy in grid-based isolated galaxy dataset + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> ad = ds.all_data() + >>> print(ad.quantities.angular_momentum_vector()) + [-7.50868209e+26 1.06032596e+27 2.19274002e+29] cm**2/s + >>> # Find angular momentum vector of gas disk in particle-based dataset + >>> ds = yt.load("FIRE_M12i_ref11/snapshot_600.hdf5") + >>> _, c = ds.find_max(("gas", "density")) + >>> sp = ds.sphere(c, (10, "kpc")) + >>> search_args = dict(use_gas=False, use_particles=True, particle_type="PartType0") + >>> 
print(sp.quantities.angular_momentum_vector(**search_args)) + [4.88104442e+28 7.38463362e+28 6.20030135e+28] cm**2/s """ diff --git a/yt/data_objects/index_subobjects/grid_patch.py b/yt/data_objects/index_subobjects/grid_patch.py index 70ab7e712db..8e7ed4ae3e0 100644 --- a/yt/data_objects/index_subobjects/grid_patch.py +++ b/yt/data_objects/index_subobjects/grid_patch.py @@ -1,5 +1,6 @@ import warnings import weakref +from typing import List, Tuple import numpy as np @@ -268,7 +269,12 @@ def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False cube._base_grid = self return cube - def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False): + def get_vertex_centered_data( + self, + fields: List[Tuple[str, str]], + smoothed: bool = True, + no_ghost: bool = False, + ): _old_api = isinstance(fields, (str, tuple)) if _old_api: message = ( diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 3470ea293a2..5d988d6d77a 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -234,7 +234,8 @@ def _get_data(self, fields): for field in missing_fields: fds[field] = dd_first._determine_fields(field)[0] if field not in self.particle_fields: - if self.data_series[0]._get_field_info(*fds[field]).particle_type: + ftype = fds[field][0] + if ftype in self.data_series[0].particle_types: self.particle_fields.append(field) new_particle_fields.append(field) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index 87a813557a7..ee1f9de757d 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -1,4 +1,5 @@ import numpy as np +from more_itertools import collapse from yt.data_objects.field_data import YTFieldData from yt.fields.derived_field import DerivedField @@ -858,7 +859,7 @@ class ParticleProfile(Profile2D): weight_field : string field name The field to use for weighting. Default is None. deposition : string, optional - The interpolation kernal to be used for + The interpolation kernel to be used for deposition. Valid choices: "ngp" : nearest grid point interpolation "cic" : cloud-in-cell interpolation @@ -1347,7 +1348,9 @@ def create_profile( else: logs_list.append(data_source.ds.field_info[bin_field].take_log) logs = logs_list - if extrema is None: + + # Are the extrema all Nones? 
Then treat them as though extrema were set to None + if extrema is None or not any(collapse(extrema.values())): ex = [ data_source.quantities["Extrema"](f, non_zero=l) for f, l in zip(bin_fields, logs) diff --git a/yt/data_objects/selection_objects/spheroids.py b/yt/data_objects/selection_objects/spheroids.py index 8cfe91fd02e..728d698a823 100644 --- a/yt/data_objects/selection_objects/spheroids.py +++ b/yt/data_objects/selection_objects/spheroids.py @@ -139,7 +139,7 @@ class YTEllipsoid(YTSelectionContainer3D): e0 : array_like (automatically normalized) the direction of the largest semi-major axis of the ellipsoid tilt : float - After the rotation about the z-axis to allign e0 to x in the x-y + After the rotation about the z-axis to align e0 to x in the x-y plane, and then rotating about the y-axis to align e0 completely to the x-axis, tilt is the angle in radians remaining to rotate about the x-axis to align both e1 to the y-axis and e2 to diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 2f38a113a7e..e45825af401 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -264,7 +264,7 @@ def periodicity(self): def periodicity(self, val): # remove this setter to break backward compatibility issue_deprecation_warning( - "Dataset.periodicity should not be overriden manually. " + "Dataset.periodicity should not be overridden manually. " "In the future, this will become an error. " "Use `Dataset.force_periodicity` instead.", since="4.0.0", @@ -523,8 +523,6 @@ def has_key(self, key): @property def index(self): if self._instantiated_index is None: - if self._index_class is None: - raise RuntimeError("You should not instantiate Dataset.") self._instantiated_index = self._index_class( self, dataset_type=self.dataset_type ) @@ -731,7 +729,7 @@ def add_particle_union(self, union): def add_particle_filter(self, filter): """Add particle filter to the dataset. - Add ``filter`` to the dataset and set up relavent derived_field. + Add ``filter`` to the dataset and set up relevant derived_field. It will also add any ``filtered_type`` that the ``filter`` depends on. """ @@ -1338,7 +1336,7 @@ def _sanitize_units_override(cls, units_override): except KeyError: continue - # Now attempt to instanciate a unyt.unyt_quantity from val ... + # Now attempt to instantiate a unyt.unyt_quantity from val ... try: # ... directly (valid if val is a number, or a unyt_quantity) uo[key] = YTQuantity(val) @@ -1937,7 +1935,7 @@ def __init__( def validate_index_order(index_order): if index_order is None: - index_order = (7, 5) + index_order = (6, 2) elif not is_sequence(index_order): index_order = (int(index_order), 1) else: diff --git a/yt/data_objects/tests/test_profiles.py b/yt/data_objects/tests/test_profiles.py index 148e06d3626..45d028ae666 100644 --- a/yt/data_objects/tests/test_profiles.py +++ b/yt/data_objects/tests/test_profiles.py @@ -642,6 +642,21 @@ def test_unequal_bin_field_profile(self): ("gas", "mass"), ) + def test_set_linear_scaling_for_none_extrema(self): + # See Issue #3431 + # Ensures that extrema are calculated in the same way on subsequent passes + # through the PhasePlot machinery.
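# Illustrative aside (hypothetical values, not from yt): the new check in
# create_profile above treats an extrema dict whose entries are all None
# exactly like extrema=None, so extrema are recomputed from the data source.
from more_itertools import collapse

extrema = {"density": (None, None), "temperature": (None, None)}
assert not any(collapse(extrema.values()))  # all None -> fall back to Extrema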
+ ds = fake_sph_orientation_ds() + p = yt.PhasePlot( + ds, + ("all", "particle_position_spherical_theta"), + ("all", "particle_position_spherical_radius"), + ("all", "particle_mass"), + weight_field=None, + ) + p.set_log(("all", "particle_position_spherical_theta"), False) + p.save() + def test_index_field_units(): # see #1849 diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index 8faca94f22f..4b36aa7f01e 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -33,14 +33,23 @@ "H_fraction", "He_fraction", "C_fraction", + "Ca_fraction", "N_fraction", "O_fraction", + "S_fraction", "Ne_fraction", "Mg_fraction", "Si_fraction", "Fe_fraction", + "H_density", + "He_density", "C_density", + "Ca_density", + "N_density", "O_density", + "S_density", + "Ne_density", + "Mg_density", "Si_density", "Fe_density", ) diff --git a/yt/fields/species_fields.py b/yt/fields/species_fields.py index 653de299665..7770422bd7f 100644 --- a/yt/fields/species_fields.py +++ b/yt/fields/species_fields.py @@ -263,7 +263,7 @@ def _default_nuclei_density(field, data): element = field.name[1][: field.name[1].find("_")] amu_cgs = data.ds.units.physical_constants.amu_cgs if element == "El": - # This is for determing the electron number density. + # This is for determining the electron number density. # If we got here, this assumes full ionization! muinv = 1.0 * _primordial_mass_fraction["H"] / ChemicalFormula("H").weight muinv += 2.0 * _primordial_mass_fraction["He"] / ChemicalFormula("He").weight diff --git a/yt/frontends/_skeleton/data_structures.py b/yt/frontends/_skeleton/data_structures.py index 3e7923910b0..b4148949cec 100644 --- a/yt/frontends/_skeleton/data_structures.py +++ b/yt/frontends/_skeleton/data_structures.py @@ -145,7 +145,7 @@ def _parse_parameter_file(self): # self.omega_matter <= float # self.hubble_constant <= float - # optional (the followin have default implementations) + # optional (the following have default implementations) # self.unique_identifier <= unique identifier for the dataset # being read (e.g., UUID or ST_CTIME) (int) # diff --git a/yt/frontends/adaptahop/tests/test_outputs.py b/yt/frontends/adaptahop/tests/test_outputs.py index 7beea642c2c..70c48458d0a 100644 --- a/yt/frontends/adaptahop/tests/test_outputs.py +++ b/yt/frontends/adaptahop/tests/test_outputs.py @@ -56,7 +56,7 @@ def test_get_halo(): halo = ds.halo(1, ptype="io") - # Check halo objet has position, velocity, mass and members attributes + # Check halo object has position, velocity, mass and members attributes for attr_name in ("mass", "position", "velocity", "member_ids"): getattr(halo, attr_name) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index e21528c947b..1c619b126a4 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -146,7 +146,7 @@ def __init__( parfiles=None, default_species_fields=None, ): - """Instanciate AMRVACDataset. + """Instantiate AMRVACDataset. Parameters ---------- @@ -157,7 +157,7 @@ def __init__( This should always be 'amrvac'. units_override : dict, optional - A dictionnary of physical normalisation factors to interpret on disk data. + A dictionary of physical normalisation factors to interpret on disk data. 
unit_system : str, optional Either "cgs" (default), "mks" or "code" diff --git a/yt/frontends/amrvac/tests/test_outputs.py b/yt/frontends/amrvac/tests/test_outputs.py index 0e7f89747d8..f309bfb0482 100644 --- a/yt/frontends/amrvac/tests/test_outputs.py +++ b/yt/frontends/amrvac/tests/test_outputs.py @@ -1,4 +1,4 @@ -import numpy as np # NOQA +import numpy as np import yt # NOQA from yt.frontends.amrvac.api import AMRVACDataset, AMRVACGrid diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index fede9637ec8..87c99b34049 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -292,7 +292,7 @@ def _parse_parameter_file(self): # lextra needs to be loaded as a string, but it's actually # array values. So pop it off here, and then re-insert. lextra = amr_header_vals.pop("lextra") - amr_header_vals["lextra"] = np.fromstring(lextra, ">f4") + amr_header_vals["lextra"] = np.frombuffer(lextra, ">f4") self.parameters.update(amr_header_vals) amr_header_vals = None # estimate the root level @@ -315,7 +315,7 @@ def _parse_parameter_file(self): # extras needs to be loaded as a string, but it's actually # array values. So pop it off here, and then re-insert. extras = particle_header_vals.pop("extras") - particle_header_vals["extras"] = np.fromstring(extras, ">f4") + particle_header_vals["extras"] = np.frombuffer(extras, ">f4") self.parameters["wspecies"] = wspecies[:n] self.parameters["lspecies"] = lspecies[:n] for specie in range(n): @@ -570,43 +570,41 @@ def _parse_parameter_file(self): boxsize = np.fromfile(fh, count=1, dtype=">f4") n = nspecs[0] particle_header_vals = {} - tmp = np.array( - [ - headerstr, - aexpn, - aexp0, - amplt, - astep, - istep, - partw, - tintg, - ekin, - ekin1, - ekin2, - au0, - aeu0, - nrowc, - ngridc, - nspecs, - nseed, - Om0, - Oml0, - hubble, - Wp5, - Ocurv, - wspecies, - lspecies, - extras, - boxsize, - ] - ) - for i in range(len(tmp)): + tmp = [ + headerstr, + aexpn, + aexp0, + amplt, + astep, + istep, + partw, + tintg, + ekin, + ekin1, + ekin2, + au0, + aeu0, + nrowc, + ngridc, + nspecs, + nseed, + Om0, + Oml0, + hubble, + Wp5, + Ocurv, + wspecies, + lspecies, + extras, + boxsize, + ] + for i, arr in enumerate(tmp): a1 = dmparticle_header_struct[0][i] a2 = dmparticle_header_struct[1][i] if a2 == 1: - particle_header_vals[a1] = tmp[i][0] + particle_header_vals[a1] = arr[0] else: - particle_header_vals[a1] = tmp[i][:a2] + particle_header_vals[a1] = arr[:a2] for specie in range(n): self.particle_types.append("specie%i" % specie) self.particle_types_raw = tuple(self.particle_types) diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index e74322a2308..169191d093d 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -533,7 +533,7 @@ def _read_child_level( # idc = np.argsort(arr['idc']) #correct fortran indices # translate idc into icell, and then to iOct icell = (arr["idc"] >> 3) << 3 - iocts = (icell - ncell0) / nchild # without a F correction, theres a +1 + iocts = (icell - ncell0) / nchild # without a F correction, there's a +1 # assert that the children are read in the same order as the octs assert np.all(octs == iocts[::nchild]) else: diff --git a/yt/frontends/art/tests/test_outputs.py b/yt/frontends/art/tests/test_outputs.py index 4e2dda04d8a..97f9fc2fb49 100644 --- a/yt/frontends/art/tests/test_outputs.py +++ b/yt/frontends/art/tests/test_outputs.py @@ -57,7 +57,7 @@ def test_d9p_global_values(): assert_equal(ad[("stars", "particle_type")].size, AnaNStars) 
assert_equal(ad[("specie4", "particle_type")].size, AnaNStars) - # The *real* asnwer is 2833405, but yt misses one particle since it lives + # The *real* answer is 2833405, but yt misses one particle since it lives # on a domain boundary. See issue 814. When that is fixed, this test # will need to be updated AnaNDM = 2833404 diff --git a/yt/frontends/artio/_artio_caller.pyx b/yt/frontends/artio/_artio_caller.pyx index 4aced6b58ae..066e083a4c2 100644 --- a/yt/frontends/artio/_artio_caller.pyx +++ b/yt/frontends/artio/_artio_caller.pyx @@ -294,7 +294,6 @@ cdef class artio_fileset : if self.handle : artio_fileset_close(self.handle) def read_parameters(self) : - from sys import version cdef char key[64] cdef int type cdef int length @@ -317,9 +316,8 @@ cdef class artio_fileset : for i in range(length) : free(char_values[i]) free(char_values) - if version[0] == '3': - for i in range(0,len(parameter)): - parameter[i] = parameter[i].decode('utf-8') + for i in range(len(parameter)): + parameter[i] = parameter[i].decode('utf-8') elif type == ARTIO_TYPE_INT : int_values = malloc(length*sizeof(int32_t)) artio_parameter_get_int_array( self.handle, key, length, int_values ) @@ -343,10 +341,7 @@ cdef class artio_fileset : else : raise RuntimeError("ARTIO file corruption detected: invalid type!") - if version[0] == '3': - self.parameters[key.decode('utf-8')] = parameter - else: - self.parameters[key] = parameter + self.parameters[key.decode('utf-8')] = parameter def abox_from_auni(self, np.float64_t a): if self.cosmology: diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 3849027489a..60e8c1858d1 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -349,7 +349,6 @@ def __init__( unit_system="cgs", default_species_fields=None, ): - from sys import version if self._handle is not None: return @@ -357,10 +356,7 @@ def __init__( self.fluid_types += ("artio",) self._filename = filename self._fileset_prefix = filename[:-4] - if version < "3": - self._handle = artio_fileset(self._fileset_prefix) - else: - self._handle = artio_fileset(bytes(self._fileset_prefix, "utf-8")) + self._handle = artio_fileset(bytes(self._fileset_prefix, "utf-8")) self.artio_parameters = self._handle.parameters # Here we want to initiate a traceback, if the reader is not built. Dataset.__init__( diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 8f6b8ce54f6..7c7bd57d525 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1,7 +1,6 @@ import glob import os import re -import warnings from collections import namedtuple from stat import ST_CTIME @@ -141,10 +140,9 @@ def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None): try: self.real_type = known_real_types[particle_real_type] except KeyError: - warnings.warn( - f"yt did not recognize particle real type {particle_real_type} " - "assuming double", - category=RuntimeWarning, + mylog.warning( + "yt did not recognize particle real type '%s'. 
Assuming 'double'.", + particle_real_type, ) self.real_type = known_real_types["double"] diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index aa342f86c9f..59c1a23b506 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -149,7 +149,7 @@ def _count_grids(self): elif "boxes" in d: self.num_grids += d["boxes"].len() else: - raise RuntimeError("Uknown file specification") + raise RuntimeError("Unknown file specification") def _parse_index(self): f = self._handle # shortcut diff --git a/yt/frontends/enzo_e/misc.py b/yt/frontends/enzo_e/misc.py index 5380b84bf58..283b3261ebf 100644 --- a/yt/frontends/enzo_e/misc.py +++ b/yt/frontends/enzo_e/misc.py @@ -7,7 +7,7 @@ def bdecode(block): Decode a block descriptor to get its left and right sides and level. A block string consisting of (0, 1), with optionally one colon. The - number of digits after the colon is the refinemenet level. The combined + number of digits after the colon is the refinement level. The combined digits denote the binary representation of the left edge. """ diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 18a15634a17..f19eeb79f39 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -87,7 +87,7 @@ def gadget_format(self): # Read the first 4 bytes assuming little endian int32 with self.open() as f: (rhead,) = struct.unpack("<I", f.read(4)) [...] diff --git a/yt/funcs.py b/yt/funcs.py - if LooseVersion(matplotlib.__version__) >= LooseVersion("3.3.0"): + if parse_version(matplotlib.__version__) >= parse_version("3.3.0"): style_name["mathtext.fallback"] = "cm" else: style_name["mathtext.fallback_to_cm"] = True @@ -1288,7 +1288,7 @@ def levenshtein_distance(seq1, seq2, max_dist=None): Returns ------- - The Levensthein distance as an integer. + The Levenshtein distance as an integer. Notes ----- diff --git a/yt/geometry/_selection_routines/selector_object.pxi b/yt/geometry/_selection_routines/selector_object.pxi index 630573f031a..4f5235708ee 100644 --- a/yt/geometry/_selection_routines/selector_object.pxi +++ b/yt/geometry/_selection_routines/selector_object.pxi @@ -594,7 +594,7 @@ cdef class SelectorObject: return state_tuple def __getnewargs__(self): - # __setstate__ will always call __cinit__, this pickle hoook returns arguments + # __setstate__ will always call __cinit__, this pickle hook returns arguments # to __cinit__. We will give it None so we dont error then set attributes in # __setstate__ Note that we could avoid this by making dobj an optional argument # to __cinit__ diff --git a/yt/geometry/_selection_routines/sphere_selector.pxi b/yt/geometry/_selection_routines/sphere_selector.pxi index 2b777b04acf..1f8496ccd82 100644 --- a/yt/geometry/_selection_routines/sphere_selector.pxi +++ b/yt/geometry/_selection_routines/sphere_selector.pxi @@ -35,7 +35,7 @@ cdef class SphereSelector(SelectorObject): pos[2] - 0.5*dds[2] <= self.center[2] <= pos[2]+0.5*dds[2]): return 1 return self.select_point(pos) - # # langmm: added to allow sphere to interesect edge/corner of cell + # # langmm: added to allow sphere to intersect edge/corner of cell # cdef np.float64_t LE[3] # cdef np.float64_t RE[3] # cdef int i diff --git a/yt/geometry/oct_geometry_handler.py b/yt/geometry/oct_geometry_handler.py index 57107ba538d..077e0a12ed4 100644 --- a/yt/geometry/oct_geometry_handler.py +++ b/yt/geometry/oct_geometry_handler.py @@ -69,7 +69,7 @@ def _cell_index(field, data): remaining[remaining] = np.isnan(tmp[:Nremaining]) Nremaining = remaining.sum() - return data.ds.arr(ret.astype(np.float64), input_units="1") + return data.ds.arr(ret.astype(np.float64), units="1") def _mesh_sampling_particle_field(field, data): """ @@ -99,7 +99,7 @@ def _mesh_sampling_particle_field(field, data): ret[mask] = cell_data[icell[mask]] - return data.ds.arr(ret, input_units=cell_data.units) + return data.ds.arr(ret, units=cell_data.units) if (ptype, "cell_index") not in self.ds.derived_field_list: self.ds.add_field( diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index c44ce110073..0b64a728d0c 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -136,7 +136,7 @@ cdef class IndexOcts(OctVisitor): self.oct_index[o.domain_ind] = self.index self.index += 1 -# Compute a mapping from domain_ind to flattend index with some octs masked. +# Compute a mapping from domain_ind to flattened index with some octs masked.
cdef class MaskedIndexOcts(OctVisitor): @cython.boundscheck(False) @cython.initializedcheck(False) diff --git a/yt/geometry/particle_deposit.pyx b/yt/geometry/particle_deposit.pyx index 0987df728e9..1897f6dad84 100644 --- a/yt/geometry/particle_deposit.pyx +++ b/yt/geometry/particle_deposit.pyx @@ -14,8 +14,6 @@ cimport numpy as np import numpy as np cimport cython -from cpython cimport PyObject -from cpython.array cimport array, clone from cython.view cimport memoryview as cymemview from libc.math cimport sqrt from libc.stdlib cimport free, malloc diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 85520b38e0a..5f9be09a59f 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -145,7 +145,7 @@ def _initialize_index(self): dont_cache = False # If we have applied a bounding box then we can't cache the - # ParticleBitmap because it is doman dependent + # ParticleBitmap because it is domain dependent if getattr(ds, "_domain_override", False): dont_cache = True @@ -163,12 +163,16 @@ def _initialize_index(self): ) # Load Morton index from file if provided - if getattr(ds, "index_filename", None) is None: - fname = ds.parameter_filename + ".index{}_{}.ewah".format( - self.regions.index_order1, self.regions.index_order2 - ) - else: - fname = ds.index_filename + def _current_fname(): + if getattr(ds, "index_filename", None) is None: + fname = ds.parameter_filename + ".index{}_{}.ewah".format( + self.regions.index_order1, self.regions.index_order2 + ) + else: + fname = ds.index_filename + return fname + + fname = _current_fname() dont_load = dont_cache and not hasattr(ds, "index_filename") try: @@ -183,6 +187,8 @@ def _initialize_index(self): self.regions.reset_bitmasks() self._initialize_coarse_index() self._initialize_refined_index() + # We now update fname since index_order2 may have changed + fname = _current_fname() wdir = os.path.dirname(fname) if not dont_cache and os.access(wdir, os.W_OK): # Sometimes os mis-reports whether a directory is writable, @@ -195,6 +201,7 @@ def _initialize_coarse_index(self): pb = get_pbar("Initializing coarse index ", len(self.data_files)) + max_hsml = 0.0 for i, data_file in enumerate(self.data_files): pb.update(i + 1) for ptype, pos in self.io._yield_coordinates(data_file): @@ -203,12 +210,28 @@ hsml = self.io._get_smoothing_length( data_file, pos.dtype, pos.shape ) + if hsml is not None and hsml.size > 0: + max_hsml = max(max_hsml, hsml.max()) else: hsml = None self.regions._coarse_index_data_file(pos, hsml, data_file.file_id) self.regions._set_coarse_index_data_file(data_file.file_id) pb.finish() self.regions.find_collisions_coarse() + if max_hsml > 0.0 and len(self.data_files) > 1: + # By passing this in, we only allow index_order2 to be increased by + # two at most. One place this becomes particularly + # useful is in the case of an extremely small section of gas + # particles embedded in a much much larger domain. The max + # smoothing length will be quite small, so based on the larger + # domain, it will correspond to a very very high index order, which + # is a large amount of memory! Having multiple indexes, one for + # each particle type, would fix this.
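# Illustrative sketch of the capping logic described just above, in pure
# Python; the helper below is hypothetical (not yt API). dds_mi1 stands in
# for the coarse cell widths and characteristic_size for the max smoothing
# length, mirroring ParticleBitmap.update_mi2.
from math import ceil, log2

def estimate_index_order2(dds_mi1, characteristic_size, max_index_order2):
    # Pick the smallest refined order that resolves the characteristic
    # size, floored at 2 and capped at max_index_order2.
    order2 = 2
    for width in dds_mi1:
        if width < characteristic_size:
            continue
        order2 = max(order2, ceil(log2(width / characteristic_size)))
    return min(order2, max_index_order2)

# E.g. a tiny clump of gas (hsml ~ 1e-3) in a unit domain with the default
# index_order = (6, 2): coarse cells are 2**-6 wide and the cap is 2 + 2 = 4.
print(estimate_index_order2([2.0**-6] * 3, 1e-3, 4))  # -> 4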
+ new_order2 = self.regions.update_mi2(max_hsml, ds.index_order[1] + 2) + mylog.info( + "Updating index_order2 from %s to %s", ds.index_order[1], new_order2 + ) + self.ds.index_order = (self.ds.index_order[0], new_order2) def _initialize_refined_index(self): mask = self.regions.masks.sum(axis=1).astype("uint8") diff --git a/yt/geometry/particle_oct_container.pyx b/yt/geometry/particle_oct_container.pyx index 7b75f6b0fa1..e774225715a 100644 --- a/yt/geometry/particle_oct_container.pyx +++ b/yt/geometry/particle_oct_container.pyx @@ -1,7 +1,6 @@ # distutils: language = c++ # distutils: extra_compile_args = CPP14_FLAG -# distutils: include_dirs = LIB_DIR -# distutils: libraries = EWAH_LIBS +# distutils: libraries = STD_LIBS """ Oct container tuned for Particles @@ -11,6 +10,12 @@ Oct container tuned for Particles """ +from libc.math cimport ceil, floor, fmod, log2 +from libc.stdlib cimport free, malloc, qsort +from libc.string cimport memset +from libcpp.map cimport map as cmap +from libcpp.vector cimport vector + from ewah_bool_utils.ewah_bool_array cimport ( bool_array, ewah_bool_array, @@ -18,12 +23,6 @@ from ewah_bool_utils.ewah_bool_array cimport ( ewah_map, ewah_word_type, ) -from ewah_bool_utils.ewah_bool_wrap cimport BoolArrayCollection -from libc.math cimport ceil, floor, fmod -from libc.stdlib cimport free, malloc, qsort -from libc.string cimport memset -from libcpp.map cimport map as cmap -from libcpp.vector cimport vector import numpy as np @@ -64,10 +63,13 @@ from yt.funcs import get_pbar from particle_deposit cimport gind +#from ewah_bool_utils.ewah_bool_wrap cimport \ +from ewah_bool_utils.ewah_bool_wrap cimport BoolArrayCollection + import os import struct -# If set to 1, ghost cells are added at the refined level reguardless of if the +# If set to 1, ghost cells are added at the refined level regardless of if the # coarse cell containing it is refined in the selector. # If set to 0, ghost cells are only added at the refined level of the coarse # index for the ghost cell is refined in the selector. @@ -89,7 +91,7 @@ cdef class ParticleOctreeContainer(OctreeContainer): #The starting oct index of each domain cdef np.int64_t *dom_offsets cdef public int max_level - #How many particles do we keep befor refining + #How many particles do we keep before refining cdef public int n_ref def allocate_root(self): @@ -457,6 +459,7 @@ cdef class ParticleBitmap: cdef np.uint64_t file_marker_i cdef public FileBitmasks bitmasks cdef public BoolArrayCollection collisions + cdef public int _used_mi2 def __init__(self, left_edge, right_edge, periodicity, file_hash, nfiles, index_order1, index_order2): @@ -479,14 +482,10 @@ cdef class ParticleBitmap: self.dds[i] = (right_edge[i] - left_edge[i])/self.dims[i] self.idds[i] = 1.0/self.dds[i] self.dds_mi1[i] = (right_edge[i] - left_edge[i]) / (1< 0: + return self.index_order2 + cdef np.uint64_t index_order2 = 2 + for i in range(3): + # Note we're casting to signed here, to avoid negative issues. 
+ if self.dds_mi1[i] < characteristic_size: continue + index_order2 = max(index_order2, ceil(log2(self.dds_mi1[i] / characteristic_size))) + index_order2 = i64min(max_index_order2, index_order2) + self._update_mi2(index_order2) + return self.index_order2 + + cdef void _update_mi2(self, np.uint64_t index_order2): + self.index_order2 = index_order2 + mi2_max = (1 << self.index_order2) - 1 + self.directional_max2[0] = encode_morton_64bit(mi2_max, 0, 0) + self.directional_max2[1] = encode_morton_64bit(0, mi2_max, 0) + self.directional_max2[2] = encode_morton_64bit(0, 0, mi2_max) + for i in range(3): + self.dds_mi2[i] = self.dds_mi1[i] / (1< if you remain firm in your conviction to continue." - ) - print() - print() - print("Okay, sorry about that. How about a nice, pithy ( < 12 words )") - print("summary of the bug? (e.g. 'Particle overlay problem with parallel ") - print("projections')") - print() - try: - current_version = get_yt_version() - except Exception: - current_version = "Unavailable" - summary = input("Summary? ") - bugtype = "bug" - data = dict(title=summary, type=bugtype) - print() - print("Okay, now let's get a bit more information.") - print() - print("Remember that if you want to submit a traceback, you can run") - print("any script with --paste or --detailed-paste to submit it to") - print("the pastebin and then include the link in this bugreport.") - if "EDITOR" in os.environ: - print() - print(f"Press enter to spawn your editor, {os.environ['EDITOR']}") - input() - tf = tempfile.NamedTemporaryFile(delete=False) - fn = tf.name - tf.close() - subprocess.call(f"$EDITOR {fn}", shell=True) - content = open(fn).read() - try: - os.unlink(fn) - except Exception: - pass - else: - print() - print("Couldn't find an $EDITOR variable. So, let's just take") - print("take input here. Type up your summary until you're ready") - print("to be done, and to signal you're done, type --- by itself") - print("on a line to signal your completion.") - print() - print("(okay, type now)") - print() - lines = [] - while True: - line = input() - if line.strip() == "---": - break - lines.append(line) - content = "\n".join(lines) - content = f"Reporting Version: {current_version}\n\n{content}" - endpoint = "repositories/yt_analysis/yt/issues" - data["content"] = content - print() - print("===============================================================") - print() - print("Okay, we're going to submit with this:") - print() - print(f"Summary: {data['title']}") - print() - print("---") - print(content) - print("---") - print() - print("===============================================================") - print() - print("Is that okay? If not, hit ctrl-c. Otherwise, enter means") - print("'submit'. Next we'll ask for your Bitbucket Username.") - print("If you don't have one, run the 'yt bootstrap_dev' command.") - print() - input() - retval = bb_apicall(endpoint, data, use_pass=True) - import json - - retval = json.loads(retval) - url = f"http://bitbucket.org/yt_analysis/yt/issue/{retval['local_id']}" - print() - print("===============================================================") - print() - print("Thanks for your bug report! 
Together we'll make yt totally bug free!") - print("You can view bug report here:") - print(f" {url}") - print() - print("Keep in touch!") - print() - - -class YTHubRegisterCmd(YTCommand): - subparser = "hub" - name = "register" - description = """ - Register a user on the yt Hub: http://hub.yt/ - """ - - def __call__(self, args): - from yt.utilities.on_demand_imports import _requests as requests - - hub_api_key, config_file = ytcfg.get( - "yt", - "hub_api_key", - callback=lambda leaf: (leaf.value, leaf.extra_data.get("source", None)), - ) - if hub_api_key: - print( - "You seem to already have an API key for the hub in " - f"{config_file} . Delete this if you want to force a " - "new user registration." - ) - sys.exit() - print("Awesome! Let's start by registering a new user for you.") - print("Here's the URL, for reference: http://hub.yt/ ") - print() - print("As always, bail out with Ctrl-C at any time.") - print() - print("What username would you like to go by?") - print() - username = input("Username? ") - if len(username) == 0: - sys.exit(1) - print() - print("To start out, what's your name?") - print() - first_name = input("First Name? ") - if len(first_name) == 0: - sys.exit(1) - print() - last_name = input("Last Name? ") - if len(last_name) == 0: - sys.exit(1) - print() - print("And your email address?") - print() - email = input("Email? ") - if len(email) == 0: - sys.exit(1) - print() - print("Please choose a password:") - print() - while True: - password1 = getpass.getpass("Password? ") - password2 = getpass.getpass("Confirm? ") - if len(password1) == 0: - continue - if password1 == password2: - break - print("Sorry, they didn't match! Let's try again.") - print() - print() - print("Okay, press enter to register. You should receive a welcome") - print(f"message at {email} when this is complete.") - print() - input() - - data = dict( - firstName=first_name, - email=email, - login=username, - password=password1, - lastName=last_name, - admin=False, - ) - hub_url = ytcfg.get("yt", "hub_url") - req = requests.post(hub_url + "/user", data=data) - - if req.ok: - headers = {"Girder-Token": req.json()["authToken"]["token"]} - else: - if req.status_code == 400: - print("Registration failed with 'Bad request':") - print(req.json()["message"]) - exit(1) - print("User registration successful") - print("Obtaining API key...") - req = requests.post( - hub_url + "/api_key", - headers=headers, - data={"name": "ytcmd", "active": True}, - ) - apiKey = req.json()["key"] - - print("Storing API key in configuration file") - set_config("yt", "hub_api_key", apiKey, YTConfig.get_global_config_file()) - - print() - print("SUCCESS!") - print() - - class YTInstInfoCmd(YTCommand): name = ["instinfo", "version"] args = ( @@ -1162,69 +914,6 @@ def __call__(self, args): lo.main(None, download=args.number) -class YTHubStartNotebook(YTCommand): - args = ( - dict( - dest="folderId", - default=ytcfg.get("yt", "hub_sandbox"), - nargs="?", - help="(Optional) Hub folder to mount inside the Notebook", - ), - ) - description = """ - Start the Jupyter Notebook on the yt Hub. - """ - subparser = "hub" - name = "start" - - def __call__(self, args): - gc = _get_girder_client() - - # TODO: should happen server-side - _id = gc._checkResourcePath(args.folderId) - - resp = gc.post(f"/notebook/{_id}") - try: - print("Launched! Please visit this URL:") - print(" https://tmpnb.hub.yt" + resp["url"]) - print() - except (KeyError, TypeError): - print("Something went wrong. 
The yt Hub responded with : ") - print(resp) - - -class YTNotebookUploadCmd(YTCommand): - args = (dict(short="file", type=str),) - description = """ - Upload an IPython Notebook to the yt Hub. - """ - - name = "upload_notebook" - - def __call__(self, args): - gc = _get_girder_client() - username = gc.get("/user/me")["login"] - gc.upload(args.file, f"/user/{username}/Public") - - _id = gc.resourceLookup(f"/user/{username}/Public/{args.file}")["_id"] - _fid = next(gc.listFile(_id))["_id"] - hub_url = urlparse(ytcfg.get("yt", "hub_url")) - print("Upload successful!") - print() - print("To access your raw notebook go here:") - print() - print(f" {hub_url.scheme}://{hub_url.netloc}/#item/{_id}") - print() - print("To view your notebook go here:") - print() - print( - " http://nbviewer.jupyter.org/urls/{}/file/{}/download".format( - hub_url.netloc + hub_url.path, _fid - ) - ) - print() - - class YTPlotCmd(YTCommand): args = ( "width", @@ -1377,7 +1066,7 @@ class YTNotebookCmd(YTCommand): action="store", default=None, dest="profile", - help="The IPython profile to use when lauching the kernel.", + help="The IPython profile to use when launching the kernel.", ), dict( short="-n", @@ -1477,30 +1166,26 @@ def __call__(self, args): ds = args.ds ds.print_stats() vals = {} - if args.field in ds.derived_field_list: - if args.max: - vals["min"] = ds.find_max(args.field) - print( - f"Maximum {args.field}: {vals['min'][0]:0.5e} at {vals['min'][1]}" - ) - if args.min: - vals["max"] = ds.find_min(args.field) - print( - f"Minimum {args.field}: {vals['max'][0]:0.5e} at {vals['max'][1]}" - ) + field = ds._get_field_info(args.field) + if args.max: + vals["max"] = ds.find_max(field) + print(f"Maximum {field.name}: {vals['max'][0]:0.5e} at {vals['max'][1]}") + if args.min: + vals["min"] = ds.find_min(field) + print(f"Minimum {field.name}: {vals['min'][0]:0.5e} at {vals['min'][1]}") if args.output is not None: - t = ds.current_time * ds["years"] + t = ds.current_time.to("yr") with open(args.output, "a") as f: - f.write(f"{ds} ({t:0.5e} years)\n") + f.write(f"{ds} ({t:0.5e})\n") if "min" in vals: f.write( "Minimum %s is %0.5e at %s\n" - % (args.field, vals["min"][0], vals["min"][1]) + % (field.name, vals["min"][0], vals["min"][1]) ) if "max" in vals: f.write( "Maximum %s is %0.5e at %s\n" - % (args.field, vals["max"][0], vals["max"][1]) + % (field.name, vals["max"][0], vals["max"][1]) ) @@ -1681,7 +1366,7 @@ def load_config(self, args): dict( short="--local", action="store_true", - help="Store the configuration in the global configuration file.", + help="Store the configuration in the local configuration file.", ), dict( short="--global", diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 06a50f6639f..dc88bc311e3 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -296,7 +296,7 @@ def __str__(self): class YTFieldUnitParseError(YTException): def __init__(self, field_info): - self.msg = "The field '%s' has unparseable units '%s'." + self.msg = "The field '%s' has unparsable units '%s'." 
self.msg = self.msg % (field_info.name, field_info.units) def __str__(self): @@ -896,7 +896,7 @@ def __str__(self): msg += "appropriate for your python environment, e.g.:\n" msg += f" conda install {self.module}\n" msg += "or:\n" - msg += f" pip install {self.module}\n" + msg += f" python -m pip install {self.module}\n" return msg diff --git a/yt/utilities/fortran_utils.py b/yt/utilities/fortran_utils.py index c242be7c33c..32956767e87 100644 --- a/yt/utilities/fortran_utils.py +++ b/yt/utilities/fortran_utils.py @@ -1,19 +1,9 @@ +import io import os import struct import numpy as np -# This may not be the correct way to do this. We should investigate what NumPy -# does. -try: - file -except NameError: - # What we're doing here is making it always fail, so we read things in and - # THEN call numpy's fromstring. I can't figure out an easy way of telling if - # an object is an actual file, reliably. - class file: - pass - def read_attrs(f, attrs, endian="="): r"""This function accepts a file pointer and reads from that file pointer @@ -70,7 +60,7 @@ def read_attrs(f, attrs, endian="="): if s1 != s2: size = struct.calcsize(endian + "I" + "".join(n * [t]) + "I") raise OSError( - "An error occured while reading a Fortran record. " + "An error occurred while reading a Fortran record. " "Got a different size at the beginning and at the " "end of the record: %s %s", s1, @@ -81,7 +71,7 @@ def read_attrs(f, attrs, endian="="): if isinstance(a, tuple): if len(a) != len(v): raise OSError( - "An error occured while reading a Fortran " + "An error occurred while reading a Fortran " "record. Record length is not equal to expected " "length: %s %s", len(a), @@ -148,7 +138,7 @@ def read_cattrs(f, attrs, endian="="): if isinstance(a, tuple): if len(a) != len(v): raise OSError( - "An error occured while reading a Fortran " + "An error occurred while reading a Fortran " "record. Record length is not equal to expected " "length: %s %s", len(a), @@ -193,20 +183,20 @@ def read_vector(f, d, endian="="): vec_size = struct.calcsize(vec_fmt) if vec_len % vec_size != 0: raise OSError( - "An error occured while reading a Fortran record. " + "An error occurred while reading a Fortran record. " "Vector length is not compatible with data type: %s %s", vec_len, vec_size, ) vec_num = int(vec_len / vec_size) - if isinstance(f, file): # Needs to be explicitly a file - tr = np.fromfile(f, vec_fmt, count=vec_num) + if isinstance(f, io.IOBase): + tr = np.frombuffer(f.read(vec_len), vec_fmt, count=vec_num) else: - tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num) + tr = np.frombuffer(f, vec_fmt, count=vec_num) vec_len2 = struct.unpack(pad_fmt, f.read(pad_size))[0] if vec_len != vec_len2: raise OSError( - "An error occured while reading a Fortran record. " + "An error occurred while reading a Fortran record. " "Got a different size at the beginning and at the " "end of the record: %s %s", vec_len, @@ -249,7 +239,7 @@ def skip(f, n=1, endian="="): s2 = struct.unpack(fmt, size)[0] if s1 != s2: raise OSError( - "An error occured while reading a Fortran record. " + "An error occurred while reading a Fortran record. " "Got a different size at the beginning and at the " "end of the record: %s %s", s1, @@ -324,7 +314,7 @@ def read_record(f, rspec, endian="="): s1, s2 = vals.pop(0), vals.pop(-1) if s1 != s2: raise OSError( - "An error occured while reading a Fortran record. Got " + "An error occurred while reading a Fortran record. 
Got " "a different size at the beginning and at the end of " "the record: %s %s", s1, diff --git a/yt/utilities/grid_data_format/conversion/conversion_athena.py b/yt/utilities/grid_data_format/conversion/conversion_athena.py index 780d26c0198..84dfa6f3d12 100644 --- a/yt/utilities/grid_data_format/conversion/conversion_athena.py +++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py @@ -284,7 +284,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): self.write_gdf_field(gdf_name, i, field + "_z", data_z) del data, data_x, data_y, data_z del line - line = f.readline() # NOQA + line = f.readline() f.close() del f diff --git a/yt/utilities/grid_data_format/docs/gdf_specification.txt b/yt/utilities/grid_data_format/docs/gdf_specification.txt index ccfa02b6997..67921a1e5a2 100644 --- a/yt/utilities/grid_data_format/docs/gdf_specification.txt +++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt @@ -271,7 +271,7 @@ yt will also provide a writer for this data, which will operate on any existing data format. Provided that a simulation code can read this data, this will enable cross-platform comparison. Furthermore, any external piece of software (i.e., Stranger) that implements reading this format will be able to read any -format of data tha yt understands. +format of data that yt understands. Example File ------------ diff --git a/yt/utilities/lib/cykdtree/c_kdtree.hpp b/yt/utilities/lib/cykdtree/c_kdtree.hpp index 724a1ca352f..f6d3b7732e1 100644 --- a/yt/utilities/lib/cykdtree/c_kdtree.hpp +++ b/yt/utilities/lib/cykdtree/c_kdtree.hpp @@ -67,7 +67,7 @@ class Node { less = NULL; greater = NULL; } - // emtpy node with some info + // empty node with some info Node(uint32_t ndim0, double *le, double *re, bool *ple, bool *pre) { is_empty = true; is_leaf = false; @@ -153,7 +153,7 @@ class Node { } } Node(std::istream &is) { - // Note that Node instances intialized via this method do not have + // Note that Node instances initialized via this method do not have // any neighbor information. We will build neighbor information later // by walking the tree bool check_bit = deserialize_scalar(is); diff --git a/yt/utilities/lib/cykdtree/kdtree.pyx b/yt/utilities/lib/cykdtree/kdtree.pyx index 566c6298252..2c6fbf9896f 100644 --- a/yt/utilities/lib/cykdtree/kdtree.pyx +++ b/yt/utilities/lib/cykdtree/kdtree.pyx @@ -60,7 +60,7 @@ cdef class PyNode: range(node.right_neighbors[i].size())] def __cinit__(self): - # Initialize everthing to NULL/0/None to prevent seg fault + # Initialize everything to NULL/0/None to prevent seg fault self._node = NULL self.id = 0 self.npts = 0 @@ -169,7 +169,7 @@ cdef class PyKDTree: Defaults to False. Raises: - ValueError: If `leafsize < 2`. This currectly segfaults. + ValueError: If `leafsize < 2`. This currently segfaults. Attributes: npts (uint64): Number of points in the tree. @@ -201,7 +201,7 @@ cdef class PyKDTree: self._idx[i] = tree.all_idx[i] def __cinit__(self): - # Initialize everthing to NULL/0/None to prevent seg fault + # Initialize everything to NULL/0/None to prevent seg fault self._tree = NULL self.npts = 0 self.ndim = 0 @@ -286,7 +286,7 @@ cdef class PyKDTree: can be in any order. Defaults to True. Raises: - AssertionError: If there are missmatches between any of the two + AssertionError: If there are mismatches between any of the two trees' parameters. """ @@ -388,7 +388,7 @@ cdef class PyKDTree: np.ndarray of uint32: Leaves containing/neighboring `pos`. Raises: - ValueError: If pos is not contained withing the KDTree. 
+ ValueError: If pos is not contained within the KDTree. """ return self._get_neighbor_ids(pos) @@ -419,7 +419,7 @@ cdef class PyKDTree: :class:`cykdtree.PyNode`: Leaf containing `pos`. Raises: - ValueError: If pos is not contained withing the KDTree. + ValueError: If pos is not contained within the KDTree. """ return self._get(pos) diff --git a/yt/utilities/lib/cykdtree/plot.py b/yt/utilities/lib/cykdtree/plot.py index 52b8d8933e9..414b2a8f388 100644 --- a/yt/utilities/lib/cykdtree/plot.py +++ b/yt/utilities/lib/cykdtree/plot.py @@ -125,16 +125,22 @@ def _plot2D_root( def plot2D_serial(tree, pts=None, label_boxes=False, **kwargs): r"""Plot a 2D kd-tree constructed in serial. - Args: - tree (:class:`cykdtree.kdtree.PyKDTree`): kd-tree class. - pts (np.ndarray, optional): Points contained by the kdtree. Defaults to - None if not provided and points are not plotted. - label_boxes (bool, optional): If True, leaves in the tree are labeled - with their index. Defaults to False. - Additional keywords are passed to :func:`cykdtree.plot._plot2D_root`. + Parameters + ---------- - Returns: - :obj:`matplotlib.pyplot.Axes`: Axes containing the plot. + tree: :class:`cykdtree.kdtree.PyKDTree` + kd-tree class. + pts: np.ndarray, optional + Points contained by the kdtree. + label_boxes: bool + If True, leaves in the tree are labeled with their index. Defaults to False. + + Additional keywords are passed to :func:`cykdtree.plot._plot2D_root`. + + Returns + ------- + + :obj:`matplotlib.pyplot.Axes`: Axes containing the plot. """ # Box edges diff --git a/yt/utilities/lib/cykdtree/tests/__init__.py b/yt/utilities/lib/cykdtree/tests/__init__.py index b06eacb3fac..e5f2a222973 100644 --- a/yt/utilities/lib/cykdtree/tests/__init__.py +++ b/yt/utilities/lib/cykdtree/tests/__init__.py @@ -198,7 +198,7 @@ def run_test( suppress_final_output=False, **kwargs, ): - r"""Run a rountine with a designated number of points & dimensions on a + r"""Run a routine with a designated number of points & dimensions on a selected number of processors. Args: diff --git a/yt/utilities/lib/cykdtree/tests/scaling.py b/yt/utilities/lib/cykdtree/tests/scaling.py index 1cec264298d..9b19a922e81 100644 --- a/yt/utilities/lib/cykdtree/tests/scaling.py +++ b/yt/utilities/lib/cykdtree/tests/scaling.py @@ -60,7 +60,7 @@ def stats_run( def time_run( npart, nproc, ndim, nrep=1, periodic=False, leafsize=10, suppress_final_output=False ): - r"""Get runing times using :package:`time`. + r"""Get running times using :package:`time`. Args: npart (int): Number of particles. diff --git a/yt/utilities/lib/cykdtree/utils.pyx b/yt/utilities/lib/cykdtree/utils.pyx index 1bd345a5be1..a2c64205457 100644 --- a/yt/utilities/lib/cykdtree/utils.pyx +++ b/yt/utilities/lib/cykdtree/utils.pyx @@ -269,7 +269,7 @@ def py_partition_given_pivot(np.ndarray[np.float64_t, ndim=2] pos, def py_select(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d, np.int64_t t): - r"""Get the indices required to partition coordiantes such that the first + r"""Get the indices required to partition coordinates such that the first t elements in pos[:,d] are the smallest t elements in pos[:,d]. Args: diff --git a/yt/utilities/lib/geometry_utils.pxd b/yt/utilities/lib/geometry_utils.pxd index 577dcd20124..3165bb43149 100644 --- a/yt/utilities/lib/geometry_utils.pxd +++ b/yt/utilities/lib/geometry_utils.pxd @@ -284,7 +284,7 @@ cdef inline np.uint64_t bounded_morton_relative(np.float64_t x, np.float64_t y, return mi2 -# This dosn't seem to be much, if at all, faster... 
+# This doesn't seem to be much, if at all, faster... @cython.cdivision(True) cdef inline np.uint64_t bounded_morton_dds(np.float64_t x, np.float64_t y, np.float64_t z, np.float64_t *DLE, np.float64_t *dds): diff --git a/yt/utilities/lib/geometry_utils.pyx b/yt/utilities/lib/geometry_utils.pyx index bc640b62f23..da21c59564b 100644 --- a/yt/utilities/lib/geometry_utils.pyx +++ b/yt/utilities/lib/geometry_utils.pyx @@ -820,7 +820,7 @@ def morton_qsort_iterative(np.ndarray[floating, ndim=2] pos, np.ndarray[np.uint64_t, ndim=1] ind, use_loop = False): # http://www.geeksforgeeks.org/iterative-quick-sort/ - # Auxillary stack + # Auxiliary stack cdef np.ndarray[np.int64_t, ndim=1] stack = np.zeros(h-l+1, dtype=np.int64) cdef np.int64_t top = -1 cdef np.int64_t p @@ -1019,7 +1019,7 @@ def knn_direct(np.ndarray[np.float64_t, ndim=2] P, np.uint64_t k, np.uint64_t i, P (np.ndarray): (N,d) array of points to search sorted by Morton order. k (int): number of nearest neighbors to find. i (int): index of point that nearest neighbors should be found for. - idx (np.ndarray): indicies of points from P to be considered. + idx (np.ndarray): indices of points from P to be considered. return_dist (Optional[bool]): If True, distances to the k nearest neighbors are also returned (in order of proximity). (default = False) @@ -1028,7 +1028,7 @@ def knn_direct(np.ndarray[np.float64_t, ndim=2] P, np.uint64_t k, np.uint64_t i, True. (default = False) Returns: - np.ndarray: Indicies of k nearest neighbors to point i. + np.ndarray: Indices of k nearest neighbors to point i. """ cdef int j,m @@ -1139,7 +1139,7 @@ def csearch_morton(np.ndarray[np.float64_t, ndim=2] P, int k, np.uint64_t i, if dist_to_box(cbox_sol,cbox_hl,rbox_hl) >= 1.5*rbox_sol: print('{} outside: rad = {}, rbox = {}, dist = {}'.format(m,rad_Ai,rbox_sol,dist_to_box(P[i,:],cbox_hl,rbox_hl))) return Ai - # Expand search to lower/higher indicies as needed + # Expand search to lower/higher indices as needed if i < m: # They are already sorted... Ai = csearch_morton(P,k,i,Ai,l,m-1,order,DLE,DRE,nu=nu) if compare_morton(P[m,:],P[i,:]+dist(P[i,:],P[Ai[k-1],:])): @@ -1158,13 +1158,13 @@ def knn_morton(np.ndarray[np.float64_t, ndim=2] P0, int k, np.uint64_t i0, float c = 1.0, int nu = 4, issorted = False, int order = ORDER_MAX, np.ndarray[np.float64_t, ndim=1] DLE = np.zeros(3,dtype=np.float64), np.ndarray[np.float64_t, ndim=1] DRE = np.zeros(3,dtype=np.float64)): - """Get the indicies of the k nearest neighbors to point i. + """Get the indices of the k nearest neighbors to point i. Args: P (np.ndarray): (N,d) array of points to search. k (int): number of nearest neighbors to find for each point in P. i (np.uint64): index of point to find neighbors for. - c (float): factor determining how many indicies before/after i are used + c (float): factor determining how many indices before/after i are used in the initial search (i-c*k to i+c*k, default = 1.0) nu (int): minimum number of points before a direct knn search is performed. (default = 4) @@ -1178,7 +1178,7 @@ def knn_morton(np.ndarray[np.float64_t, ndim=2] P0, int k, np.uint64_t i0, If not provided, this is determined from the points. Returns: - np.ndarray: (N,k) indicies of k nearest neighbors for each point in P. + np.ndarray: (N,k) indices of k nearest neighbors for each point in P. 
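    For orientation, the brute-force fallback documented for knn_direct above
    amounts to a distance sort, sketched here in plain numpy (names are
    illustrative, not this module's API):

        import numpy as np

        def knn_brute_force(P, k, i, idx):
            # sort the candidate points by distance to point i, keep the k closest
            d = np.linalg.norm(P[idx] - P[i], axis=1)
            return idx[np.argsort(d)[:k]]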
""" cdef int j cdef np.uint64_t i diff --git a/yt/utilities/lib/particle_kdtree_tools.pyx b/yt/utilities/lib/particle_kdtree_tools.pyx index 8aefaca21d2..38d3d70c9ac 100644 --- a/yt/utilities/lib/particle_kdtree_tools.pyx +++ b/yt/utilities/lib/particle_kdtree_tools.pyx @@ -62,7 +62,7 @@ def generate_smoothing_length(np.float64_t[:, ::1] tree_positions, tree_positions: arrays of floats with shape (n_particles, 3) The positions of particles in kdtree sorted order. Currently assumed - to be 3D postions. + to be 3D positions. kdtree: A PyKDTree instance A kdtree to do nearest neighbors searches with n_neighbors: The neighbor number to calculate the distance to @@ -120,7 +120,7 @@ def estimate_density(np.float64_t[:, ::1] tree_positions, np.float64_t[:] mass, tree_positions: array of floats with shape (n_particles, 3) The positions of particles in kdtree sorted order. Currently assumed - to be 3D postions. + to be 3D positions. mass: array of floats with shape (n_particles) The masses of particles in kdtree sorted order. smoothing_length: array of floats with shape (n_particles) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index d3155c8c0cc..72803050c95 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -1224,7 +1224,7 @@ def interpolate_sph_grid_gather(np.float64_t[:, :, :] buff, int num_neigh=32): """ This function takes in the bounds and number of cells in a grid (well, - actually we implicity calculate this from the size of buff). Then we can + actually we implicitly calculate this from the size of buff). Then we can perform nearest neighbor search and SPH interpolation at the centre of each cell in the grid. """ diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index 888088ee12b..fccc8fea7cd 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -72,7 +72,7 @@ class DuplicateFilter(logging.Filter): """A filter that removes duplicated successive log entries.""" # source - # https://stackoverflow.com/questions/44691558/suppress-multiple-messages-with-same-content-in-python-logging-module-aka-log-co # noqa + # https://stackoverflow.com/questions/44691558/suppress-multiple-messages-with-same-content-in-python-logging-module-aka-log-co def filter(self, record): current_log = (record.module, record.levelno, record.msg, record.args) if current_log != getattr(self, "last_log", None): diff --git a/yt/utilities/math_utils.py b/yt/utilities/math_utils.py index 42d95ccb0ea..e0409dd1155 100644 --- a/yt/utilities/math_utils.py +++ b/yt/utilities/math_utils.py @@ -883,7 +883,7 @@ def get_perspective_matrix(fovy, aspect, z_near, z_far): """ Given a field of view in radians, an aspect ratio, and a near and far plane distance, this routine computes the transformation matrix - corresponding to perspective projection using homogenous coordinates. + corresponding to perspective projection using homogeneous coordinates. Parameters ---------- @@ -955,7 +955,7 @@ def get_orthographic_matrix(maxr, aspect, z_near, z_far): """ Given a field of view in radians, an aspect ratio, and a near and far plane distance, this routine computes the transformation matrix - corresponding to perspective projection using homogenous coordinates. + corresponding to perspective projection using homogeneous coordinates. 
Parameters ---------- diff --git a/yt/utilities/on_demand_imports.py b/yt/utilities/on_demand_imports.py index 99d53cf6c0b..ba519191836 100644 --- a/yt/utilities/on_demand_imports.py +++ b/yt/utilities/on_demand_imports.py @@ -1,5 +1,6 @@ import sys -from distutils.version import LooseVersion + +from packaging.version import parse as parse_version class NotAModule: @@ -45,7 +46,7 @@ def __init__(self, pkg_name): "package to be installed. Try installing proj4 and " "geos with your package manager and building shapely " "and cartopy from source with: \n \n " - "pip install --no-binary :all: shapely cartopy \n \n" + "python -m pip install --no-binary :all: shapely cartopy \n \n" "For further instruction please refer to the " "yt documentation." % self.pkg_name ) @@ -360,10 +361,10 @@ def __init__(self): try: import h5py - if LooseVersion(h5py.__version__) < LooseVersion("2.4.0"): + if parse_version(h5py.__version__) < parse_version("2.4.0"): self._err = RuntimeError( "yt requires h5py version 2.4.0 or newer, " - 'please update h5py with e.g. "pip install -U h5py" ' + "please update h5py with e.g. `python -m pip install -U h5py` " "and try again" ) except ImportError: @@ -586,7 +587,7 @@ def __init__(self, pkg_name): "This functionality requires the %s package to be installed. " "Installation instructions can be found at " "https://github.com/weddige/miniball or alternatively you can " - "install via `pip install MiniballCpp`." + "install via `python -m pip install MiniballCpp`." ) self.error = ImportError(str % self.pkg_name) diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index 22719683443..b64ba701f70 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -64,7 +64,7 @@ def default_mpi_excepthook(exception_type, exception_value, tb): mylog.error("%s: %s", exception_type.__name__, exception_value) comm = yt.communication_system.communicators[-1] if comm.size > 1: - mylog.error("Error occured on rank %d.", comm.rank) + mylog.error("Error occurred on rank %d.", comm.rank) MPI.COMM_WORLD.Abort(1) @@ -747,7 +747,14 @@ def par_combine_object(self, data, op, datatype=None): data.update(self.comm.recv(source=i, tag=0)) else: self.comm.send(data, dest=0, tag=0) - data = self.comm.bcast(data, root=0) + + # Send the keys first, then each item one by one + # This is to prevent MPI from crashing when sending more + # than 2GiB of data over the network. + keys = self.comm.bcast(list(data.keys()), root=0) + for key in keys: + tmp = data.get(key, None) + data[key] = self.comm.bcast(tmp, root=0) return data elif datatype == "dict" and op == "cat": field_keys = sorted(data.keys()) diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index ed5c14ae56d..5217235d829 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -777,7 +777,7 @@ def get_slice_chunks(self, slice_dim, slice_index): def get_ibbox_slow(self, ileft, iright): """ - Given left and right indicies, return a mask and + Given left and right indices, return a mask and set of offsets+lengths into the sdf data. """ mask = np.zeros(self.indexdata["index"].shape, dtype="bool") @@ -796,7 +796,7 @@ def get_ibbox_slow(self, ileft, iright): def get_ibbox(self, ileft, iright): """ - Given left and right indicies, return a mask and + Given left and right indices, return a mask and set of offsets+lengths into the sdf data. 
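        For reference, the chunked broadcast introduced in par_combine_object
        above can be exercised standalone with mpi4py (a sketch; the 2 GiB
        ceiling applies per message, so a single oversized value would still
        need further splitting):

            from mpi4py import MPI

            comm = MPI.COMM_WORLD
            data = {"a": 1, "b": 2} if comm.rank == 0 else {}
            # broadcast the key list first, then one value per key
            keys = comm.bcast(list(data.keys()), root=0)
            for key in keys:
                data[key] = comm.bcast(data.get(key, None), root=0)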
""" # print('Getting data from ileft to iright:', ileft, iright) @@ -863,7 +863,7 @@ def get_ibbox(self, ileft, iright): def get_bbox(self, left, right): """ - Given left and right indicies, return a mask and + Given left and right indices, return a mask and set of offsets+lengths into the sdf data. """ ileft = np.floor((left - self.rmin) / self.domain_width * self.domain_dims) diff --git a/yt/utilities/tests/test_cosmology.py b/yt/utilities/tests/test_cosmology.py index 0d9310b8358..ae069d68dae 100644 --- a/yt/utilities/tests/test_cosmology.py +++ b/yt/utilities/tests/test_cosmology.py @@ -216,7 +216,8 @@ def test_cosmology_calculator_answers(): """ fn = os.path.join(local_dir, "cosmology_answers.yml") - data = yaml.load(open(fn), Loader=yaml.FullLoader) + with open(fn) as fh: + data = yaml.load(fh, Loader=yaml.FullLoader) cosmologies = data["cosmologies"] functions = data["functions"] diff --git a/yt/utilities/tests/test_interpolators.py b/yt/utilities/tests/test_interpolators.py index 46537b13c56..2c387ecc5e1 100644 --- a/yt/utilities/tests/test_interpolators.py +++ b/yt/utilities/tests/test_interpolators.py @@ -131,6 +131,14 @@ def test_get_vertex_centered_data(): assert len(w) == 1 assert issubclass(w[-1].category, DeprecationWarning) assert "requires list of fields" in str(w[-1].message) - vec_tuple = g.get_vertex_centered_data(("gas", "density"), no_ghost=True) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + vec_tuple = g.get_vertex_centered_data(("gas", "density"), no_ghost=True) + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert ( + "get_vertex_centered_data() requires list of fields, rather than " + "a single field as an argument." + ) in str(w[-1].message) assert_array_equal(vec_list[("gas", "density")], vec_str) assert_array_equal(vec_list[("gas", "density")], vec_tuple) diff --git a/yt/visualization/base_plot_types.py b/yt/visualization/base_plot_types.py index 97cdba0e844..164aa7b24bb 100644 --- a/yt/visualization/base_plot_types.py +++ b/yt/visualization/base_plot_types.py @@ -1,8 +1,8 @@ -from distutils.version import LooseVersion from io import BytesIO import matplotlib import numpy as np +from packaging.version import parse as parse_version from yt.funcs import ( get_brewer_cmap, @@ -132,9 +132,9 @@ def save(self, name, mpl_kwargs=None, canvas=None): if mpl_kwargs is None: mpl_kwargs = {} - if "papertype" not in mpl_kwargs and LooseVersion( + if "papertype" not in mpl_kwargs and parse_version( matplotlib.__version__ - ) < LooseVersion("3.3.0"): + ) < parse_version("3.3.0"): mpl_kwargs["papertype"] = "auto" name = validate_image_name(name) @@ -218,8 +218,8 @@ def _init_image(self, data, cbnorm, cblinthresh, cmap, extent, aspect): cblinthresh = np.nanmin(np.absolute(data)[data != 0]) cbnorm_kwargs.update(dict(linthresh=cblinthresh, vmin=vmin, vmax=vmax)) - MPL_VERSION = LooseVersion(matplotlib.__version__) - if MPL_VERSION >= "3.2.0": + MPL_VERSION = parse_version(matplotlib.__version__) + if MPL_VERSION >= parse_version("3.2.0"): # note that this creates an inconsistency between mpl versions # since the default value previous to mpl 3.4.0 is np.e # but it is only exposed since 3.2.0 @@ -238,7 +238,7 @@ def _init_image(self, data, cbnorm, cblinthresh, cmap, extent, aspect): if self._transform is None: # sets the transform to be an ax.TransData object, where the - # coordiante system of the data is controlled by the xlim and ylim + # coordinate system of the data is controlled by the xlim and ylim # of 
the data. transform = self.axes.transData else: diff --git a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index 9d01afe3b47..1203e413867 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -1,8 +1,12 @@ import numpy as np -from matplotlib import cm as mcm, colors as cc +from matplotlib import __version__ as mpl_ver, cm as mcm, colors as cc +from packaging.version import parse as parse_version from . import _colormap_data as _cm +MPL_VERSION = parse_version(mpl_ver) +del mpl_ver + def is_colormap(cmap): return isinstance(cmap, cc.Colormap) @@ -256,6 +260,30 @@ def show_colormaps(subset="all", filename=None): "to be 'all', 'yt_native', or a list of " "valid colormap names." ) from e + if parse_version("2.0.0") <= MPL_VERSION < parse_version("2.2.0"): + # the reason we do this filtering is to avoid spurious warnings in CI when + # testing against old versions of matplotlib (currently not older than 2.0.x) + # and we can't easily filter warnings at the level of the relevant test itself + # because it's not yet run exclusively with pytest. + # FUTURE: remove this completely when only matplotlib 2.2+ is supported + deprecated_cmaps = { + "spectral", + "spectral_r", + "Vega10", + "Vega10_r", + "Vega20", + "Vega20_r", + "Vega20b", + "Vega20b_r", + "Vega20c", + "Vega20c_r", + } + for cmap in deprecated_cmaps: + try: + maps.remove(cmap) + except ValueError: + pass + maps = sorted(set(maps)) # scale the image size by the number of cmaps plt.figure(figsize=(2.0 * len(maps) / 10.0, 6)) diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index ddfb82a5a29..4bb52158b43 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -577,7 +577,7 @@ def info(self, output=None): if output is None: output = sys.stdout if num_cols == 8: - header = "No. Name Ver Type Cards Dimensions Format Units" # NOQA E501 + header = "No. Name Ver Type Cards Dimensions Format Units" format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}" else: header = ( @@ -845,6 +845,9 @@ def sanitize_fits_unit(unit): return unit +# This list allows one to determine which axes are the +# correct axes of the image in a right-handed coordinate +# system depending on which axis is sliced or projected axis_wcs = [[1, 2], [0, 2], [0, 1]] @@ -888,11 +891,12 @@ def construct_image(ds, axis, data_source, center, image_res, width, length_unit else: frb = data_source.to_frb(width[0], (nx, ny), center=center, height=width[1]) elif isinstance(data_source, ParticleAxisAlignedDummyDataSource): + axes = axis_wcs[axis] bounds = ( - center[0] - width[0] / 2, - center[0] + width[0] / 2, - center[1] - width[1] / 2, - center[1] + width[1] / 2, + center[axes[0]] - width[0] / 2, + center[axes[0]] + width[0] / 2, + center[axes[1]] - width[1] / 2, + center[axes[1]] + width[1] / 2, ) frb = ParticleImageBuffer( data_source, bounds, (nx, ny), periodic=all(ds.periodicity) diff --git a/yt/visualization/geo_plot_utils.py b/yt/visualization/geo_plot_utils.py index 6ad262637a9..fc2c933e89e 100644 --- a/yt/visualization/geo_plot_utils.py +++ b/yt/visualization/geo_plot_utils.py @@ -59,7 +59,7 @@ def get_mpl_transform(mpl_proj): ... 
) """ - # first check to see if the tranform dict is empty, if it is fill it with + # first check to see if the transform dict is empty, if it is fill it with # the cartopy functions if not valid_transforms: for mpl_transform in transform_list: diff --git a/yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.src.js b/yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.src.js index 309ee983385..18d8cd402b3 100644 --- a/yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.src.js +++ b/yt/visualization/mapserver/html/Leaflet.Coordinates-0.1.5.src.js @@ -74,7 +74,7 @@ L.Control.Coordinates = L.Control.extend({ map.whenReady(this._update, this); this._showsCoordinates = true; - //wether or not to show inputs on click + //whether or not to show inputs on click if (options.enableUserInput) { L.DomEvent.addListener(this._container, "click", this._switchUI, this); } diff --git a/yt/visualization/mapserver/pannable_map.py b/yt/visualization/mapserver/pannable_map.py index 9ec6cb343f9..86bec605a8f 100644 --- a/yt/visualization/mapserver/pannable_map.py +++ b/yt/visualization/mapserver/pannable_map.py @@ -4,6 +4,7 @@ import bottle import numpy as np +from yt.fields.derived_field import ValidateSpatial from yt.utilities.lib.misc_utilities import get_color_bounds from yt.utilities.png_writer import write_png_to_string from yt.visualization.fixed_resolution import FixedResolutionBuffer @@ -134,20 +135,19 @@ def static(self, path): def list_fields(self): d = {} - # Add deposit fields (only cic + density for now) - for ptype in self.ds.particle_types: - d[ptype] = [ - (("deposit", f"{ptype}_cic"), False), - (("deposit", f"{ptype}_density"), False), - ] - # Add fluid fields (only gas for now) for ftype in self.ds.fluid_types: d[ftype] = [] for f in self.ds.derived_field_list: if f[0] != ftype: continue - + # Discard fields which need ghost zones for now + df = self.ds.field_info[f] + if any(isinstance(v, ValidateSpatial) for v in df.validators): + continue + # Discard cutting plane fields + if "cutting" in f[1]: + continue active = f[1] == self.field d[ftype].append((f, active)) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 21809ea7446..9b1f1d5f055 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -22,7 +22,7 @@ from ._commons import validate_image_name try: - import cmocean # noqa + import cmocean except ImportError: cmocean = None @@ -303,7 +303,7 @@ def set_log(self, field, log, linthresh=None, symlog_auto=False): log. Symlog can also work with negative values in log space as well as negative and positive values simultaneously and symmetrically. If symlog scaling is desired, please set log=True and either set symlog_auto=True or - select a alue for linthresh. + select a value for linthresh. Parameters ---------- diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index ebfe2baf64b..fccb0e7fd95 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -2325,7 +2325,7 @@ def __call__(self, plot): class TimestampCallback(PlotCallback): - """ + r""" Annotates the timestamp and/or redshift of the data output at a specified location in the image (either in a present corner, or by specifying (x,y) image coordinates with the x_pos, y_pos arguments. 
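    A typical invocation, assuming an existing slice plot (the dataset path
    is illustrative):

        import yt

        ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
        p = yt.SlicePlot(ds, "z", ("gas", "density"))
        p.annotate_timestamp(corner="upper_left", redshift=True, draw_inset_box=True)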
If no time_units are @@ -2401,7 +2401,7 @@ class TimestampCallback(PlotCallback): A dictionary of any arbitrary parameters to be passed to the Matplotlib FancyBboxPatch object as the inset box around the text. Defaults: ``{'boxstyle':'square', 'pad':0.3, 'facecolor':'black', - 'linewidth':3, 'edgecolor':'white', 'alpha':0.5}`` + 'linewidth':3, 'edgecolor':'white', 'alpha':0.5}`` Example ------- @@ -2563,7 +2563,7 @@ def __call__(self, plot): class ScaleCallback(PlotCallback): - """ + r""" Annotates the scale of the plot at a specified location in the image (either in a preset corner, or by specifying (x,y) image coordinates with the pos argument. Coeff and units (e.g. 1 Mpc or 100 kpc) refer to the @@ -2620,7 +2620,7 @@ class ScaleCallback(PlotCallback): text_args : dictionary, optional A dictionary of parameters to used to update the font_properties for the text in this callback. For any property not set, it will - use the defaults of the plot. Thus one can modify the text size with: + use the defaults of the plot. Thus one can modify the text size with ``text_args={'size':24}`` size_bar_args : dictionary, optional @@ -2635,7 +2635,7 @@ class ScaleCallback(PlotCallback): A dictionary of keyword arguments to be passed to the matplotlib Patch object that represents the inset box. Defaults: ``{'facecolor': 'black', 'linewidth': 3, - 'edgecolor': 'white', 'alpha': 0.5, 'boxstyle': 'square'}`` + 'edgecolor': 'white', 'alpha': 0.5, 'boxstyle': 'square'}`` scale_text_format : string, optional This specifies the format of the scalebar value assuming "scale" is the diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index a10ffc12eec..14c4cff0571 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1,13 +1,13 @@ from collections import defaultdict -from distutils.version import LooseVersion from functools import wraps from numbers import Number import matplotlib import matplotlib.pyplot as plt import numpy as np -from more_itertools import always_iterable, zip_equal +from more_itertools import always_iterable from mpl_toolkits.axes_grid1 import ImageGrid +from packaging.version import parse as parse_version from unyt.exceptions import UnitConversionError from yt._maintenance.deprecation import issue_deprecation_warning @@ -48,7 +48,22 @@ ) from .plot_modifications import callback_registry -MPL_VERSION = LooseVersion(matplotlib.__version__) +import sys # isort: skip + +if sys.version_info < (3, 10): + # this function is deprecated in more_itertools + # because it is superseded by the standard library + from more_itertools import zip_equal +else: + + def zip_equal(*args): + # FUTURE: when only Python 3.10+ is supported, + # drop this conditional and call the builtin zip + # function directly where due + return zip(*args, strict=True) + + +MPL_VERSION = parse_version(matplotlib.__version__) # Some magic for dealing with pyparsing being included or not # included in matplotlib (not in gentoo, yes in everything else) @@ -1187,7 +1202,7 @@ def _setup_plots(self): self.plots[f].cax.yaxis.set_ticks(mticks, minor=True) elif self._field_transform[f] == log_transform: - if MPL_VERSION >= LooseVersion("3.0.0"): + if MPL_VERSION >= parse_version("3.0.0"): self.plots[f].cax.minorticks_on() self.plots[f].cax.xaxis.set_visible(False) else: diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index cc5dd7c4a6d..eddfd68fd19 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -2,12 
+2,12 @@ import builtins import os from collections import OrderedDict -from distutils.version import LooseVersion from functools import wraps import matplotlib import numpy as np from more_itertools.more import always_iterable, unzip +from packaging.version import parse as parse_version from yt.data_objects.profiles import create_profile, sanitize_field_tuple_keys from yt.data_objects.static_output import Dataset @@ -28,7 +28,7 @@ validate_plot, ) -MPL_VERSION = LooseVersion(matplotlib.__version__) +MPL_VERSION = parse_version(matplotlib.__version__) def invalidate_profile(f): @@ -1174,7 +1174,7 @@ def _setup_plots(self): if self._cbar_minorticks[f]: if self._field_transform[f] == linear_transform: self.plots[f].cax.minorticks_on() - elif MPL_VERSION < LooseVersion("3.0.0"): + elif MPL_VERSION < parse_version("3.0.0"): # before matplotlib 3 log-scaled colorbars internally used # a linear scale going from zero to one and did not draw # minor ticks. Since we want minor ticks, calculate diff --git a/yt/visualization/tests/test_plotwindow.py b/yt/visualization/tests/test_plotwindow.py index 2a61b1495c6..cba87d757ee 100644 --- a/yt/visualization/tests/test_plotwindow.py +++ b/yt/visualization/tests/test_plotwindow.py @@ -45,7 +45,6 @@ def setup(): TEST_FLNMS = ["test.png"] M7 = "DD0010/moving7_0010" -WT = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030" FPROPS = {"family": "sans-serif", "style": "italic", "weight": "bold", "size": 24} @@ -190,29 +189,6 @@ def test_attributes(): ) -@requires_ds(WT) -def test_attributes_wt(): - plot_field = ("gas", "density") - decimals = 12 - - ds = data_dir_load(WT) - ax = "z" - for attr_name in ATTR_ARGS.keys(): - for args in ATTR_ARGS[attr_name]: - yield PlotWindowAttributeTest(ds, plot_field, ax, attr_name, args, decimals) - for n, r in CALLBACK_TESTS: - yield PlotWindowAttributeTest( - ds, - plot_field, - ax, - attr_name, - args, - decimals, - callback_id=n, - callback_runners=r, - ) - - class TestHideAxesColorbar(unittest.TestCase): ds = None diff --git a/yt/visualization/volume_rendering/lens.py b/yt/visualization/volume_rendering/lens.py index 66089393396..940ad309594 100644 --- a/yt/visualization/volume_rendering/lens.py +++ b/yt/visualization/volume_rendering/lens.py @@ -180,8 +180,8 @@ def _get_sampler_params(self, camera, render_source): north_vec = camera.unit_vectors[1] normal_vec = camera.unit_vectors[2] - px = np.mat(np.linspace(-0.5, 0.5, camera.resolution[0])) - py = np.mat(np.linspace(-0.5, 0.5, camera.resolution[1])) + px = np.linspace(-0.5, 0.5, camera.resolution[0])[np.newaxis, :] + py = np.linspace(-0.5, 0.5, camera.resolution[1])[np.newaxis, :] sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px) sample_x = sample_x.transpose() @@ -384,8 +384,8 @@ def _get_positions_vectors(self, camera, disparity): east_vec_rot = np.dot(R, east_vec) normal_vec_rot = np.dot(R, normal_vec) - px = np.mat(np.linspace(-0.5, 0.5, single_resolution_x)) - py = np.mat(np.linspace(-0.5, 0.5, camera.resolution[1])) + px = np.linspace(-0.5, 0.5, single_resolution_x)[np.newaxis, :] + py = np.linspace(-0.5, 0.5, camera.resolution[1])[np.newaxis, :] sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px) sample_x = sample_x.transpose() diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 7829e577eed..e2501c0ed46 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1309,8 +1309,8 @@ def get_sampler_args(self, 
image): east_vec = self.orienter.unit_vectors[0].reshape(3, 1) north_vec = self.orienter.unit_vectors[1].reshape(3, 1) - px = np.mat(np.linspace(-0.5, 0.5, self.resolution[0])) - py = np.mat(np.linspace(-0.5, 0.5, self.resolution[1])) + px = np.linspace(-0.5, 0.5, self.resolution[0])[np.newaxis, :] + py = np.linspace(-0.5, 0.5, self.resolution[1])[np.newaxis, :] sample_x = self.width[0] * np.array(east_vec * px).transpose() sample_y = self.width[1] * np.array(north_vec * py).transpose() diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 4b3e25c21e7..0ffa23a06ea 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -284,7 +284,7 @@ def save(self, fname=None, sigma_clip=None, render=True): If specified, save the rendering as to the file "fname". If unspecified, it creates a default based on the dataset filename. The file format is inferred from the filename's suffix. Supported - fomats are png, pdf, eps, and ps. + formats are png, pdf, eps, and ps. Default: None sigma_clip: float, optional Image values greater than this number times the standard deviation diff --git a/yt/visualization/volume_rendering/tests/test_vr_cameras.py b/yt/visualization/volume_rendering/tests/test_vr_cameras.py index d880ee62067..2881f743b0c 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py +++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py @@ -123,7 +123,7 @@ def test_interactive_camera(self): log_fields=[False], ) del cam - # Can't take a snapshot here since IC uses pylab.' + # Can't take a snapshot here since IC uses matplotlib.' def test_projection_camera(self): ds = self.ds diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 93eb2864dac..ad0e93bacc2 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -409,7 +409,7 @@ def __init__(self, x_bounds, nbins=256, grey_opacity=False): def add_gaussian(self, location, width, height): r"""Add a Gaussian distribution to the transfer function. - Typically, when rendering isocontours, a Guassian distribution is the + Typically, when rendering isocontours, a Gaussian distribution is the easiest way to draw out features. The spread provides a softness. The values are calculated as :math:`f(x) = h \exp{-(x-x_0)^2 / w}`.
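For a concrete picture of the Gaussian feature described above, a minimal
sketch built on the color transfer function defined in this module (bounds
and values are illustrative):

    import numpy as np

    from yt.visualization.volume_rendering.transfer_functions import (
        ColorTransferFunction,
    )

    # bounds are log10 of the field values, as is conventional for density
    tf = ColorTransferFunction((-28.0, -24.0))
    # location x0, width w, and an RGBA "height" h, per
    # f(x) = h * exp(-(x - x0)**2 / w)
    tf.add_gaussian(np.log10(1e-26), 0.005, [0.0, 0.0, 1.0, 1.0])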