From b3daab9256b06b0a6b04100a9d7970addf904392 Mon Sep 17 00:00:00 2001
From: Adam Ormondroyd <52655393+AdamOrmondroyd@users.noreply.github.com>
Date: Mon, 29 Jan 2024 12:24:33 +0000
Subject: [PATCH] `**kwargs` input and `anesthetic.NestedSamples` output (#90)

* neighbor spelling (assuming we're sticking to American spelling!)
* updated run_pypolychord to use **kwargs rather than PolyChordSettings
* return anesthetic.NestedSamples output if installed
* added kwargs to run_pypolychord() docstring
* changed examples to use **kwargs
* accidental rename of run_polychord()
* set kwarg defaults separately to run_pypolychord so that they can be given to resume file
* run_polychord creates the paramnames file
* changed other examples paramnames are passed to run_pypolychord
* mention anesthetic output in docstring (incomplete)
* tidied up grade_dims and nlives argument processing
* logx should be logc
* missing comma
* removed settings argument
* Restored old interface
* run_polychord -> run
* reverted Python_Functions/ (apart from logx->logc correction)
* put PolyChordSettings back in __init__.py
* run() uses make_resume_file_kwargs_interface
* added run to __init__.py
* update nDerived, prior and dumper to kwargs
* nDims is not a kwarg
* add nDerived, prior and dumper to kwargs
* use default_kwargs to set the defaults, then use this to see if kwargs has any excess keys
* separate default_kwargs that refer to other default_kwargs
* wrong brackets - think it's time to leave this for a moment
* removed accidental commas
* pass kwargs directly rather than creating dict
* added anesthetic to install_requires
* anesthetic now assumed to be installed, so new interface always returns an anesthetic output
* tidy up plotting to just use anesthetic output
* simplified to _make_resume_file(), plus wrapper _legacy_make_resume_file which translates from a settings object
* added run_pypolychord_legacy_interface.py
* update run() docstring to anesthetic output
* removed unused sqrt from run_pypolychord.py
* updated run_pypolychord.ipynb with py2nb
* change requirement to anesthetic==2.0.0b11
* removed creation of .paramnames file from run(), added to run_pypolychord.py
* corrected kwargs error message
* exception handling if chains/ already exists
* update to anesthetic 2.0.0b12
* rather than repeating PolyChordOutput.make_paramnames_file(), make it a static method and reuse it
* changed setup.py to point to github anesthetic as 2.0.0b12 hasn't been released
* remove unnecessary os import
* removed commented out/empty lines around paramname creation
* removed unnecessary kwargs = kwargs.copy()
* double quotes to be more civilised
* change to single quotes to make Lucas happy
* update for more recent anesthetic
* line spacing and remove todo (see pr 110)
* get anesthetic from pypi instead of gh
* might as well do some formatting
* also fix wrap_loglikelihood for new interface (#110)
* anesthetic back to optional, return None if not installed
* version bump to 1.22.0
* py2nb run_pypolychord.py
* Fix names of run scripts/notebooks
* remove testing print statement
* remove sqrt import, who knows where this came from
* Updated with new recommended interface
* anesthetic root is deprecated
* Update README.rst
* more anesthetic 2 madness
* update notebooks
* remove () from dict
* play around with tests
* add anesthetic to CI install step
* only run tests on python>=3.8
* test likelihood without derived parameters
* some test housekeeping
* use or between the two loglikelihood return options
* use pathlib.Path to save OSError nonsense
* explain paramnames more clearly
* check that grade_dims add up to nDims and test
* update run docstring anesthetic stuff
* correct run_pypolychord return
* correct missing blank lines in run() docstring
* add cluster_dir to kwargs, since technically it could be changed in PolyChordSettings (with difficulty)
* remove os entirely in favour of pathlib
* fix line too long
* update date
* fix default grade_frac
* remove pytest-mpi requirement from CI
* remove unnecessary cast to set
* don't use ac shorthand, but also don't clutter the namespace with log and pi
* Revert "don't use ac shorthand, but also don't clutter the namespace with log and pi"

  This reverts commit cc807f5dc7e6481420f9b9b3fdca966f7df3c147.

* from anesthetic import make_2d_axes

---------

Co-authored-by: Adam Ormondroyd <52655393+Ormorod@users.noreply.github.com>
Co-authored-by: Will Handley
---
 .github/workflows/CI.yml      |   9 +-
 README.rst                    |  61 +++--
 pypolychord/__init__.py       |   4 +-
 pypolychord/output.py         |   3 +-
 pypolychord/polychord.py      | 498 +++++++++++++++++++++++++++++++---
 quickstart.ipynb              | 170 ++++++++++++
 quickstart.py                 |  70 +++++
 run_pypolychord.ipynb         |  40 +--
 run_pypolychord.py            |  10 +-
 src/polychord/feedback.f90    |   4 +-
 tests/test_run_pypolychord.py |  96 +++++--
 11 files changed, 839 insertions(+), 126 deletions(-)
 create mode 100644 quickstart.ipynb
 create mode 100644 quickstart.py
 mode change 100755 => 100644 run_pypolychord.py

diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 4a9d30c8..ed6a52bb 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -57,10 +57,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
-        include:
-          - os: ubuntu-20.04
-            python-version: '3.6'
+        python-version: ['3.8', '3.9', '3.10', '3.11']
 
     steps:
     - uses: actions/checkout@v3
@@ -75,7 +72,7 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
-        python -m pip install mpi4py pytest pytest-mpi fortranformat
+        python -m pip install mpi4py pytest fortranformat anesthetic
 
     - name: Install pypolychord
       run: pip install -v '.[test]'
@@ -84,4 +81,4 @@ jobs:
       run: python -m pytest tests
 
     - name: Test pypolychord (MPI)
-      run: mpirun --oversubscribe -np 3 python -m pytest tests --only-mpi
+      run: mpirun --oversubscribe -np 3 python -m pytest tests
diff --git a/README.rst b/README.rst
index 7cc61730..407e4b4c 100644
--- a/README.rst
+++ b/README.rst
@@ -5,17 +5,17 @@
     :target: https://arxiv.org/abs/1506.00171
     :alt: Open-access paper
 
-PolyChord v 1.21.4
+PolyChord v 1.22.0
 
 Will Handley, Mike Hobson & Anthony Lasenby
 
 wh260@mrao.cam.ac.uk
 
-arXiv:1502.01856
+`arXiv:1502.01856 <https://arxiv.org/abs/1502.01856>`_
 
-arXiv:1506.00171
+`arXiv:1506.00171 <https://arxiv.org/abs/1506.00171>`_
 
-Latest version Released Apr 2020
+Latest version Released Jan 2024
 
 
 PolyChord Licence
@@ -26,11 +26,12 @@ file. PolyChord is free for academic usage
 
 Users are also required to cite the PolyChord papers:
 
-- arXiv:1502.01856
-- arXiv:1506.00171
+- `arXiv:1502.01856 <https://arxiv.org/abs/1502.01856>`_
+- `arXiv:1506.00171 <https://arxiv.org/abs/1506.00171>`_
 
 in their publications.
 
+
 Python quickstart
 =================
 
@@ -39,14 +40,14 @@ For Python users in a hurry:
 
 .. code:: bash
 
     pip install git+https://github.com/PolyChord/PolyChordLite@master
-    wget https://raw.githubusercontent.com/PolyChord/PolyChordLite/master/run_pypolychord.py
-    python run_pypolychord.py
+    wget https://raw.githubusercontent.com/PolyChord/PolyChordLite/master/quickstart.py
+    python quickstart.py
 
-You can then modify the file run_pypolychord.py to your needs. If you have mpi compilers available, this version can be run in parallel with mpi.
+You should make sure that you have gfortran/gcc (or equivalent) Fortran compilers installed.
 
-You should make sure that you have gfortran (or equivalent) fortran compilers installed.
+You can then modify the file quickstart.py to your needs. If you have mpi compilers available, this version can be run in parallel with mpi.
 
-If any of the above steps fail (this can in general happen for certain Mac OSX versions), then try installing without pip:
+If any of the above steps fail (this can in general happen for certain macOS versions), then try installing without pip:
 
@@ -54,19 +55,29 @@ If any of the above steps fail (this can in general happen for certain Mac OSX v
 
 .. code:: bash
 
     git clone https://github.com/PolyChord/PolyChordLite.git
     cd PolyChordLite
     python setup.py install
 
+or perhaps:
+
+.. code:: bash
+
+    git clone https://github.com/PolyChord/PolyChordLite.git
+    cd PolyChordLite
+    make
+    pip install .
+
+Our apologies -- the shifting sands that are macOS do not play well with the delicate dance of Fortran, C and Python that is (py)PolyChordLite.
+
 If you do not have sudo access/virtual environments/anaconda, then appending `--user` to the install command may be necessary.
 
 Post Processing
 ===============
 
-We recommend the tool `anesthetic <https://github.com/handley-lab/anesthetic>`_ for post-processing your nested sampling runs. A plot gallery can be found `here `_
+We recommend the pip-installable tool `anesthetic <https://github.com/handley-lab/anesthetic>`_ for post-processing your nested sampling runs. A plot gallery can be found `here `_.
 
+.. code:: bash
 
-https://github.com/handley-lab/anesthetic
+    pip install anesthetic
 
-```
-pip install anesthetic
-```
+If `anesthetic` is already installed, then `pypolychord.run()` will return an `anesthetic.NestedSamples` object, which can be used directly for post-processing.
 
 MPI Support
 ===========
@@ -211,7 +222,6 @@ You can install direct from the git repository using:
 
     pip install https://github.com/PolyChord/PolyChordLite/archive/master.zip
 
-(N.B. PyPi coming soon)
 or you can install locally with the command:
 
 .. code:: bash
@@ -232,26 +242,27 @@ and check that it's working by running:
 
 .. code:: bash
 
-    $ python run_pypolychord.py
+    $ python quickstart.py
 
 or in MPI:
 
 .. code:: bash
 
-    $ mpirun -np 4 python run_pypolychord.py
+    $ mpirun -np 4 python quickstart.py
 
 If so, the rest of the interface is relatively painless. Follow the example in
-run_pypolychord.py, and consult the docstring if you need help:
+quickstart.py, and consult the docstring if you need help:
 
 .. code:: python
 
-    import pypolychord
-    from pypolychord.settings import PolyChordSettings
+    >>> import pypolychord
+    >>> help(pypolychord.run)
+
+There is also a demo `python notebook `_.
+
+To post-process nested sampling runs we recommend the pip-installable tool `anesthetic <https://github.com/handley-lab/anesthetic>`_. A plot gallery can be found `here `_.
 
-    help(pypolychord.run_polychord)
-    help(PolyChordSettings)
 
-There is also a demo `python notebook `_.
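+For a minimal sketch of the new interface (assuming `anesthetic` is installed,
+so that `pypolychord.run()` returns an `anesthetic.NestedSamples`; the toy
+likelihood below is purely illustrative):
+
+.. code:: python
+
+    import numpy as np
+    import pypolychord
+
+    def loglikelihood(theta):
+        return -np.sum(theta**2)/2  # toy Gaussian log-likelihood
+
+    samples = pypolychord.run(loglikelihood, 4, read_resume=False)
+    if samples is not None:  # run() returns None without anesthetic
+        print(samples.logZ())  # log-evidence estimate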
 
 Output files
 =============
diff --git a/pypolychord/__init__.py b/pypolychord/__init__.py
index 14143845..d786aab1 100644
--- a/pypolychord/__init__.py
+++ b/pypolychord/__init__.py
@@ -1,3 +1,3 @@
-__version__ = "1.21.4"
+__version__ = "1.22.0"
 from pypolychord.settings import PolyChordSettings
-from pypolychord.polychord import run_polychord
+from pypolychord.polychord import run_polychord, run
diff --git a/pypolychord/output.py b/pypolychord/output.py
index 9ef16d36..cc09e812 100644
--- a/pypolychord/output.py
+++ b/pypolychord/output.py
@@ -170,7 +170,8 @@ def make_paramnames_files(self, paramnames):
 
         self._create_pandas_table(paramnames = paramnames)
 
-    def make_paramnames_file(self, paramnames, filename):
+    @staticmethod
+    def make_paramnames_file(paramnames, filename):
         with open(filename, 'w') as f:
             for name, latex in paramnames:
                 f.write('%s %s\n' % (name, latex))
diff --git a/pypolychord/polychord.py b/pypolychord/polychord.py
index e8720137..6e6ca398 100644
--- a/pypolychord/polychord.py
+++ b/pypolychord/polychord.py
@@ -1,5 +1,6 @@
 from pypolychord.output import PolyChordOutput
-import os
+from pathlib import Path
+import warnings
 import _pypolychord
 import numpy as np
 
@@ -50,9 +51,7 @@ def run_polychord(loglikelihood, nDims, nDerived, settings,
         (logL, phi): (float, array-like)
             return is a 2-tuple of the log-likelihood (logL) and the derived
             parameters (phi). phi length nDerived.
-
-        Returns
-        -------
+        OR
         logL: float
             log-likelihood
 
@@ -104,7 +103,7 @@ def run_polychord(loglikelihood, nDims, nDerived, settings,
 
     Returns
     -------
-    None. (in Python)
+    pypolychord.output.PolyChordOutput
 
     All output is currently produced in the form of text files in <base_dir>
     directory. If you would like to contribute to pypolychord and improve this,
@@ -152,20 +151,11 @@ def run_polychord(loglikelihood, nDims, nDerived, settings,
     except ImportError:
         rank = 0
 
-    try:
-        if rank == 0:
-            os.makedirs(settings.base_dir)
-    except OSError:
-        pass
-
-    try:
-        if rank == 0:
-            os.makedirs(settings.cluster_dir)
-    except OSError:
-        pass
+    if rank == 0:
+        Path(settings.cluster_dir).mkdir(parents=True, exist_ok=True)
 
     if settings.cube_samples is not None:
-        make_resume_file(settings, loglikelihood, prior)
+        _legacy_make_resume_file(settings, loglikelihood, prior)
         read_resume = settings.read_resume
         settings.read_resume=True
@@ -225,10 +215,442 @@ def wrap_prior(cube, theta):
 
     return PolyChordOutput(settings.base_dir, settings.file_root)
 
-def make_resume_file(settings, loglikelihood, prior):
+### new interface ###
+
+
+def run(loglikelihood, nDims, **kwargs):
+    """
+    Runs PolyChord.
+
+    For full details of nested sampling and PolyChord, please refer to:
+
+    * PolyChord paper: http://arxiv.org/abs/1506.00171
+    * Nested Sampling paper: http://projecteuclid.org/euclid.ba/1340370944
+
+    To run in mpi mode, just run your script with mpirun as usual.
+    Make sure that pypolychord is compiled with MPI:
+    $ make veryclean
+    $ make pypolychord MPI=1
+
+    Users are also required to cite the PolyChord papers:
+    arXiv:1502.01856
+    arXiv:1506.00171
+    in their publications.
+
+
+    Parameters
+    ----------
+
+    loglikelihood: function
+        This function computes the log-likelihood of the model and derived
+        parameters (phi) from the physical coordinates (theta).
+
+        Parameters
+        ----------
+        theta: numpy.array
+            physical coordinate. Length nDims.
+
+        Returns
+        -------
+        (logL, phi): (float, array-like)
+            return is a 2-tuple of the log-likelihood (logL) and the derived
+            parameters (phi). phi length nDerived.
+        OR
+        logL: float
+            log-likelihood
+
+    nDims: int
+        Dimensionality of the model, i.e. the number of physical parameters.
+
+    Keyword arguments
+    -----------------
+
+    nDerived: int
+        The number of derived parameters (can be 0).
+
+    prior: function
+        This function converts from the unit hypercube to the physical
+        parameters.
+        (Default: identity function => prior(cube) == cube )
+
+        Parameters
+        ----------
+        cube: numpy.array
+            coordinates in the unit hypercube. Length nDims.
+
+        Returns
+        -------
+        theta: array-like
+            physical coordinates. Length nDims.
+
+    dumper: function
+        This function gives run-time access to the posterior and live points.
+
+        Parameters
+        ----------
+        live: numpy.array
+            The live points and their loglikelihood birth and death contours
+            Shape (nlive, nDims+nDerived+2)
+        dead: numpy.array
+            The dead points and their loglikelihood birth and death contours
+            Shape (ndead, nDims+nDerived+2)
+        logweights: numpy.array
+            The posterior weights of the dead points
+            Shape (ndead)
+        logZ: float
+            The current log-evidence estimate
+        logZerr: float
+            The current log-evidence error estimate
+
+    nlive: int
+        (Default: nDims*25)
+        The number of live points.
+        Increasing nlive increases the accuracy of posteriors and evidences,
+        and proportionally increases runtime ~ O(nlive).
+
+    num_repeats : int
+        (Default: nDims*5)
+        The number of slice-sampling steps to generate a new point.
+        Increasing num_repeats increases the reliability of the algorithm.
+        Typically
+        * for reliable evidences need num_repeats ~ O(5*nDims).
+        * for reliable posteriors need num_repeats ~ O(nDims)
+
+    nprior : int
+        (Default: nlive)
+        The number of prior samples to draw before starting compression.
+
+    nfail : int
+        (Default: nlive)
+        The number of failed spawns before stopping nested sampling.
+
+    do_clustering : boolean
+        (Default: True)
+        Whether or not to use clustering at run time.
+
+    feedback : {0,1,2,3}
+        (Default: 1)
+        How much command line feedback to give
+
+    precision_criterion : float
+        (Default: 0.001)
+        Termination criterion. Nested sampling terminates when the evidence
+        contained in the live points is precision_criterion fraction of the
+        total evidence.
+
+    logzero : float
+        (Default: -1e30)
+        The loglikelihood value at which PolyChord considers points
+        'unphysical', and excludes them at the prior level.
+
+    max_ndead : int
+        (Default: -1)
+        Alternative termination criterion. Stop after max_ndead iterations.
+        Set negative to ignore (default).
+
+    boost_posterior : float
+        (Default: 0.0)
+        Increase the number of posterior samples produced. This can be set
+        arbitrarily high, but you won't be able to boost by more than
+        num_repeats
+        Warning: in high dimensions PolyChord produces _a lot_ of posterior
+        samples. You probably don't need to change this
+
+    posteriors : boolean
+        (Default: True)
+        Produce (weighted) posterior samples. Stored in <root>.txt.
+
+    equals : boolean
+        (Default: True)
+        Produce (equally weighted) posterior samples. Stored in
+        <root>_equal_weights.txt
+
+    cluster_posteriors : boolean
+        (Default: True)
+        Produce posterior files for each cluster?
+        Does nothing if do_clustering=False.
+
+    write_resume : boolean
+        (Default: True)
+        Create a resume file.
+
+    read_resume : boolean
+        (Default: True)
+        Read from resume file.
+
+    write_stats : boolean
+        (Default: True)
+        Write an evidence statistics file.
+
+    write_live : boolean
+        (Default: True)
+        Write a live points file.
+
+    write_dead : boolean
+        (Default: True)
+        Write a dead points file.
+
+    write_prior : boolean
+        (Default: True)
+        Write a prior points file.
+
+    maximise : boolean
+        (Default: False)
+        Perform maximisation at the end of the run to find the maximum
+        likelihood point and value
+
+    compression_factor : double
+        (Default: exp(-1))
+        How often to update the files and do clustering
+
+    synchronous : boolean
+        (Default: True)
+        Parallelise with synchronous workers, rather than asynchronous ones.
+        This can be set to False if the likelihood speed is known to be
+        approximately constant across the parameter space. Synchronous
+        parallelisation is less effective than asynchronous by a factor ~O(1)
+        for large parallelisation.
+
+    base_dir : string
+        (Default: 'chains')
+        Where to store output files.
+
+    file_root : string
+        (Default: 'test')
+        Root name of the files produced.
+
+    cluster_dir : string
+        (Default: 'clusters')
+        Where to store clustering files. (base_dir/cluster_dir)
+
+    grade_frac : List[float]
+        (Default: [1])
+        The amount of time to spend in each speed.
+        If any of grade_frac are <= 1, then polychord will time each sub-speed,
+        and then choose num_repeats for the number of slowest repeats, and
+        spend the proportion of time indicated by grade_frac. Otherwise this
+        indicates the number of repeats to spend in each speed.
+
+    grade_dims : List[int]
+        (Default: nDims)
+        The number of parameters within each speed.
+
+    nlives : dict {double:int}
+        (Default: {})
+        Variable number of live points option. This dictionary is a mapping
+        between loglike contours and nlive.
+        You should still set nlive to be a sensible number, as this indicates
+        how often to update the clustering, and to define the default value.
+
+    seed : positive int
+        (Default: system time in milliseconds)
+        Choose the seed to seed the random number generator.
+        Note **Positive seeds only**
+        a negative seed indicates that you should use the system time in
+        milliseconds
+
+    cube_samples: array-like
+        (Default: None)
+        samples from the unit hypercube to start nested sampling from. This is
+        useful if the prior is hard to rejection sample directly from the unit
+        hypercube (e.g. if there is a large amount of excluded parameter
+        space), but can be done more efficiently manually. A concrete example
+        would be to include sorted uniform parameters by requiring
+        0 < x1 < x2 < x3 < 1, if one did not want to use SortedUniformPrior.
+        Only available in Python interface.
+        shape (:, nDims)
+
+    paramnames: List [(string,string)]
+        (Default: None)
+        Mapping of label:Tex for all parameters in order. E.g. for two physical
+        parameters and one derived:
+        paramnames = [("p0", "$p_0$"), ("p1", "$p_1$"), ("d0", "$d_0$")]
+
+    Returns
+    -------
+
+    anesthetic.NestedSamples
+
+    All output is currently produced in the form of text files in <base_dir>,
+    anesthetic can read these in directly:
+
+    >>> import anesthetic
+    >>> anesthetic.read_chains("base_dir/file_root")
+
+    In general the contents of <base_dir> is a set of getdist compatible files.
+
+    <root> = <base_dir>/<file_root>
+
+    <root>.txt (posteriors = True)
+        Weighted posteriors. Compatible with getdist. Each line contains:
+
+            weight, -2*loglikelihood, <physical parameters>, <derived parameters>
+
+        Note that here the weights are not normalised, but instead are chosen
+        so that the maximum weight is 1.0.
+
+    <root>_equal_weights.txt (equals = True)
+        Weighted posteriors. Compatible with getdist. Each line contains:
+            1.0, -2*loglikelihood, <physical parameters>, <derived parameters>
+
+    <root>_dead.txt (write_dead = True)
+        Dead points. Each line contains:
+            loglikelihood, <physical parameters>, <derived parameters>
+
+    <root>_phys_live.txt (write_live = True)
+        Live points. Each line contains:
+            <physical parameters>, <derived parameters>, loglikelihood
+        Mostly useful for run-time monitoring.
+
+    <root>.resume
+        Resume file. Human readable.
+
+    <root>.stats
+        Final output evidence statistics
+
+    """
+
+    try:
+        from mpi4py import MPI
+        comm = MPI.COMM_WORLD
+        rank = comm.Get_rank()
+    except ImportError:
+        rank = 0
+
+    paramnames = kwargs.pop('paramnames', None)
+
+    default_kwargs = {
+        'nDerived': 0,
+        'prior': default_prior,
+        'dumper': default_dumper,
+        'nlive': nDims*25,
+        'num_repeats': nDims*5,
+        'nprior': -1,
+        'nfail': -1,
+        'do_clustering': True,
+        'feedback': 1,
+        'precision_criterion': 0.001,
+        'logzero': -1e30,
+        'max_ndead': -1,
+        'boost_posterior': 0.0,
+        'posteriors': True,
+        'equals': True,
+        'cluster_posteriors': True,
+        'write_resume': True,
+        'write_paramnames': False,
+        'read_resume': True,
+        'write_stats': True,
+        'write_live': True,
+        'write_dead': True,
+        'write_prior': True,
+        'maximise': False,
+        'compression_factor': np.exp(-1),
+        'synchronous': True,
+        'base_dir': 'chains',
+        'file_root': 'test',
+        'cluster_dir': 'clusters',
+        'grade_dims': [nDims],
+        'nlives': {},
+        'seed': -1,
+        'cube_samples': None,
+    }
+    default_kwargs['grade_frac'] = ([1.0]*len(default_kwargs['grade_dims'])
+                                    if 'grade_dims' not in kwargs else
+                                    [1.0]*len(kwargs['grade_dims']))
+
+    if not kwargs.keys() <= default_kwargs.keys():
+        raise TypeError(f"{__name__} got unknown keyword arguments: "
+                        f"{kwargs.keys() - default_kwargs.keys()}")
+    default_kwargs.update(kwargs)
+    kwargs = default_kwargs
+
+    if rank == 0:
+        (Path(kwargs['base_dir']) / kwargs['cluster_dir']).mkdir(
+            parents=True, exist_ok=True)
+
+    if kwargs['cube_samples'] is not None:
+        _make_resume_file(loglikelihood, **kwargs)
+        read_resume = kwargs['read_resume']
+        kwargs['read_resume'] = True
+
+    def wrap_loglikelihood(theta, phi):
+        logL = loglikelihood(theta)
+        try:
+            logL, phi[:] = logL
+        except TypeError:
+            pass
+        return logL
+
+    def wrap_prior(cube, theta):
+        theta[:] = kwargs['prior'](cube)
+
+    kwargs['grade_dims'] = [int(d) for d in list(kwargs['grade_dims'])]
+    if sum(kwargs['grade_dims']) != nDims:
+        raise ValueError(f"grade_dims ({sum(kwargs['grade_dims'])}) "
+                         f"must sum to nDims ({nDims})")
+    kwargs['nlives'] = {float(logL): int(nlive)
+                        for logL, nlive in kwargs['nlives'].items()}
+
+    # Run polychord from module library
+    _pypolychord.run(wrap_loglikelihood,
+                     wrap_prior,
+                     kwargs['dumper'],
+                     nDims,
+                     kwargs['nDerived'],
+                     kwargs['nlive'],
+                     kwargs['num_repeats'],
+                     kwargs['nprior'],
+                     kwargs['nfail'],
+                     kwargs['do_clustering'],
+                     kwargs['feedback'],
+                     kwargs['precision_criterion'],
+                     kwargs['logzero'],
+                     kwargs['max_ndead'],
+                     kwargs['boost_posterior'],
+                     kwargs['posteriors'],
+                     kwargs['equals'],
+                     kwargs['cluster_posteriors'],
+                     kwargs['write_resume'],
+                     kwargs['write_paramnames'],
+                     kwargs['read_resume'],
+                     kwargs['write_stats'],
+                     kwargs['write_live'],
+                     kwargs['write_dead'],
+                     kwargs['write_prior'],
+                     kwargs['maximise'],
+                     kwargs['compression_factor'],
+                     kwargs['synchronous'],
+                     kwargs['base_dir'],
+                     kwargs['file_root'],
+                     kwargs['grade_frac'],
+                     kwargs['grade_dims'],
+                     kwargs['nlives'],
+                     kwargs['seed'],
+                     )
+
+    if kwargs['cube_samples'] is not None:
+        kwargs['read_resume'] = read_resume
+
+    if paramnames is not None:
+        PolyChordOutput.make_paramnames_file(
+            paramnames,
+            Path(kwargs['base_dir']) /
+            (kwargs['file_root'] + ".paramnames"))
+
+    try:
+        import anesthetic
+    except ImportError:
+        warnings.warn("anesthetic not installed. "
+                      "Cannot return NestedSamples object.")
+        return
+    return anesthetic.read_chains(
+        str(Path(kwargs['base_dir']) / kwargs['file_root']))
+
+
+def _make_resume_file(loglikelihood, **kwargs):
     import fortranformat as ff
-    resume_filename = os.path.join(settings.base_dir,
-                                   settings.file_root)+".resume"
+    resume_filename = Path(kwargs['base_dir']) / (kwargs['file_root']
+                                                  + ".resume")
 
     try:
         from mpi4py import MPI
@@ -241,10 +663,10 @@ def make_resume_file(settings, loglikelihood, prior):
         size = 1
 
     lives = []
-    logL_birth = settings.logzero
-    for i in np.array_split(np.arange(len(settings.cube_samples)), size)[rank]:
-        cube = settings.cube_samples[i]
-        theta = prior(cube)
+    logL_birth = kwargs['logzero']
+    for i in np.array_split(np.arange(len(kwargs['cube_samples'])), size)[rank]:
+        cube = kwargs['cube_samples'][i]
+        theta = kwargs['prior'](cube)
         logL = loglikelihood(theta)
         try:
             logL, derived = logL
@@ -265,7 +687,7 @@ def make_resume_file(settings, loglikelihood, prior):
 
     if rank == 0:
         if MPI:
-            lives = np.reshape(recvbuf, (len(settings.cube_samples), len(lives[0])))
+            lives = np.reshape(recvbuf, (len(kwargs['cube_samples']), len(lives[0])))
         else:
             lives = np.array(lives)
         with open(resume_filename,"w") as f:
@@ -295,11 +717,11 @@ def write(var):
         write('=== Number of global equally weighted posterior points ===')
         write(0)
         write('=== Number of grades ===')
-        write(len(settings.grade_dims))
+        write(len(kwargs['grade_dims']))
         write('=== positions of grades ===')
-        write(settings.grade_dims)
+        write(kwargs['grade_dims'])
         write('=== Number of repeats ===')
-        write(settings.num_repeats)
+        write(kwargs['num_repeats'])
         write('=== Number of likelihood calls ===')
         write(len(lives))
         write('=== Number of live points in each cluster ===')
@@ -315,11 +737,11 @@ def write(var):
         write('=== Number of weighted posterior points in each dead cluster ===')
         write('=== Number of equally weighted posterior points in each dead cluster ===')
         write('=== global evidence -- log(<Z>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
        write('=== global evidence^2 -- log(<Z^2>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== posterior thin factor ===')
-        write(settings.boost_posterior)
+        write(kwargs['boost_posterior'])
         write('=== local loglikelihood bounds ===')
         write(lives[:,-1].min())
         write('=== local volume -- log(<X_p>) ===')
@@ -327,17 +749,17 @@ def write(var):
         write(0.0)
         write('=== last update volume ===')
         write(0.0)
         write('=== global evidence volume cross correlation -- log(<ZX_p>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== local evidence -- log(<Z_p>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== local evidence^2 -- log(<Z_p^2>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== local evidence volume cross correlation -- log(<Z_pX_p>) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== local volume cross correlation -- log(<X_pX_q>) ===')
         write(0.0)
         write('=== maximum log weights -- log(w_p) ===')
-        write(settings.logzero)
+        write(kwargs['logzero'])
         write('=== local dead evidence -- log(<Z_p>) ===')
         write('=== local dead evidence^2 -- log(<Z_p^2>) ===')
         write('=== maximum dead log weights -- log(w_p) ===')
@@ -365,3 +787,7 @@ def write(var):
         write('---------------------------------------')
         write('=== dead equally weighted posterior points ===')
         write('=== global equally weighted posterior points ===')
+
+def _legacy_make_resume_file(settings, loglikelihood, prior):
+    kwargs = settings.__dict__
+    _make_resume_file(loglikelihood, prior=prior, **kwargs)
diff --git a/quickstart.ipynb b/quickstart.ipynb
new file mode 100644
index 00000000..83b254c1
--- /dev/null
+++ b/quickstart.ipynb
@@ -0,0 +1,170 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "74897445",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from numpy import pi, log\n",
+    "import pypolychord\n",
+    "from pypolychord.priors import UniformPrior\n",
+    "try:\n",
+    "    from mpi4py import MPI\n",
+    "except ImportError:\n",
+    "    pass"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0b27c3aa",
+   "metadata": {},
+   "source": [
+    "Define a four-dimensional spherical gaussian likelihood,\n",
+    " width sigma=0.1, centered on 0 with one derived parameter.\n",
+    " The derived parameter is the squared radius"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c9453fa2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "nDims = 4\n",
+    "nDerived = 1\n",
+    "sigma = 0.1\n",
+    "\n",
+    "def likelihood(theta):\n",
+    "    \"\"\" Simple Gaussian Likelihood\"\"\"\n",
+    "\n",
+    "    nDims = len(theta)\n",
+    "    r2 = sum(theta**2)\n",
+    "    logL = -log(2*pi*sigma*sigma)*nDims/2.0\n",
+    "    logL += -r2/2/sigma/sigma\n",
+    "\n",
+    "    return logL, [r2]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a2e69376",
+   "metadata": {},
+   "source": [
+    "Define a box uniform prior from -1 to 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "93de201d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def prior(hypercube):\n",
+    "    \"\"\" Uniform prior from [-1,1]^D. \"\"\"\n",
+    "    return UniformPrior(-1, 1)(hypercube)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fe13bd20",
+   "metadata": {},
+   "source": [
+    "Optional dumper function giving run-time read access to\n",
+    " the live points, dead points, weights and evidences"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a0565d20",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def dumper(live, dead, logweights, logZ, logZerr):\n",
+    "    print(\"Last dead point:\", dead[-1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f9ed3ce3",
+   "metadata": {},
+   "source": [
+    "Parameter names"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1442195",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#! This is a list of tuples (label, latex)\n",
+    "#! Derived parameters should be followed by a *\n",
+    "\n",
+    "paramnames = [(f'p{i}', f'\\\\theta_{i}') for i in range(nDims)]\n",
+    "paramnames += [('r*', 'r')]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "58c57e67",
+   "metadata": {},
+   "source": [
+    "Run PolyChord"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f2dbb2bb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "output = pypolychord.run(\n",
+    "    likelihood,\n",
+    "    nDims,\n",
+    "    nDerived=nDerived,\n",
+    "    prior=prior,\n",
+    "    dumper=dumper,\n",
+    "    file_root='gaussian',\n",
+    "    nlive=200,\n",
+    "    do_clustering=True,\n",
+    "    read_resume=False,\n",
+    "    paramnames=paramnames,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a8c1be16",
+   "metadata": {},
+   "source": [
+    "Make an anesthetic plot"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "39356e18",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "try:\n",
+    "    from anesthetic import make_2d_axes\n",
+    "    fig, ax = make_2d_axes(['p0', 'p1', 'p2', 'p3', 'r'])\n",
+    "    output.plot_2d(ax)\n",
+    "    fig.savefig('posterior.pdf')\n",
+    "except ImportError:\n",
+    "    print(\"Install anesthetic for plotting examples.\")"
+   ]
+  }
+ ],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/quickstart.py b/quickstart.py
new file mode 100644
index 00000000..d0f9a700
--- /dev/null
+++ b/quickstart.py
@@ -0,0 +1,70 @@
+from numpy import pi, log
+import pypolychord
+from pypolychord.priors import UniformPrior
+try:
+    from mpi4py import MPI
+except ImportError:
+    pass
+
+
+#| Define a four-dimensional spherical gaussian likelihood,
+#| width sigma=0.1, centered on 0 with one derived parameter.
+#| The derived parameter is the squared radius
+
+nDims = 4
+nDerived = 1
+sigma = 0.1
+
+def likelihood(theta):
+    """ Simple Gaussian Likelihood"""
+
+    nDims = len(theta)
+    r2 = sum(theta**2)
+    logL = -log(2*pi*sigma*sigma)*nDims/2.0
+    logL += -r2/2/sigma/sigma
+
+    return logL, [r2]
+
+#| Define a box uniform prior from -1 to 1
+
+def prior(hypercube):
+    """ Uniform prior from [-1,1]^D. """
+    return UniformPrior(-1, 1)(hypercube)
+
+#| Optional dumper function giving run-time read access to
+#| the live points, dead points, weights and evidences
+
+def dumper(live, dead, logweights, logZ, logZerr):
+    print("Last dead point:", dead[-1])
+
+#| Parameter names
+#! This is a list of tuples (label, latex)
+#! Derived parameters should be followed by a *
+
+paramnames = [(f'p{i}', f'\\theta_{i}') for i in range(nDims)]
+paramnames += [('r*', 'r')]
+
+#| Run PolyChord
+
+output = pypolychord.run(
+    likelihood,
+    nDims,
+    nDerived=nDerived,
+    prior=prior,
+    dumper=dumper,
+    file_root='gaussian',
+    nlive=200,
+    do_clustering=True,
+    read_resume=False,
+    paramnames=paramnames,
+)
+
+#| Make an anesthetic plot
+
+try:
+    from anesthetic import make_2d_axes
+    fig, ax = make_2d_axes(['p0', 'p1', 'p2', 'p3', 'r'])
+    output.plot_2d(ax)
+    fig.savefig('posterior.pdf')
+except ImportError:
+    print("Install anesthetic for plotting examples.")
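For reference, a sketch of the resulting paramnames output: the list built in
quickstart.py above is written out as a getdist-style .paramnames file (one
'label latex' pair per line, per the '%s %s\n' format string in output.py),
with the trailing '*' on a label marking a derived parameter. Reconstructed
from the format string, not taken from an actual run:

.. code:: python

    from pypolychord.output import PolyChordOutput

    paramnames = [(f'p{i}', f'\\theta_{i}') for i in range(4)] + [('r*', 'r')]
    PolyChordOutput.make_paramnames_file(paramnames, 'gaussian.paramnames')
    # gaussian.paramnames then contains:
    # p0 \theta_0
    # p1 \theta_1
    # p2 \theta_2
    # p3 \theta_3
    # r* r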
diff --git a/run_pypolychord.ipynb b/run_pypolychord.ipynb
index 752e8977..f63aa1a5 100644
--- a/run_pypolychord.ipynb
+++ b/run_pypolychord.ipynb
@@ -3,7 +3,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "560e9bea",
+   "id": "4d1fc41a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -19,7 +19,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "7fe34a27",
+   "id": "1ca223cf",
    "metadata": {},
    "source": [
     "Define a four-dimensional spherical gaussian likelihood,\n",
@@ -30,7 +30,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "b1d5bdd6",
+   "id": "a6e26034",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -51,7 +51,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "99ae9f31",
+   "id": "30f9057e",
    "metadata": {},
    "source": [
     "Define a box uniform prior from -1 to 1"
@@ -60,7 +60,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "18e285b9",
+   "id": "36df3068",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -71,7 +71,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "1845f6ae",
+   "id": "0866528b",
    "metadata": {},
    "source": [
     "Optional dumper function giving run-time read access to\n",
@@ -81,7 +81,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "ccb9f8ad",
+   "id": "60a6b0d0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -91,7 +91,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "2a8241b7",
+   "id": "52d9ce7b",
    "metadata": {},
    "source": [
     "Initialise the settings"
@@ -100,7 +100,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "6f68e8df",
+   "id": "fbacdc2b",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -113,7 +113,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "4e423ddc",
+   "id": "9151acb4",
    "metadata": {},
    "source": [
     "Run PolyChord"
@@ -122,7 +122,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "44304793",
+   "id": "c0bcadad",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -131,7 +131,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "3afc5182",
+   "id": "ac1c6942",
    "metadata": {},
    "source": [
     "Create a paramnames file"
@@ -140,7 +140,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "32f7ff6e",
+   "id": "1b947d71",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -151,7 +151,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "62152912",
+   "id": "94b3acd9",
    "metadata": {},
    "source": [
     "Make an anesthetic plot (could also use getdist)"
@@ -160,16 +160,16 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "5302afe6",
+   "id": "070c235e",
    "metadata": {},
    "outputs": [],
    "source": [
     "try:\n",
-    "    from matplotlib import pyplot as plt\n",
-    "    from anesthetic import read_chains\n",
-    "    samples = read_chains(settings.base_dir + '/' + settings.file_root)\n",
-    "    samples.plot_2d(['p0','p1','p2','p3','r'])\n",
-    "    plt.savefig('posterior.pdf')\n",
+    "    import anesthetic as ac\n",
+    "    samples = ac.read_chains(settings.base_dir + '/' + settings.file_root)\n",
+    "    fig, axes = ac.make_2d_axes(['p0', 'p1', 'p2', 'p3', 'r'])\n",
+    "    samples.plot_2d(axes)\n",
+    "    fig.savefig('posterior.pdf')\n",
     "\n",
     "except ImportError:\n",
     "    try:\n",
diff --git a/run_pypolychord.py b/run_pypolychord.py
old mode 100755
new mode 100644
index 44781aa4..43424f63
--- a/run_pypolychord.py
+++ b/run_pypolychord.py
@@ -58,11 +58,11 @@ def dumper(live, dead, logweights, logZ, logZerr):
 #| Make an anesthetic plot (could also use getdist)
 
 try:
-    from matplotlib import pyplot as plt
-    from anesthetic import read_chains
-    samples = read_chains(settings.base_dir + '/' + settings.file_root)
-    samples.plot_2d(['p0','p1','p2','p3','r'])
-    plt.savefig('posterior.pdf')
+    import anesthetic as ac
+    samples = ac.read_chains(settings.base_dir + '/' + settings.file_root)
+    fig, axes = ac.make_2d_axes(['p0', 'p1', 'p2', 'p3', 'r'])
+    samples.plot_2d(axes)
+    fig.savefig('posterior.pdf')
 
 except ImportError:
     try:
diff --git a/src/polychord/feedback.f90 b/src/polychord/feedback.f90
index 40f6e35b..370c1c38 100644
--- a/src/polychord/feedback.f90
+++ b/src/polychord/feedback.f90
@@ -28,8 +28,8 @@ subroutine write_opening_statement(settings)
         write(stdout_unit,'("")')
         write(stdout_unit,'("PolyChord: Next Generation Nested Sampling")')
         write(stdout_unit,'("copyright: Will Handley, Mike Hobson & Anthony Lasenby")')
-        write(stdout_unit,'(" version: 1.21.4")')
-        write(stdout_unit,'(" release: 9th Jan 2024")')
+        write(stdout_unit,'(" version: 1.22.0")')
+        write(stdout_unit,'(" release: 10th Jan 2024")')
         write(stdout_unit,'(" email: wh260@mrao.cam.ac.uk")')
         write(stdout_unit,'("")')
     end if
diff --git a/tests/test_run_pypolychord.py b/tests/test_run_pypolychord.py
index 9f9a5175..6852da37 100644
--- a/tests/test_run_pypolychord.py
+++ b/tests/test_run_pypolychord.py
@@ -1,5 +1,7 @@
 import pytest
 import numpy as np
+from mpi4py import MPI
+import anesthetic as ac
 import pypolychord
 from pypolychord.settings import PolyChordSettings
 from pypolychord.priors import UniformPrior
@@ -18,6 +20,8 @@ def gaussian_likelihood(theta):
     return logL, [r2]
 
 
+uniform_prior = UniformPrior(-1, 1)
+
 default_settings = PolyChordSettings(4, 1)
 default_settings.file_root = 'settings'
 default_settings.nlive = 200
@@ -36,12 +40,7 @@ def gaussian_likelihood(theta):
 @pytest.mark.parametrize("settings, likelihood, nDims, nDerived",
                          [(default_settings, gaussian_likelihood, 4, 1),
                           (cube_samples_settings, gaussian_likelihood, 4, 1)])
-def test_run(settings, likelihood, nDims, nDerived):
-    # Define a box uniform prior from -1 to 1
-    def prior(hypercube):
-        """ Uniform prior from [-1,1]^D. """
-        return UniformPrior(-1, 1)(hypercube)
-
+def test_run_polychord(settings, likelihood, nDims, nDerived):
     # Optional dumper function giving run-time read access to
     # the live points, dead points, weights and evidences
 
@@ -50,8 +49,8 @@ def dumper(live, dead, logweights, logZ, logZerr):
 
     # Run PolyChord
 
-    print("Running PolyChord")
-    output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings, prior, dumper)
+    output = pypolychord.run_polychord(likelihood, nDims, nDerived,
+                                       settings, uniform_prior, dumper)
 
     # Create a paramnames file
 
@@ -60,33 +59,72 @@ def dumper(live, dead, logweights, logZ, logZerr):
     output.make_paramnames_files(paramnames)
 
 
-@pytest.mark.parametrize("settings, likelihood, nDims, nDerived",
-                         [(default_settings, gaussian_likelihood, 4, 1),
-                          (cube_samples_settings, gaussian_likelihood, 4, 1)])
-@pytest.mark.mpi
-def test_run_mpi(settings, likelihood, nDims, nDerived):
-    from mpi4py import MPI
+@pytest.mark.parametrize("likelihood, nDims, nDerived",
+                         [(gaussian_likelihood, 4, 1),])
+def test_run(likelihood, nDims, nDerived):
+    # Create paramnames
+    paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
+    paramnames += [('r*', 'r')]
 
-    settings.file_root += '_mpi'
+    # Run PolyChord
 
-    # Define a box uniform prior from -1 to 1
-    def prior(hypercube):
-        """ Uniform prior from [-1,1]^D. """
-        return UniformPrior(-1, 1)(hypercube)
+    ns = pypolychord.run(likelihood, nDims, nDerived=nDerived,
+                         prior=uniform_prior, paramnames=paramnames,
+                         read_resume=False)
+    assert isinstance(ns, ac.NestedSamples)
 
-    # Optional dumper function giving run-time read access to
-    # the live points, dead points, weights and evidences
-    def dumper(live, dead, logweights, logZ, logZerr):
-        print("Last dead point:", dead[-1])
 
+@pytest.mark.parametrize("seed", [-1, 0, 1, 2])
+def test_seed(seed):
+    # Create paramnames
+    paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(4)]
+    paramnames += [('r*', 'r')]
 
-    # Run PolyChord
+    # Run PolyChord twice
+    ns0 = pypolychord.run(gaussian_likelihood, 4, nDerived=1,
+                          prior=uniform_prior, paramnames=paramnames,
+                          read_resume=False, seed=seed)
+    ns1 = pypolychord.run(gaussian_likelihood, 4, nDerived=1,
+                          prior=uniform_prior, paramnames=paramnames,
+                          read_resume=False, seed=seed)
+    assert ns0.equals(ns1) != (seed < 0)
 
-    print("Running PolyChord")
-    output = pypolychord.run_polychord(likelihood, nDims, nDerived, settings, prior, dumper)
 
-    # Create a paramnames file
+@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
+def test_no_derived():
+    def no_derived_gaussian_likelihood(theta):
+        """ Simple Gaussian Likelihood without derived parameters"""
 
-    paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(nDims)]
+        sigma = 0.1
+
+        nDims = len(theta)
+        r2 = sum(theta**2)
+        logL = -np.log(2*np.pi*sigma*sigma)*nDims/2.0
+        logL += -r2/2/sigma/sigma
+
+        return logL
+
+    seed = 1
+    paramnames = [('p%i' % i, r'\theta_%i' % i) for i in range(4)]
+
+    # Run without derived parameters
+    ns0 = pypolychord.run(no_derived_gaussian_likelihood, 4,
+                          prior=uniform_prior, paramnames=paramnames,
+                          read_resume=False, seed=seed)
+    # Run with derived parameters
     paramnames += [('r*', 'r')]
-    output.make_paramnames_files(paramnames)
+    ns1 = pypolychord.run(gaussian_likelihood, 4, nDerived=1,
+                          prior=uniform_prior, paramnames=paramnames,
+                          read_resume=False, seed=seed)
+    assert ns0.equals(ns1.drop(columns='r'))
+
+
+def test_grade_dims():
+    grade_dims = [1, 3]
+    pypolychord.run(gaussian_likelihood, 4, nDerived=1,
+                    prior=uniform_prior, read_resume=False,
+                    grade_dims=grade_dims)
+    with pytest.raises(ValueError):
+        pypolychord.run(gaussian_likelihood, 5, nDerived=1,
+                        prior=uniform_prior, read_resume=False,
+                        grade_dims=grade_dims)
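
A possible companion test (not part of this patch) exercising the
cube_samples keyword documented in run()'s docstring, mirroring the
cube_samples_settings case used with the legacy interface above:

.. code:: python

    def test_cube_samples():
        # start nested sampling from explicit unit-hypercube samples
        cube_samples = np.array([[0.1, 0.2, 0.3, 0.4],
                                 [0.5, 0.6, 0.7, 0.8]])
        pypolychord.run(gaussian_likelihood, 4, nDerived=1,
                        prior=uniform_prior, read_resume=False,
                        cube_samples=cube_samples)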