diff --git a/Makefile b/Makefile index 8240a8ab3..98ce7fc6a 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,8 @@ help: @echo "coverage - check code coverage quickly with the default Python" @echo "docs - generate Sphinx HTML documentation, including API docs" @echo "sdist - package" + @echo "black - run black formatter" + @echo "docformatter - run docformatter" clean: clean-build clean-pyc @@ -26,6 +28,8 @@ clean-pyc: lint: flake8 lenstronomy test + black . + docformatter -r ./* --black --in-place test: py.test diff --git a/README.rst b/README.rst index cf4cd0bd3..850837e0f 100644 --- a/README.rst +++ b/README.rst @@ -25,6 +25,15 @@ .. image:: https://img.shields.io/badge/arXiv-1803.09746%20-yellowgreen.svg :target: https://arxiv.org/abs/1803.09746 +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + +.. image:: https://img.shields.io/badge/%20formatter-docformatter-fedcba.svg + :target: https://github.com/PyCQA/docformatter + +.. image:: https://img.shields.io/badge/%20style-sphinx-0a507a.svg + :target: https://www.sphinx-doc.org/en/master/usage/index.html + .. .. image:: https://raw.githubusercontent.com/lenstronomy/lenstronomy/main/docs/figures/readme_fig.png :target: https://raw.githubusercontent.com/lenstronomy/lenstronomy/main/docs/figures/readme_fig.png diff --git a/docs/check_sphinx.py b/docs/check_sphinx.py index 4707828fc..ebdfade82 100644 --- a/docs/check_sphinx.py +++ b/docs/check_sphinx.py @@ -1,20 +1,22 @@ -''' -Created on Dec 2, 2013 +"""Created on Dec 2, 2013. @author: jakeret -''' +""" import py import subprocess + + def test_linkcheck(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call( - ["sphinx-build", "-blinkcheck", - "-d", str(doctrees), ".", str(htmldir)]) - + ["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)] + ) + + def test_build_docs(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") - subprocess.check_call([ - "sphinx-build", "-bhtml", - "-d", str(doctrees), ".", str(htmldir)]) \ No newline at end of file + subprocess.check_call( + ["sphinx-build", "-bhtml", "-d", str(doctrees), ".", str(htmldir)] + ) diff --git a/docs/conf.py b/docs/conf.py index 4b38aae57..903490870 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,7 +16,7 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) cwd = os.getcwd() parent = os.path.dirname(cwd) @@ -27,33 +27,37 @@ # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']\ -# , 'sphinx.ext.autosectionlabel'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", +] # , 'sphinx.ext.autosectionlabel'] autodoc_default_options = { - 'member-order': 'bysource', - 'special-members': '__init__', + "member-order": "bysource", + "special-members": "__init__", } # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The main toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'lenstronomy' -copyright = u'2018-2022, lenstronomy developers' +project = "lenstronomy" +copyright = "2018-2022, lenstronomy developers" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -66,23 +70,23 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). @@ -93,79 +97,79 @@ # show_authors = False # changed from default option # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
-#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True @@ -173,67 +177,61 @@ # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'lenstronomydoc' +htmlhelp_basename = "lenstronomydoc" # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'lenstronomy.tex', u'lenstronomy Documentation', - u'Simon Birrer', 'manual'), + ("index", "lenstronomy.tex", "lenstronomy Documentation", "Simon Birrer", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
-#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'lenstronomy', u'lenstronomy Documentation', - [u'Simon Birrer'], 1) -] +man_pages = [("index", "lenstronomy", "lenstronomy Documentation", ["Simon Birrer"], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ @@ -242,19 +240,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'lenstronomy', u'lenstronomy Documentation', - u'Simon Birrer', 'lenstronomy', 'Multi-purpose gravitational lens modeling package.', - 'Miscellaneous'), + ( + "index", + "lenstronomy", + "lenstronomy Documentation", + "Simon Birrer", + "lenstronomy", + "Multi-purpose gravitational lens modeling package.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False \ No newline at end of file +# texinfo_no_detailmenu = False diff --git a/lenstronomy/Analysis/image_reconstruction.py b/lenstronomy/Analysis/image_reconstruction.py index 9568ab7b6..da7613fab 100644 --- a/lenstronomy/Analysis/image_reconstruction.py +++ b/lenstronomy/Analysis/image_reconstruction.py @@ -5,24 +5,32 @@ from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class MultiBandImageReconstruction(object): - """ - this class manages the output/results of a fitting process and can conveniently access image reconstruction - properties in multi-band fitting. - In particular, the fitting result does not come with linear inversion parameters (which may or may not be joint - or different for multiple bands) and this class performs the linear inversion for the surface brightness amplitudes - and stores them for each individual band to be accessible by the user. - - This class is the backbone of the ModelPlot routine that provides the interface of this class with plotting and - illustration routines. + """This class manages the output/results of a fitting process and can conveniently + access image reconstruction properties in multi-band fitting. 
In particular, the + fitting result does not come with linear inversion parameters (which may or may not + be joint or different for multiple bands) and this class performs the linear + inversion for the surface brightness amplitudes and stores them for each individual + band to be accessible by the user. + + This class is the backbone of the ModelPlot routine that provides the interface of + this class with plotting and illustration routines. """ - def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type='multi-linear', - kwargs_likelihood=None, verbose=True): + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="multi-linear", + kwargs_likelihood=None, + verbose=True, + ): """ :param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] @@ -43,71 +51,104 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type # reconstruction if kwargs_likelihood is None: kwargs_likelihood = {} - image_likelihood_mask_list = kwargs_likelihood.get('image_likelihood_mask_list', None) - source_marg = kwargs_likelihood.get('source_marg', False) - linear_prior = kwargs_likelihood.get('linear_prior', None) - bands_compute = kwargs_likelihood.get('bands_compute', None) + image_likelihood_mask_list = kwargs_likelihood.get( + "image_likelihood_mask_list", None + ) + source_marg = kwargs_likelihood.get("source_marg", False) + linear_prior = kwargs_likelihood.get("linear_prior", None) + bands_compute = kwargs_likelihood.get("bands_compute", None) if bands_compute is None: bands_compute = [True] * len(multi_band_list) - if multi_band_type == 'single-band': - multi_band_type = 'multi-linear' # this makes sure that the linear inversion outputs are coming in a list - self._imageModel = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, - bands_compute=bands_compute, - image_likelihood_mask_list=image_likelihood_mask_list) + if multi_band_type == "single-band": + multi_band_type = "multi-linear" # this makes sure that the linear inversion outputs are coming in a list + self._imageModel = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=bands_compute, + image_likelihood_mask_list=image_likelihood_mask_list, + ) # here we perform the (joint) linear inversion with all data - model, error_map, cov_param, param = self._imageModel.image_linear_solve(inv_bool=True, **kwargs_params) + model, error_map, cov_param, param = self._imageModel.image_linear_solve( + inv_bool=True, **kwargs_params + ) check_solver_error(param) if verbose: - logL = self._imageModel.likelihood_data_given_model(source_marg=source_marg, linear_prior=linear_prior, - **kwargs_params) + logL = self._imageModel.likelihood_data_given_model( + source_marg=source_marg, linear_prior=linear_prior, **kwargs_params + ) n_data = self._imageModel.num_data_evaluate if n_data > 0: - print(logL * 2 / n_data, 'reduced X^2 of all evaluated imaging data combined.') + print( + logL * 2 / n_data, + "reduced X^2 of all evaluated imaging data combined.", + ) self.model_band_list = [] for i in range(len(multi_band_list)): if bands_compute[i] is True: - if multi_band_type == 'joint-linear': + if multi_band_type == "joint-linear": param_i = param cov_param_i = cov_param else: param_i = param[i] cov_param_i = cov_param[i] - model_band = ModelBand(multi_band_list, kwargs_model, model[i], error_map[i], cov_param_i, - param_i, copy.deepcopy(kwargs_params), - 
image_likelihood_mask_list=image_likelihood_mask_list, band_index=i, - verbose=verbose) + model_band = ModelBand( + multi_band_list, + kwargs_model, + model[i], + error_map[i], + cov_param_i, + param_i, + copy.deepcopy(kwargs_params), + image_likelihood_mask_list=image_likelihood_mask_list, + band_index=i, + verbose=verbose, + ) self.model_band_list.append(model_band) else: self.model_band_list.append(None) def band_setup(self, band_index=0): - """ - ImageModel() instance and keyword arguments of the model components to execute all the options of the ImSim - core modules. + """ImageModel() instance and keyword arguments of the model components to + execute all the options of the ImSim core modules. - :param band_index: integer (>=0) of imaging band in order of multi_band_list input to this class + :param band_index: integer (>=0) of imaging band in order of multi_band_list + input to this class :return: ImageModel() instance and keyword arguments of the model """ i = int(band_index) if self.model_band_list[i] is None: raise ValueError("band %s is not computed or out of range." % i) - return self.model_band_list[i].image_model_class, self.model_band_list[i].kwargs_model + return ( + self.model_band_list[i].image_model_class, + self.model_band_list[i].kwargs_model, + ) @export class ModelBand(object): - """ - class to plot a single band given the full modeling results - This class has its specific role when the linear inference is performed on the joint band level and/or when only - a subset of model components get used for this specific band in the modeling. + """Class to plot a single band given the full modeling results This class has its + specific role when the linear inference is performed on the joint band level and/or + when only a subset of model components get used for this specific band in the + modeling.""" - """ - def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, param, kwargs_params, - image_likelihood_mask_list=None, band_index=0, verbose=True): + def __init__( + self, + multi_band_list, + kwargs_model, + model, + error_map, + cov_param, + param, + kwargs_params, + image_likelihood_mask_list=None, + band_index=0, + verbose=True, + ): """ :param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] @@ -124,16 +165,39 @@ def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, p :param verbose: if True (default), prints the reduced chi2 value for the current band. 
""" - self._bandmodel = SingleBandMultiModel(multi_band_list, kwargs_model, - likelihood_mask_list=image_likelihood_mask_list, band_index=band_index) - self._kwargs_special_partial = kwargs_params.get('kwargs_special', None) - self._kwargs_lens = kwargs_params.get('kwargs_lens', None) - kwarks_lens_partial, kwargs_source_partial, kwargs_lens_light_partial, kwargs_ps_partial, self._kwargs_extinction_partial = self._bandmodel.select_kwargs(**kwargs_params) - self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, self._kwargs_ps_partial = self._bandmodel._update_linear_kwargs(param, kwarks_lens_partial, kwargs_source_partial, kwargs_lens_light_partial, kwargs_ps_partial) + self._bandmodel = SingleBandMultiModel( + multi_band_list, + kwargs_model, + likelihood_mask_list=image_likelihood_mask_list, + band_index=band_index, + ) + self._kwargs_special_partial = kwargs_params.get("kwargs_special", None) + self._kwargs_lens = kwargs_params.get("kwargs_lens", None) + ( + kwarks_lens_partial, + kwargs_source_partial, + kwargs_lens_light_partial, + kwargs_ps_partial, + self._kwargs_extinction_partial, + ) = self._bandmodel.select_kwargs(**kwargs_params) + ( + self._kwargs_lens_partial, + self._kwargs_source_partial, + self._kwargs_lens_light_partial, + self._kwargs_ps_partial, + ) = self._bandmodel._update_linear_kwargs( + param, + kwarks_lens_partial, + kwargs_source_partial, + kwargs_lens_light_partial, + kwargs_ps_partial, + ) # this is an (out-commented) example of how to re-create the model in this band # model_new = self.bandmodel.image(self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, self._kwargs_ps_partial, self._kwargs_special_partial, self._kwargs_extinction_partial) - self._norm_residuals = self._bandmodel.reduced_residuals(model, error_map=error_map) + self._norm_residuals = self._bandmodel.reduced_residuals( + model, error_map=error_map + ) self._reduced_x2 = self._bandmodel.reduced_chi2(model, error_map=error_map) if verbose: print("reduced chi^2 of data ", band_index, "= ", self._reduced_x2) @@ -161,8 +225,8 @@ def norm_residuals(self): @property def image_model_class(self): - """ - ImageModel() class instance of the single band with only the model components applied to this band + """ImageModel() class instance of the single band with only the model components + applied to this band. :return: SingleBandMultiModel() instance, which inherits the ImageModel instance """ @@ -175,10 +239,14 @@ def kwargs_model(self): :return: keyword argument of keyword argument lists of the different model components selected for the imaging band, including linear amplitudes. 
These format matches the image_model_class() return """ - kwargs_return = {'kwargs_lens': self._kwargs_lens_partial, 'kwargs_source': self._kwargs_source_partial, - 'kwargs_lens_light': self._kwargs_lens_light_partial, 'kwargs_ps': self._kwargs_ps_partial, - 'kwargs_special': self._kwargs_special_partial, - 'kwargs_extinction': self._kwargs_extinction_partial} + kwargs_return = { + "kwargs_lens": self._kwargs_lens_partial, + "kwargs_source": self._kwargs_source_partial, + "kwargs_lens_light": self._kwargs_lens_light_partial, + "kwargs_ps": self._kwargs_ps_partial, + "kwargs_special": self._kwargs_special_partial, + "kwargs_extinction": self._kwargs_extinction_partial, + } return kwargs_return @@ -191,8 +259,10 @@ def check_solver_error(image): """ result = np.all(image == 0) if result: - Warning('Linear inversion of surface brightness components did not result in a unique solution.' - 'All linear amplitude parameters are set =0 instead. Please check whether ' - 'a) there are too many basis functions in the model, ' - 'or b) some linear basis sets are outside of the image/likelihood mask.') + Warning( + "Linear inversion of surface brightness components did not result in a unique solution." + "All linear amplitude parameters are set =0 instead. Please check whether " + "a) there are too many basis functions in the model, " + "or b) some linear basis sets are outside of the image/likelihood mask." + ) return result diff --git a/lenstronomy/Analysis/kinematics_api.py b/lenstronomy/Analysis/kinematics_api.py index d7d9ac5d1..dc28f3b30 100644 --- a/lenstronomy/Analysis/kinematics_api.py +++ b/lenstronomy/Analysis/kinematics_api.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import copy @@ -11,20 +11,36 @@ from lenstronomy.Analysis.light_profile import LightProfileAnalysis import lenstronomy.Util.multi_gauss_expansion as mge -__all__ = ['KinematicsAPI'] +__all__ = ["KinematicsAPI"] class KinematicsAPI(object): - """ - this class contains routines to compute time delays, magnification ratios, line of sight velocity dispersions etc - for a given lens model - """ + """This class contains routines to compute time delays, magnification ratios, line + of sight velocity dispersions etc for a given lens model.""" - def __init__(self, z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing, anisotropy_model, cosmo=None, - lens_model_kinematics_bool=None, light_model_kinematics_bool=None, multi_observations=False, - kwargs_numerics_galkin=None, analytic_kinematics=False, Hernquist_approx=False, MGE_light=False, - MGE_mass=False, kwargs_mge_light=None, kwargs_mge_mass=None, sampling_number=1000, - num_kin_sampling=1000, num_psf_sampling=100): + def __init__( + self, + z_lens, + z_source, + kwargs_model, + kwargs_aperture, + kwargs_seeing, + anisotropy_model, + cosmo=None, + lens_model_kinematics_bool=None, + light_model_kinematics_bool=None, + multi_observations=False, + kwargs_numerics_galkin=None, + analytic_kinematics=False, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + kwargs_mge_light=None, + kwargs_mge_mass=None, + sampling_number=1000, + num_kin_sampling=1000, + num_psf_sampling=100, + ): """ :param z_lens: redshift of lens @@ -65,13 +81,22 @@ def __init__(self, z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seein self._kwargs_aperture_kin = kwargs_aperture self._kwargs_psf_kin = kwargs_seeing self.lensCosmo = LensCosmo(z_lens, z_source, cosmo=cosmo) - self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, 
extinction_class = class_creator.create_class_instances( - all_models=True, **kwargs_model) + ( + self.LensModel, + self.SourceModel, + self.LensLightModel, + self.PointSource, + extinction_class, + ) = class_creator.create_class_instances(all_models=True, **kwargs_model) self._lensLightProfile = LightProfileAnalysis(light_model=self.LensLightModel) self._lensMassProfile = LensProfileAnalysis(lens_model=self.LensModel) self._lens_light_model_list = self.LensLightModel.profile_type_list self._lens_model_list = self.LensModel.lens_model_list - self._kwargs_cosmo = {'d_d': self.lensCosmo.dd, 'd_s': self.lensCosmo.ds, 'd_ds': self.lensCosmo.dds} + self._kwargs_cosmo = { + "d_d": self.lensCosmo.dd, + "d_s": self.lensCosmo.ds, + "d_ds": self.lensCosmo.dds, + } self._lens_model_kinematics_bool = lens_model_kinematics_bool self._light_model_kinematics_bool = light_model_kinematics_bool self._sampling_number = sampling_number @@ -79,20 +104,27 @@ def __init__(self, z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seein self._num_psf_sampling = num_psf_sampling if kwargs_mge_mass is None: - self._kwargs_mge_mass = {'n_comp': 20} + self._kwargs_mge_mass = {"n_comp": 20} else: self._kwargs_mge_mass = kwargs_mge_mass if kwargs_mge_light is None: - self._kwargs_mge_light = {'grid_spacing': 0.01, 'grid_num': 100, 'n_comp': 20, 'center_x': None, - 'center_y': None} + self._kwargs_mge_light = { + "grid_spacing": 0.01, + "grid_num": 100, + "n_comp": 20, + "center_x": None, + "center_y": None, + } else: self._kwargs_mge_light = kwargs_mge_light if kwargs_numerics_galkin is None: - kwargs_numerics_galkin = {'interpol_grid_num': 1000, # numerical interpolation, should converge -> infinity - 'log_integration': True, # log or linear interpolation of surface brightness and mass models - 'max_integrate': 100, - 'min_integrate': 0.001} # lower/upper bound of numerical integrals + kwargs_numerics_galkin = { + "interpol_grid_num": 1000, # numerical interpolation, should converge -> infinity + "log_integration": True, # log or linear interpolation of surface brightness and mass models + "max_integrate": 100, + "min_integrate": 0.001, + } # lower/upper bound of numerical integrals self._kwargs_numerics_kin = kwargs_numerics_galkin self._anisotropy_model = anisotropy_model self._analytic_kinematics = analytic_kinematics @@ -101,59 +133,86 @@ def __init__(self, z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seein self._MGE_mass = MGE_mass self._multi_observations = multi_observations - def velocity_dispersion(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None, theta_E=None, - gamma=None, kappa_ext=0): - """ - API for both, analytic and numerical JAM to compute the velocity dispersion [km/s] - This routine uses the galkin_setting() routine for the Galkin configurations (see there what options and input - is relevant. + def velocity_dispersion( + self, + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=None, + theta_E=None, + gamma=None, + kappa_ext=0, + ): + """API for both, analytic and numerical JAM to compute the velocity dispersion + [km/s] This routine uses the galkin_setting() routine for the Galkin + configurations (see there what options and input is relevant. 
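# --- Minimal sketch of the analytic dispersion path handled by this class
# (all numerical values, the "SPEP"/"HERNQUIST" model choice and the slit
# aperture / Gaussian seeing dictionaries are assumed example inputs, not
# taken from this diff).
from lenstronomy.Analysis.kinematics_api import KinematicsAPI

kin_api = KinematicsAPI(
    z_lens=0.5,
    z_source=2.0,
    kwargs_model={"lens_model_list": ["SPEP"], "lens_light_model_list": ["HERNQUIST"]},
    kwargs_aperture={"aperture_type": "slit", "length": 1.1, "width": 0.53,
                     "center_ra": 0, "center_dec": 0, "angle": 0},
    kwargs_seeing={"psf_type": "GAUSSIAN", "fwhm": 0.7},
    anisotropy_model="OM",
)
# power-law mass + Hernquist light + Osipkov-Merritt anisotropy, solved analytically
sigma_v = kin_api.velocity_dispersion_analytical(
    theta_E=1.0, gamma=2.0, r_eff=0.5, r_ani=0.5, kappa_ext=0
)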
:param kwargs_lens: lens model keyword arguments :param kwargs_lens_light: lens light model keyword arguments :param kwargs_anisotropy: stellar anisotropy keyword arguments - :param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional, - if set to None will be computed in this function with default settings that may not be accurate. + :param r_eff: projected half-light radius of the stellar light associated with + the deflector galaxy, optional, if set to None will be computed in this + function with default settings that may not be accurate. :param theta_E: Einstein radius (optional) :param gamma: power-law slope (optional) :param kappa_ext: external convergence (optional) :return: velocity dispersion [km/s] """ - galkin, kwargs_profile, kwargs_light = self.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=r_eff, - theta_E=theta_E, gamma=gamma) - sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, - sampling_number=self._sampling_number) + galkin, kwargs_profile, kwargs_light = self.galkin_settings( + kwargs_lens, kwargs_lens_light, r_eff=r_eff, theta_E=theta_E, gamma=gamma + ) + sigma_v = galkin.dispersion( + kwargs_profile, + kwargs_light, + kwargs_anisotropy, + sampling_number=self._sampling_number, + ) sigma_v = self.transform_kappa_ext(sigma_v, kappa_ext=kappa_ext) return sigma_v - def velocity_dispersion_map(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None, theta_E=None, - gamma=None, kappa_ext=0): - """ - API for both, analytic and numerical JAM to compute the velocity dispersion map with IFU data [km/s] + def velocity_dispersion_map( + self, + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=None, + theta_E=None, + gamma=None, + kappa_ext=0, + ): + """API for both, analytic and numerical JAM to compute the velocity dispersion + map with IFU data [km/s] :param kwargs_lens: lens model keyword arguments :param kwargs_lens_light: lens light model keyword arguments :param kwargs_anisotropy: stellar anisotropy keyword arguments - :param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional, - if set to None will be computed in this function with default settings that may not be accurate. - :param theta_E: circularized Einstein radius, optional, if not provided will either be computed in this - function with default settings or not required + :param r_eff: projected half-light radius of the stellar light associated with + the deflector galaxy, optional, if set to None will be computed in this + function with default settings that may not be accurate. 
+ :param theta_E: circularized Einstein radius, optional, if not provided will + either be computed in this function with default settings or not required :param gamma: power-law slope at the Einstein radius, optional :param kappa_ext: external convergence :return: velocity dispersion [km/s] """ - galkin, kwargs_profile, kwargs_light = self.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=r_eff, - theta_E=theta_E, gamma=gamma) - sigma_v_map = galkin.dispersion_map(kwargs_profile, kwargs_light, kwargs_anisotropy, - num_kin_sampling=self._num_kin_sampling, - num_psf_sampling=self._num_psf_sampling) + galkin, kwargs_profile, kwargs_light = self.galkin_settings( + kwargs_lens, kwargs_lens_light, r_eff=r_eff, theta_E=theta_E, gamma=gamma + ) + sigma_v_map = galkin.dispersion_map( + kwargs_profile, + kwargs_light, + kwargs_anisotropy, + num_kin_sampling=self._num_kin_sampling, + num_psf_sampling=self._num_psf_sampling, + ) sigma_v_map = self.transform_kappa_ext(sigma_v_map, kappa_ext=kappa_ext) return sigma_v_map def velocity_dispersion_analytical(self, theta_E, gamma, r_eff, r_ani, kappa_ext=0): - """ - computes the LOS velocity dispersion of the lens within a slit of size R_slit x dR_slit and seeing psf_fwhm. - The assumptions are a Hernquist light profile and the spherical power-law lens model at the first position and - an Osipkov and Merritt ('OM') stellar anisotropy distribution. + """Computes the LOS velocity dispersion of the lens within a slit of size R_slit + x dR_slit and seeing psf_fwhm. The assumptions are a Hernquist light profile and + the spherical power-law lens model at the first position and an Osipkov and + Merritt ('OM') stellar anisotropy distribution. Further information can be found in the AnalyticKinematics() class. @@ -164,18 +223,29 @@ def velocity_dispersion_analytical(self, theta_E, gamma, r_eff, r_ani, kappa_ext :param kappa_ext: external convergence not accounted in the lens models :return: velocity dispersion in units [km/s] """ - galkin = Galkin(kwargs_model={'anisotropy_model': 'OM'}, kwargs_aperture=self._kwargs_aperture_kin, - kwargs_psf=self._kwargs_psf_kin, kwargs_cosmo=self._kwargs_cosmo, kwargs_numerics={}, - analytic_kinematics=True) - kwargs_profile = {'theta_E': theta_E, 'gamma': gamma} - kwargs_light = {'r_eff': r_eff} - kwargs_anisotropy = {'r_ani': r_ani} - sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, - sampling_number=self._sampling_number) + galkin = Galkin( + kwargs_model={"anisotropy_model": "OM"}, + kwargs_aperture=self._kwargs_aperture_kin, + kwargs_psf=self._kwargs_psf_kin, + kwargs_cosmo=self._kwargs_cosmo, + kwargs_numerics={}, + analytic_kinematics=True, + ) + kwargs_profile = {"theta_E": theta_E, "gamma": gamma} + kwargs_light = {"r_eff": r_eff} + kwargs_anisotropy = {"r_ani": r_ani} + sigma_v = galkin.dispersion( + kwargs_profile, + kwargs_light, + kwargs_anisotropy, + sampling_number=self._sampling_number, + ) sigma_v = self.transform_kappa_ext(sigma_v, kappa_ext=kappa_ext) return sigma_v - def galkin_settings(self, kwargs_lens, kwargs_lens_light, r_eff=None, theta_E=None, gamma=None): + def galkin_settings( + self, kwargs_lens, kwargs_lens_light, r_eff=None, theta_E=None, gamma=None + ): """ :param kwargs_lens: lens model keyword argument list @@ -186,74 +256,126 @@ def galkin_settings(self, kwargs_lens, kwargs_lens_light, r_eff=None, theta_E=No :return: Galkin() instance and mass and light profiles configured for the Galkin module """ if r_eff is None: - r_eff = 
self._lensLightProfile.half_light_radius(kwargs_lens_light, grid_spacing=0.05, grid_num=200, - center_x=None, center_y=None, - model_bool_list=self._light_model_kinematics_bool) + r_eff = self._lensLightProfile.half_light_radius( + kwargs_lens_light, + grid_spacing=0.05, + grid_num=200, + center_x=None, + center_y=None, + model_bool_list=self._light_model_kinematics_bool, + ) if theta_E is None: - theta_E = self._lensMassProfile.effective_einstein_radius_grid(kwargs_lens, center_x=None, center_y=None, - model_bool_list=self._lens_model_kinematics_bool, - grid_num=200, grid_spacing=0.05, - get_precision=False, verbose=True) + theta_E = self._lensMassProfile.effective_einstein_radius_grid( + kwargs_lens, + center_x=None, + center_y=None, + model_bool_list=self._lens_model_kinematics_bool, + grid_num=200, + grid_spacing=0.05, + get_precision=False, + verbose=True, + ) if gamma is None and self._analytic_kinematics is True: - gamma = self._lensMassProfile.profile_slope(kwargs_lens, theta_E, center_x=None, center_y=None, - model_list_bool=self._lens_model_kinematics_bool, - num_points=10) + gamma = self._lensMassProfile.profile_slope( + kwargs_lens, + theta_E, + center_x=None, + center_y=None, + model_list_bool=self._lens_model_kinematics_bool, + num_points=10, + ) - mass_profile_list, kwargs_profile = self.kinematic_lens_profiles(kwargs_lens, - MGE_fit=self._MGE_mass, theta_E=theta_E, - model_kinematics_bool=self._lens_model_kinematics_bool, - kwargs_mge=self._kwargs_mge_mass, gamma=gamma, - analytic_kinematics=self._analytic_kinematics) - light_profile_list, kwargs_light = self.kinematic_light_profile(kwargs_lens_light, - r_eff=r_eff, - MGE_fit=self._MGE_light, kwargs_mge=self._kwargs_mge_light, - model_kinematics_bool=self._light_model_kinematics_bool, - Hernquist_approx=self._Hernquist_approx, - analytic_kinematics=self._analytic_kinematics) - kwargs_model = {'mass_profile_list': mass_profile_list, 'light_profile_list': light_profile_list, - 'anisotropy_model': self._anisotropy_model} + mass_profile_list, kwargs_profile = self.kinematic_lens_profiles( + kwargs_lens, + MGE_fit=self._MGE_mass, + theta_E=theta_E, + model_kinematics_bool=self._lens_model_kinematics_bool, + kwargs_mge=self._kwargs_mge_mass, + gamma=gamma, + analytic_kinematics=self._analytic_kinematics, + ) + light_profile_list, kwargs_light = self.kinematic_light_profile( + kwargs_lens_light, + r_eff=r_eff, + MGE_fit=self._MGE_light, + kwargs_mge=self._kwargs_mge_light, + model_kinematics_bool=self._light_model_kinematics_bool, + Hernquist_approx=self._Hernquist_approx, + analytic_kinematics=self._analytic_kinematics, + ) + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": self._anisotropy_model, + } if self._multi_observations is True: - galkin = GalkinMultiObservation(kwargs_model=kwargs_model, kwargs_aperture_list=self._kwargs_aperture_kin, - kwargs_psf_list=self._kwargs_psf_kin, kwargs_cosmo=self._kwargs_cosmo, - kwargs_numerics=self._kwargs_numerics_kin, - analytic_kinematics=self._analytic_kinematics) - elif self._kwargs_aperture_kin['aperture_type'] == 'IFU_shells' and not self._analytic_kinematics: - galkin = GalkinShells(kwargs_model=kwargs_model, kwargs_aperture=self._kwargs_aperture_kin, - kwargs_psf=self._kwargs_psf_kin, kwargs_cosmo=self._kwargs_cosmo, - kwargs_numerics=self._kwargs_numerics_kin, analytic_kinematics=self._analytic_kinematics) + galkin = GalkinMultiObservation( + kwargs_model=kwargs_model, + 
kwargs_aperture_list=self._kwargs_aperture_kin, + kwargs_psf_list=self._kwargs_psf_kin, + kwargs_cosmo=self._kwargs_cosmo, + kwargs_numerics=self._kwargs_numerics_kin, + analytic_kinematics=self._analytic_kinematics, + ) + elif ( + self._kwargs_aperture_kin["aperture_type"] == "IFU_shells" + and not self._analytic_kinematics + ): + galkin = GalkinShells( + kwargs_model=kwargs_model, + kwargs_aperture=self._kwargs_aperture_kin, + kwargs_psf=self._kwargs_psf_kin, + kwargs_cosmo=self._kwargs_cosmo, + kwargs_numerics=self._kwargs_numerics_kin, + analytic_kinematics=self._analytic_kinematics, + ) else: - galkin = Galkin(kwargs_model=kwargs_model, kwargs_aperture=self._kwargs_aperture_kin, - kwargs_psf=self._kwargs_psf_kin, kwargs_cosmo=self._kwargs_cosmo, - kwargs_numerics=self._kwargs_numerics_kin, analytic_kinematics=self._analytic_kinematics) + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=self._kwargs_aperture_kin, + kwargs_psf=self._kwargs_psf_kin, + kwargs_cosmo=self._kwargs_cosmo, + kwargs_numerics=self._kwargs_numerics_kin, + analytic_kinematics=self._analytic_kinematics, + ) return galkin, kwargs_profile, kwargs_light - def kinematic_lens_profiles(self, kwargs_lens, MGE_fit=False, model_kinematics_bool=None, theta_E=None, gamma=None, - kwargs_mge=None, analytic_kinematics=False): - """ - translates the lenstronomy lens and mass profiles into a (sub) set of profiles that are compatible with the - GalKin module to compute the kinematics thereof. - The requirement is that the - profiles are centered at (0, 0) and that for all profile types there exists a 3d de-projected analytical - representation. + def kinematic_lens_profiles( + self, + kwargs_lens, + MGE_fit=False, + model_kinematics_bool=None, + theta_E=None, + gamma=None, + kwargs_mge=None, + analytic_kinematics=False, + ): + """Translates the lenstronomy lens and mass profiles into a (sub) set of + profiles that are compatible with the GalKin module to compute the kinematics + thereof. The requirement is that the profiles are centered at (0, 0) and that + for all profile types there exists a 3d de-projected analytical representation. :param kwargs_lens: lens model parameters :param MGE_fit: bool, if true performs the MGE for the mass distribution - :param model_kinematics_bool: bool list of length of the lens model. Only takes a subset of all the models - as part of the kinematics computation (can be used to ignore substructure, shear etc that do not describe the - main deflector potential - :param theta_E: (optional float) estimate of the Einstein radius. If present, does not numerically compute this - quantity in this routine numerically + :param model_kinematics_bool: bool list of length of the lens model. Only takes + a subset of all the models as part of the kinematics computation (can be + used to ignore substructure, shear etc that do not describe the main + deflector potential + :param theta_E: (optional float) estimate of the Einstein radius. 
If present, + does not numerically compute this quantity in this routine numerically :param gamma: local power-law slope at the Einstein radius (optional) :param kwargs_mge: keyword arguments that go into the MGE decomposition routine - :param analytic_kinematics: bool, if True, solves the Jeans equation analytically for the - power-law mass profile with Hernquist light profile + :param analytic_kinematics: bool, if True, solves the Jeans equation + analytically for the power-law mass profile with Hernquist light profile :return: mass_profile_list, keyword argument list """ if analytic_kinematics is True: if gamma is None or theta_E is None: - raise ValueError('power-law slope and Einstein radius must be set to allow for analytic kinematics to ' - 'be computed!') - return None, {'theta_E': theta_E, 'gamma': gamma} + raise ValueError( + "power-law slope and Einstein radius must be set to allow for analytic kinematics to " + "be computed!" + ) + return None, {"theta_E": theta_E, "gamma": gamma} mass_profile_list = [] kwargs_profile = [] if model_kinematics_bool is None: @@ -261,96 +383,154 @@ def kinematic_lens_profiles(self, kwargs_lens, MGE_fit=False, model_kinematics_b for i, lens_model in enumerate(self._lens_model_list): if model_kinematics_bool[i] is True: mass_profile_list.append(lens_model) - if lens_model in ['INTERPOL', 'INTERPOL_SCLAED']: - center_x_i, center_y_i = self._lensMassProfile.convergence_peak(kwargs_lens, model_bool_list=i, - grid_num=200, grid_spacing=0.01, - center_x_init=0, center_y_init=0) + if lens_model in ["INTERPOL", "INTERPOL_SCLAED"]: + center_x_i, center_y_i = self._lensMassProfile.convergence_peak( + kwargs_lens, + model_bool_list=i, + grid_num=200, + grid_spacing=0.01, + center_x_init=0, + center_y_init=0, + ) kwargs_lens_i = copy.deepcopy(kwargs_lens[i]) - kwargs_lens_i['grid_interp_x'] -= center_x_i - kwargs_lens_i['grid_interp_y'] -= center_y_i + kwargs_lens_i["grid_interp_x"] -= center_x_i + kwargs_lens_i["grid_interp_y"] -= center_y_i else: - kwargs_lens_i = {k: v for k, v in kwargs_lens[i].items() if not k in ['center_x', 'center_y']} + kwargs_lens_i = { + k: v + for k, v in kwargs_lens[i].items() + if not k in ["center_x", "center_y"] + } kwargs_profile.append(kwargs_lens_i) if MGE_fit is True: if kwargs_mge is None: - raise ValueError('kwargs_mge needs to be specified!') + raise ValueError("kwargs_mge needs to be specified!") if theta_E is None: - raise ValueError('rough estimate of the Einstein radius needs to be provided to compute the MGE!') + raise ValueError( + "rough estimate of the Einstein radius needs to be provided to compute the MGE!" 
+ ) r_array = np.logspace(-4, 2, 200) * theta_E - if self._lens_model_list[0] in ['INTERPOL', 'INTERPOL_SCLAED']: - center_x, center_y = self._lensMassProfile.convergence_peak(kwargs_lens, model_bool_list=model_kinematics_bool, - grid_num=200, grid_spacing=0.01, - center_x_init=0, center_y_init=0) + if self._lens_model_list[0] in ["INTERPOL", "INTERPOL_SCLAED"]: + center_x, center_y = self._lensMassProfile.convergence_peak( + kwargs_lens, + model_bool_list=model_kinematics_bool, + grid_num=200, + grid_spacing=0.01, + center_x_init=0, + center_y_init=0, + ) else: center_x, center_y = None, None - mass_r = self._lensMassProfile.radial_lens_profile(r_array, kwargs_lens, center_x=center_x, - center_y=center_y, model_bool_list=model_kinematics_bool) - amps, sigmas, norm = mge.mge_1d(r_array, mass_r, N=kwargs_mge.get('n_comp', 20)) - mass_profile_list = ['MULTI_GAUSSIAN_KAPPA'] - kwargs_profile = [{'amp': amps, 'sigma': sigmas}] + mass_r = self._lensMassProfile.radial_lens_profile( + r_array, + kwargs_lens, + center_x=center_x, + center_y=center_y, + model_bool_list=model_kinematics_bool, + ) + amps, sigmas, norm = mge.mge_1d( + r_array, mass_r, N=kwargs_mge.get("n_comp", 20) + ) + mass_profile_list = ["MULTI_GAUSSIAN_KAPPA"] + kwargs_profile = [{"amp": amps, "sigma": sigmas}] return mass_profile_list, kwargs_profile - def kinematic_light_profile(self, kwargs_lens_light, r_eff=None, MGE_fit=False, model_kinematics_bool=None, - Hernquist_approx=False, kwargs_mge=None, analytic_kinematics=False): - """ - setting up of the light profile to compute the kinematics in the GalKin module. The requirement is that the - profiles are centered at (0, 0) and that for all profile types there exists a 3d de-projected analytical - representation. + def kinematic_light_profile( + self, + kwargs_lens_light, + r_eff=None, + MGE_fit=False, + model_kinematics_bool=None, + Hernquist_approx=False, + kwargs_mge=None, + analytic_kinematics=False, + ): + """Setting up of the light profile to compute the kinematics in the GalKin + module. The requirement is that the profiles are centered at (0, 0) and that for + all profile types there exists a 3d de-projected analytical representation. :param kwargs_lens_light: deflector light model keyword argument list - :param r_eff: (optional float, else=None) Pre-calculated projected half-light radius of the deflector profile. - If not provided, numerical calculation is done in this routine if required. - :param MGE_fit: boolean, if True performs a Multi-Gaussian expansion of the radial light profile and returns - this solution. - :param model_kinematics_bool: list of booleans to indicate a subset of light profiles to be part of the physical - deflector light. - :param Hernquist_approx: boolean, if True replaces the actual light profile(s) with a Hernquist model with - matched half-light radius. + :param r_eff: (optional float, else=None) Pre-calculated projected half-light + radius of the deflector profile. If not provided, numerical calculation is + done in this routine if required. + :param MGE_fit: boolean, if True performs a Multi-Gaussian expansion of the + radial light profile and returns this solution. + :param model_kinematics_bool: list of booleans to indicate a subset of light + profiles to be part of the physical deflector light. + :param Hernquist_approx: boolean, if True replaces the actual light profile(s) + with a Hernquist model with matched half-light radius. 
:param kwargs_mge: keyword arguments that go into the MGE decomposition routine - :param analytic_kinematics: bool, if True, solves the Jeans equation analytically for the - power-law mass profile with Hernquist light profile and adjust the settings accordingly + :param analytic_kinematics: bool, if True, solves the Jeans equation + analytically for the power-law mass profile with Hernquist light profile and + adjust the settings accordingly :return: deflector type list, keyword arguments list """ if analytic_kinematics is True: if r_eff is None: - raise ValueError('half light radius "r_eff" needs to be set to allow for analytic kinematics to be ' - 'computed!') - return None, {'r_eff': r_eff} + raise ValueError( + 'half light radius "r_eff" needs to be set to allow for analytic kinematics to be ' + "computed!" + ) + return None, {"r_eff": r_eff} light_profile_list = [] kwargs_light = [] if Hernquist_approx is True: if r_eff is None: - raise ValueError('r_eff needs to be pre-computed and specified when using the Hernquist approximation') - light_profile_list = ['HERNQUIST'] - kwargs_light = [{'Rs': r_eff * 0.551, 'amp': 1.}] + raise ValueError( + "r_eff needs to be pre-computed and specified when using the Hernquist approximation" + ) + light_profile_list = ["HERNQUIST"] + kwargs_light = [{"Rs": r_eff * 0.551, "amp": 1.0}] return light_profile_list, kwargs_light if model_kinematics_bool is None: model_kinematics_bool = [True] * len(kwargs_lens_light) for i, light_model in enumerate(self._lens_light_model_list): if model_kinematics_bool[i] is True: light_profile_list.append(light_model) - kwargs_lens_light_i = {k: v for k, v in kwargs_lens_light[i].items() if - not k in ['center_x', 'center_y']} - if 'e1' in kwargs_lens_light_i: - kwargs_lens_light_i['e1'] = 0 - kwargs_lens_light_i['e2'] = 0 + kwargs_lens_light_i = { + k: v + for k, v in kwargs_lens_light[i].items() + if not k in ["center_x", "center_y"] + } + if "e1" in kwargs_lens_light_i: + kwargs_lens_light_i["e1"] = 0 + kwargs_lens_light_i["e2"] = 0 kwargs_light.append(kwargs_lens_light_i) if MGE_fit is True: if kwargs_mge is None: - raise ValueError('kwargs_mge must be provided to compute the MGE') - amps, sigmas, center_x, center_y = self._lensLightProfile.multi_gaussian_decomposition( - kwargs_lens_light, model_bool_list=model_kinematics_bool, r_h=r_eff, **kwargs_mge) - light_profile_list = ['MULTI_GAUSSIAN'] - kwargs_light = [{'amp': amps, 'sigma': sigmas}] + raise ValueError("kwargs_mge must be provided to compute the MGE") + ( + amps, + sigmas, + center_x, + center_y, + ) = self._lensLightProfile.multi_gaussian_decomposition( + kwargs_lens_light, + model_bool_list=model_kinematics_bool, + r_h=r_eff, + **kwargs_mge + ) + light_profile_list = ["MULTI_GAUSSIAN"] + kwargs_light = [{"amp": amps, "sigma": sigmas}] return light_profile_list, kwargs_light - def kinematics_modeling_settings(self, anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False, - Hernquist_approx=False, MGE_light=False, MGE_mass=False, kwargs_mge_light=None, - kwargs_mge_mass=None, sampling_number=1000, num_kin_sampling=1000, - num_psf_sampling=100): + def kinematics_modeling_settings( + self, + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=False, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + kwargs_mge_light=None, + kwargs_mge_mass=None, + sampling_number=1000, + num_kin_sampling=1000, + num_psf_sampling=100, + ): """ :param anisotropy_model: type of stellar anisotropy model. 
See details in MamonLokasAnisotropy() class of lenstronomy.GalKin.anisotropy @@ -368,13 +548,18 @@ def kinematics_modeling_settings(self, anisotropy_model, kwargs_numerics_galkin, :return: """ if kwargs_mge_mass is None: - self._kwargs_mge_mass = {'n_comp': 20} - else : + self._kwargs_mge_mass = {"n_comp": 20} + else: self._kwargs_mge_mass = kwargs_mge_mass if kwargs_mge_light is None: - self._kwargs_mge_light = {'grid_spacing': 0.01, 'grid_num': 100, 'n_comp': 20, 'center_x': None, - 'center_y': None} + self._kwargs_mge_light = { + "grid_spacing": 0.01, + "grid_num": 100, + "n_comp": 20, + "center_x": None, + "center_y": None, + } else: self._kwargs_mge_light = kwargs_mge_light self._kwargs_numerics_kin = kwargs_numerics_galkin diff --git a/lenstronomy/Analysis/lens_profile.py b/lenstronomy/Analysis/lens_profile.py index ffd9e028a..63aedea98 100644 --- a/lenstronomy/Analysis/lens_profile.py +++ b/lenstronomy/Analysis/lens_profile.py @@ -6,13 +6,12 @@ from lenstronomy.Util import analysis_util from lenstronomy.LensModel.lens_model_extensions import LensModelExtensions -__all__ = ['LensProfileAnalysis'] +__all__ = ["LensProfileAnalysis"] class LensProfileAnalysis(object): - """ - class with analysis routines to compute derived properties of the lens model - """ + """Class with analysis routines to compute derived properties of the lens model.""" + def __init__(self, lens_model): """ @@ -20,10 +19,18 @@ def __init__(self, lens_model): """ self._lens_model = lens_model - def effective_einstein_radius_grid(self, kwargs_lens, center_x=None, center_y=None, model_bool_list=None, - grid_num=200, grid_spacing=0.05, get_precision=False, verbose=True): - """ - computes the radius with mean convergence=1 on a grid + def effective_einstein_radius_grid( + self, + kwargs_lens, + center_x=None, + center_y=None, + model_bool_list=None, + grid_num=200, + grid_spacing=0.05, + get_precision=False, + verbose=True, + ): + """Computes the radius with mean convergence=1 on a grid. 
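# --- Minimal sketch of the grid-based Einstein-radius estimate below; the
# singular isothermal sphere ("SIS") model and its parameters are assumed
# example inputs.
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.Analysis.lens_profile import LensProfileAnalysis

lens_model = LensModel(lens_model_list=["SIS"])
profile_analysis = LensProfileAnalysis(lens_model=lens_model)
theta_E = profile_analysis.effective_einstein_radius_grid(
    [{"theta_E": 1.0, "center_x": 0, "center_y": 0}],
    grid_num=200,
    grid_spacing=0.05,
)
# for an SIS profile the estimate should recover theta_E ~ 1.0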
:param kwargs_lens: list of lens model keyword arguments :param center_x: position of the center (if not set, is attempting to find it from the parameters kwargs_lens) @@ -38,26 +45,40 @@ def effective_einstein_radius_grid(self, kwargs_lens, center_x=None, center_y=No :type verbose: bool :return: estimate of the Einstein radius """ - center_x, center_y = analysis_util.profile_center(kwargs_lens, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_lens, center_x, center_y + ) x_grid, y_grid = util.make_grid(numPix=grid_num, deltapix=grid_spacing) x_grid += center_x y_grid += center_y kappa = self._lens_model.kappa(x_grid, y_grid, kwargs_lens, k=model_bool_list) - if self._lens_model.lens_model_list[0] in ['INTERPOL', 'INTERPOL_SCALED']: + if self._lens_model.lens_model_list[0] in ["INTERPOL", "INTERPOL_SCALED"]: center_x = x_grid[kappa == np.max(kappa)][0] center_y = y_grid[kappa == np.max(kappa)][0] - return einstein_radius_from_grid(kappa, x_grid, y_grid, grid_spacing, grid_num, center_x=center_x, - center_y=center_y, get_precision=get_precision, verbose=verbose) - - def effective_einstein_radius(self, kwargs_lens, r_min=1e-3, r_max=1e1, num_points=30): - """ - Numerical estimate of the Einstein radius with integral approximation of radial convergence profile + return einstein_radius_from_grid( + kappa, + x_grid, + y_grid, + grid_spacing, + grid_num, + center_x=center_x, + center_y=center_y, + get_precision=get_precision, + verbose=verbose, + ) + + def effective_einstein_radius( + self, kwargs_lens, r_min=1e-3, r_max=1e1, num_points=30 + ): + """Numerical estimate of the Einstein radius with integral approximation of + radial convergence profile. :param kwargs_lens: list of lens model keyword arguments :param r_min: minimum radius of the convergence integrand - :param r_max: maximum radius of the convergence integrand (should be larger than Einstein radius) + :param r_max: maximum radius of the convergence integrand (should be larger than + Einstein radius) :param num_points: number of radial points in log spacing :return: estimate of the Einstein radius """ @@ -86,65 +107,94 @@ def effective_einstein_radius(self, kwargs_lens, r_min=1e-3, r_max=1e1, num_poin kappa_cdf = np.cumsum(kappa_slice) # calculate average convergence at radius - kappa_average = kappa_cdf / (np.pi * r_array ** 2) + kappa_average = kappa_cdf / (np.pi * r_array**2) # we interpolate as the inverse function and evaluate this function for average kappa = 1 # (assumes monotonic decline in average convergence) - inv_interp = scipy.interpolate.interp1d(np.log10(kappa_average), np.log10(r_array)) + inv_interp = scipy.interpolate.interp1d( + np.log10(kappa_average), np.log10(r_array) + ) try: theta_e = 10 ** inv_interp(0) except: theta_e = np.nan return theta_e - def local_lensing_effect(self, kwargs_lens, ra_pos=0, dec_pos=0, model_list_bool=None): - """ - computes deflection, shear and convergence at (ra_pos,dec_pos) for those part of the lens model not included - in the main deflector. + def local_lensing_effect( + self, kwargs_lens, ra_pos=0, dec_pos=0, model_list_bool=None + ): + """Computes deflection, shear and convergence at (ra_pos,dec_pos) for those part + of the lens model not included in the main deflector. 
:param kwargs_lens: lens model keyword argument list :param ra_pos: RA position where to compute the external effect :param dec_pos: DEC position where to compute the external effect - :param model_list_bool: boolean list indicating which models effect to be added to the estimate + :param model_list_bool: boolean list indicating which models effect to be added + to the estimate :return: alpha_x, alpha_y, kappa, shear1, shear2 """ - f_x, f_y = self._lens_model.alpha(ra_pos, dec_pos, kwargs_lens, k=model_list_bool) - f_xx, f_xy, f_yx, f_yy = self._lens_model.hessian(ra_pos, dec_pos, kwargs_lens, k=model_list_bool) - kappa = (f_xx + f_yy)/2. - shear1 = 1./2 * (f_xx - f_yy) + f_x, f_y = self._lens_model.alpha( + ra_pos, dec_pos, kwargs_lens, k=model_list_bool + ) + f_xx, f_xy, f_yx, f_yy = self._lens_model.hessian( + ra_pos, dec_pos, kwargs_lens, k=model_list_bool + ) + kappa = (f_xx + f_yy) / 2.0 + shear1 = 1.0 / 2 * (f_xx - f_yy) shear2 = f_xy return f_x, f_y, kappa, shear1, shear2 - def profile_slope(self, kwargs_lens, radius, center_x=None, center_y=None, model_list_bool=None, num_points=10): - """ - computes the logarithmic power-law slope of a profile. ATTENTION: this is not an observable! + def profile_slope( + self, + kwargs_lens, + radius, + center_x=None, + center_y=None, + model_list_bool=None, + num_points=10, + ): + """Computes the logarithmic power-law slope of a profile. ATTENTION: this is not + an observable! :param kwargs_lens: lens model keyword argument list - :param radius: radius from the center where to compute the logarithmic slope (angular units + :param radius: radius from the center where to compute the logarithmic slope + (angular units :param center_x: center of profile from where to compute the slope :param center_y: center of profile from where to compute the slope :param model_list_bool: bool list, indicate which part of the model to consider :param num_points: number of estimates around the Einstein radius :return: logarithmic power-law slope """ - center_x, center_y = analysis_util.profile_center(kwargs_lens, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_lens, center_x, center_y + ) x, y = util.points_on_circle(radius, num_points) dr = 0.01 x_dr, y_dr = util.points_on_circle(radius + dr, num_points) - alpha_E_x_i, alpha_E_y_i = self._lens_model.alpha(center_x + x, center_y + y, kwargs_lens, k=model_list_bool) + alpha_E_x_i, alpha_E_y_i = self._lens_model.alpha( + center_x + x, center_y + y, kwargs_lens, k=model_list_bool + ) alpha_E_r = np.sqrt(alpha_E_x_i**2 + alpha_E_y_i**2) - alpha_E_dr_x_i, alpha_E_dr_y_i = self._lens_model.alpha(center_x + x_dr, center_y + y_dr, kwargs_lens, - k=model_list_bool) - alpha_E_dr = np.sqrt(alpha_E_dr_x_i ** 2 + alpha_E_dr_y_i ** 2) + alpha_E_dr_x_i, alpha_E_dr_y_i = self._lens_model.alpha( + center_x + x_dr, center_y + y_dr, kwargs_lens, k=model_list_bool + ) + alpha_E_dr = np.sqrt(alpha_E_dr_x_i**2 + alpha_E_dr_y_i**2) slope = np.mean(np.log(alpha_E_dr / alpha_E_r) / np.log((radius + dr) / radius)) gamma = -slope + 2 return gamma - def mst_invariant_differential(self, kwargs_lens, radius, center_x=None, center_y=None, model_list_bool=None, - num_points=10): - """ - Average of the radial stretch differential in radial direction, divided by the radial stretch factor. 
+ def mst_invariant_differential( + self, + kwargs_lens, + radius, + center_x=None, + center_y=None, + model_list_bool=None, + num_points=10, + ): + """Average of the radial stretch differential in radial direction, divided by + the radial stretch factor. .. math:: \\xi = \\frac{\\partial \\lambda_{\\rm rad}}{\\partial r} \\frac{1}{\\lambda_{\\rm rad}} @@ -161,14 +211,32 @@ def mst_invariant_differential(self, kwargs_lens, radius, center_x=None, center_ :param num_points: number of estimates around the radius :return: xi """ - center_x, center_y = analysis_util.profile_center(kwargs_lens, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_lens, center_x, center_y + ) x, y = util.points_on_circle(radius, num_points) ext = LensModelExtensions(lensModel=self._lens_model) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials(x, y, kwargs_lens, center_x=center_x, center_y=center_y) - xi = np.mean(dlambda_rad_drad/lambda_rad) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials( + x, y, kwargs_lens, center_x=center_x, center_y=center_y + ) + xi = np.mean(dlambda_rad_drad / lambda_rad) return xi - def radial_lens_profile(self, r_list, kwargs_lens, center_x=None, center_y=None, model_bool_list=None): + def radial_lens_profile( + self, r_list, kwargs_lens, center_x=None, center_y=None, model_bool_list=None + ): """ :param r_list: list of radii to compute the spherically averaged lens light profile @@ -178,33 +246,47 @@ def radial_lens_profile(self, r_list, kwargs_lens, center_x=None, center_y=None, :param model_bool_list: bool list or None, indicating which profiles to sum over :return: flux amplitudes at r_list radii azimuthally averaged """ - center_x, center_y = analysis_util.profile_center(kwargs_lens, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_lens, center_x, center_y + ) kappa_list = [] for r in r_list: x, y = util.points_on_circle(r, num_points=20) - f_r = self._lens_model.kappa(x + center_x, y + center_y, kwargs=kwargs_lens, k=model_bool_list) + f_r = self._lens_model.kappa( + x + center_x, y + center_y, kwargs=kwargs_lens, k=model_bool_list + ) kappa_list.append(np.average(f_r)) return kappa_list - def multi_gaussian_lens(self, kwargs_lens, center_x=None, center_y=None, model_bool_list=None, n_comp=20): - """ - multi-gaussian lens model in convergence space + def multi_gaussian_lens( + self, kwargs_lens, center_x=None, center_y=None, model_bool_list=None, n_comp=20 + ): + """Multi-gaussian lens model in convergence space. 
:param kwargs_lens: :param n_comp: :return: """ - center_x, center_y = analysis_util.profile_center(kwargs_lens, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_lens, center_x, center_y + ) theta_E = self.effective_einstein_radius_grid(kwargs_lens) r_array = np.logspace(-4, 2, 200) * theta_E - kappa_s = self.radial_lens_profile(r_array, kwargs_lens, center_x=center_x, center_y=center_y, - model_bool_list=model_bool_list) + kappa_s = self.radial_lens_profile( + r_array, + kwargs_lens, + center_x=center_x, + center_y=center_y, + model_bool_list=model_bool_list, + ) amplitudes, sigmas, norm = mge.mge_1d(r_array, kappa_s, N=n_comp) return amplitudes, sigmas, center_x, center_y - def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, numPix=100): - """ - computes the mean convergence of all the different lens model components within a spherical aperture + def mass_fraction_within_radius( + self, kwargs_lens, center_x, center_y, theta_E, numPix=100 + ): + """Computes the mean convergence of all the different lens model components + within a spherical aperture. :param kwargs_lens: lens model keyword argument list :param center_x: center of the aperture @@ -212,7 +294,7 @@ def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, :param theta_E: radius of aperture :return: list of average convergences for all the model components """ - x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2.*theta_E / numPix) + x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2.0 * theta_E / numPix) x_grid += center_x y_grid += center_y mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x, center_y, theta_E) @@ -223,10 +305,17 @@ def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, kappa_list.append(kappa_mean) return kappa_list - def convergence_peak(self, kwargs_lens, model_bool_list=None, grid_num=200, grid_spacing=0.01, center_x_init=0, - center_y_init=0): - """ - computes the maximal convergence position on a grid and returns its coordinate + def convergence_peak( + self, + kwargs_lens, + model_bool_list=None, + grid_num=200, + grid_spacing=0.01, + center_x_init=0, + center_y_init=0, + ): + """Computes the maximal convergence position on a grid and returns its + coordinate. :param kwargs_lens: lens model keyword argument list :param model_bool_list: bool list (optional) to include certain models or not @@ -243,35 +332,49 @@ def convergence_peak(self, kwargs_lens, model_bool_list=None, grid_num=200, grid return center_x, center_y -def einstein_radius_from_grid(kappa, x_grid, y_grid, grid_spacing, grid_num, center_x=0, center_y=0, - get_precision=False, verbose=True): - """ - computes the radius with mean convergence=1 +def einstein_radius_from_grid( + kappa, + x_grid, + y_grid, + grid_spacing, + grid_num, + center_x=0, + center_y=0, + get_precision=False, + verbose=True, +): + """Computes the radius with mean convergence=1. 
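A quick numerical check of this grid-based definition (the mean convergence inside the returned radius equals 1): evaluate an analytic singular isothermal sphere, kappa = theta_E / (2 r), on a grid and recover theta_E. The import path is an assumption; the rest is plain numpy.

import numpy as np
from lenstronomy.Analysis.lens_profile import einstein_radius_from_grid  # assumed module path

theta_E = 1.0
grid_num, grid_spacing = 200, 0.02
# flattened 1d coordinate arrays centered on (0, 0)
coords = (np.arange(grid_num) - (grid_num - 1) / 2.0) * grid_spacing
x_grid, y_grid = np.meshgrid(coords, coords)
x_grid, y_grid = x_grid.ravel(), y_grid.ravel()
r = np.maximum(np.sqrt(x_grid**2 + y_grid**2), 1e-6)
kappa = theta_E / (2 * r)  # SIS convergence; mean convergence inside theta_E is exactly 1

theta_E_est = einstein_radius_from_grid(
    kappa, x_grid, y_grid, grid_spacing, grid_num, center_x=0, center_y=0
)
# theta_E_est should be close to 1.0, up to pixelization error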
:param kappa: convergence calculated on a grid :param x_grid: x-value of grid points :param y_grid: y-value of grid points :param grid_spacing: spacing of grid points :param grid_num: number of grid points - :param center_x: x-center of profile from where to measure circular averaged convergence - :param center_y: y-center of profile from where to measure circular averaged convergence - :param get_precision: if True, returns Einstein radius and expected numerical precision + :param center_x: x-center of profile from where to measure circular averaged + convergence + :param center_y: y-center of profile from where to measure circular averaged + convergence + :param get_precision: if True, returns Einstein radius and expected numerical + precision :param verbose: if True, indicates warning when Einstein radius can not be computed :type verbose: bool :return: einstein radius """ - r_array = np.linspace(start=0, stop=grid_num * grid_spacing / 2., num=grid_num * 2) + r_array = np.linspace(start=0, stop=grid_num * grid_spacing / 2.0, num=grid_num * 2) inner_most_bin = True for r in r_array: - mask = np.array(1 - mask_util.mask_center_2d(center_x, center_y, r, x_grid, y_grid)) + mask = np.array( + 1 - mask_util.mask_center_2d(center_x, center_y, r, x_grid, y_grid) + ) sum_mask = np.sum(mask) if sum_mask > 0: kappa_mean = np.sum(kappa * mask) / np.sum(mask) if inner_most_bin: if kappa_mean < 1: Warning( - "Central convergence value is subcritical <1 and hence an Einstein radius is ill defined.") + "Central convergence value is subcritical <1 and hence an Einstein radius is ill defined." + ) if get_precision: return np.nan, 0 else: @@ -283,7 +386,9 @@ def einstein_radius_from_grid(kappa, x_grid, y_grid, grid_spacing, grid_num, cen else: return r if verbose: - Warning('Einstein radius could not be computed (or does not exist) for lens model.') + Warning( + "Einstein radius could not be computed (or does not exist) for lens model." + ) if get_precision: return np.nan, 0 else: diff --git a/lenstronomy/Analysis/light2mass.py b/lenstronomy/Analysis/light2mass.py index c253e7d60..30d81dedc 100644 --- a/lenstronomy/Analysis/light2mass.py +++ b/lenstronomy/Analysis/light2mass.py @@ -2,14 +2,21 @@ from lenstronomy.Util import util from lenstronomy.LightModel.light_model import LightModel -__all__ = ['light2mass_interpol'] +__all__ = ["light2mass_interpol"] -def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, deltaPix=0.05, subgrid_res=5, - center_x=0, center_y=0): - """ - takes a lens light model and turns it numerically in a lens model - (with all lensmodel quantities computed on a grid). Then provides an interpolated grid for the quantities. +def light2mass_interpol( + lens_light_model_list, + kwargs_lens_light, + numPix=100, + deltaPix=0.05, + subgrid_res=5, + center_x=0, + center_y=0, +): + """Takes a lens light model and turns it numerically in a lens model (with all + lensmodel quantities computed on a grid). Then provides an interpolated grid for the + quantities. 
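An illustrative call pattern (the Sersic parameter values are arbitrary): convert a light profile into 'INTERPOL' lens-model keyword arguments and evaluate lensing quantities from them.

from lenstronomy.Analysis.light2mass import light2mass_interpol
from lenstronomy.LensModel.lens_model import LensModel

kwargs_light = [
    {"amp": 10.0, "R_sersic": 0.5, "n_sersic": 3.0, "center_x": 0.0, "center_y": 0.0}
]
kwargs_interpol = light2mass_interpol(
    lens_light_model_list=["SERSIC"],
    kwargs_lens_light=kwargs_light,
    numPix=100,
    deltaPix=0.05,
    subgrid_res=5,
)
# the returned dictionary is directly usable as an 'INTERPOL' lens model
lens_model = LensModel(lens_model_list=["INTERPOL"])
kappa_center = lens_model.kappa(0.0, 0.0, [kwargs_interpol])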
:param kwargs_lens_light: lens light keyword argument list :param numPix: number of pixels per axis for the return interpolation @@ -20,8 +27,11 @@ def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, de :return: keyword arguments for 'INTERPOL' lens model """ # make super-sampled grid - x_grid_sub, y_grid_sub = util.make_grid(numPix=numPix * 5, deltapix=deltaPix, subgrid_res=subgrid_res) + x_grid_sub, y_grid_sub = util.make_grid( + numPix=numPix * 5, deltapix=deltaPix, subgrid_res=subgrid_res + ) import lenstronomy.Util.mask_util as mask_util + mask = mask_util.mask_azimuthal(x_grid_sub, y_grid_sub, center_x, center_y, r=1) x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) # compute light on the subgrid @@ -33,11 +43,16 @@ def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, de # compute lensing quantities with subgrid convergence_sub = util.array2image(flux) - f_x_sub, f_y_sub = integral.deflection_from_kappa_grid(convergence_sub, grid_spacing=deltaPix / float(subgrid_res)) - f_sub = integral.potential_from_kappa_grid(convergence_sub, grid_spacing=deltaPix / float(subgrid_res)) + f_x_sub, f_y_sub = integral.deflection_from_kappa_grid( + convergence_sub, grid_spacing=deltaPix / float(subgrid_res) + ) + f_sub = integral.potential_from_kappa_grid( + convergence_sub, grid_spacing=deltaPix / float(subgrid_res) + ) # interpolation function on lensing quantities x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub) from lenstronomy.LensModel.Profiles.interpol import Interpol + interp_func = Interpol() interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub) # compute lensing quantities on sparser grid @@ -46,11 +61,26 @@ def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, de f_x, f_y = interp_func.derivatives(x_grid, y_grid) # numerical differentials for second order differentials from lenstronomy.LensModel.lens_model import LensModel - lens_model = LensModel(lens_model_list=['INTERPOL']) - kwargs = [{'grid_interp_x': x_axes_sub, 'grid_interp_y': y_axes_sub, 'f_': f_sub, - 'f_x': f_x_sub, 'f_y': f_y_sub}] + + lens_model = LensModel(lens_model_list=["INTERPOL"]) + kwargs = [ + { + "grid_interp_x": x_axes_sub, + "grid_interp_y": y_axes_sub, + "f_": f_sub, + "f_x": f_x_sub, + "f_y": f_y_sub, + } + ] f_xx, f_xy, f_yx, f_yy = lens_model.hessian(x_grid, y_grid, kwargs, diff=0.00001) - kwargs_interpol = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_), - 'f_x': util.array2image(f_x), 'f_y': util.array2image(f_y), 'f_xx': util.array2image(f_xx), - 'f_xy': util.array2image(f_xy), 'f_yy': util.array2image(f_yy)} + kwargs_interpol = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_), + "f_x": util.array2image(f_x), + "f_y": util.array2image(f_y), + "f_xx": util.array2image(f_xx), + "f_xy": util.array2image(f_xy), + "f_yy": util.array2image(f_yy), + } return kwargs_interpol diff --git a/lenstronomy/Analysis/light_profile.py b/lenstronomy/Analysis/light_profile.py index ac5ea6887..7826ffe8a 100644 --- a/lenstronomy/Analysis/light_profile.py +++ b/lenstronomy/Analysis/light_profile.py @@ -4,13 +4,12 @@ import lenstronomy.Util.analysis_util as analysis_util import lenstronomy.Util.multi_gauss_expansion as mge -__all__ = ['LightProfileAnalysis'] +__all__ = ["LightProfileAnalysis"] class LightProfileAnalysis(object): - """ - class with analysis routines to compute derived properties of the lens model - """ + """Class with analysis 
routines to compute derived properties of the lens model.""" + def __init__(self, light_model): """ @@ -18,57 +17,98 @@ def __init__(self, light_model): """ self._light_model = light_model - def ellipticity(self, kwargs_light, grid_spacing, grid_num, center_x=None, center_y=None, model_bool_list=None, - num_iterative=10, iterative=False): - """ - make sure that the window covers all the light, otherwise the moments may give a too low answers. + def ellipticity( + self, + kwargs_light, + grid_spacing, + grid_num, + center_x=None, + center_y=None, + model_bool_list=None, + num_iterative=10, + iterative=False, + ): + """Make sure that the window covers all the light, otherwise the moments may + give a too low answers. :param kwargs_light: keyword argument list of profiles - :param center_x: center of profile, if None takes it from the first profile in kwargs_light - :param center_y: center of profile, if None takes it from the first profile in kwargs_light + :param center_x: center of profile, if None takes it from the first profile in + kwargs_light + :param center_y: center of profile, if None takes it from the first profile in + kwargs_light :param model_bool_list: list of booleans to select subsets of the profile :param grid_spacing: grid spacing over which the moments are computed :param grid_num: grid size over which the moments are computed - :param iterative: if True iteratively adopts an eccentric mask to overcome edge effects + :param iterative: if True iteratively adopts an eccentric mask to overcome edge + effects :type iterative: boolean :param num_iterative: number of iterative changes in ellipticity :type num_iterative: int :return: eccentricities e1, e2 """ - center_x, center_y = analysis_util.profile_center(kwargs_light, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_light, center_x, center_y + ) if model_bool_list is None: model_bool_list = [True] * len(kwargs_light) x_grid, y_grid = util.make_grid(numPix=grid_num, deltapix=grid_spacing) x_grid += center_x y_grid += center_y - I_xy = self._light_model.surface_brightness(x_grid, y_grid, kwargs_light, k=model_bool_list) - e1, e2 = analysis_util.ellipticities(I_xy, x_grid-center_x, y_grid-center_y, center_x=0, - center_y=0, iterative=iterative, num_iterative=num_iterative) + I_xy = self._light_model.surface_brightness( + x_grid, y_grid, kwargs_light, k=model_bool_list + ) + e1, e2 = analysis_util.ellipticities( + I_xy, + x_grid - center_x, + y_grid - center_y, + center_x=0, + center_y=0, + iterative=iterative, + num_iterative=num_iterative, + ) return e1, e2 - def half_light_radius(self, kwargs_light, grid_spacing, grid_num, center_x=None, center_y=None, model_bool_list=None): - """ - computes numerically the half-light-radius of the deflector light and the total photon flux + def half_light_radius( + self, + kwargs_light, + grid_spacing, + grid_num, + center_x=None, + center_y=None, + model_bool_list=None, + ): + """Computes numerically the half-light-radius of the deflector light and the + total photon flux. 
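A small sketch of the half-light-radius estimate (grid values chosen ad hoc): for a circular Sersic profile the result should come out close to R_sersic, which is the half-light radius in lenstronomy's parameterization.

from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.Analysis.light_profile import LightProfileAnalysis

light_model = LightModel(light_model_list=["SERSIC"])
kwargs_light = [
    {"amp": 1.0, "R_sersic": 0.5, "n_sersic": 2.0, "center_x": 0.0, "center_y": 0.0}
]
analysis = LightProfileAnalysis(light_model=light_model)
# the grid must comfortably cover the full profile, otherwise the estimate is biased low
r_half = analysis.half_light_radius(kwargs_light, grid_spacing=0.02, grid_num=500)
# r_half should be close to R_sersic = 0.5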
:param kwargs_light: keyword argument list of profiles - :param center_x: center of profile, if None takes it from the first profile in kwargs_light - :param center_y: center of profile, if None takes it from the first profile in kwargs_light + :param center_x: center of profile, if None takes it from the first profile in + kwargs_light + :param center_y: center of profile, if None takes it from the first profile in + kwargs_light :param model_bool_list: list of booleans to select subsets of the profile :param grid_spacing: grid spacing over which the moments are computed :param grid_num: grid size over which the moments are computed :return: half-light radius """ - center_x, center_y = analysis_util.profile_center(kwargs_light, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_light, center_x, center_y + ) if model_bool_list is None: model_bool_list = [True] * len(kwargs_light) x_grid, y_grid = util.make_grid(numPix=grid_num, deltapix=grid_spacing) x_grid += center_x y_grid += center_y - lens_light = self._light_model.surface_brightness(x_grid, y_grid, kwargs_light, k=model_bool_list) - R_h = analysis_util.half_light_radius(lens_light, x_grid, y_grid, center_x, center_y) + lens_light = self._light_model.surface_brightness( + x_grid, y_grid, kwargs_light, k=model_bool_list + ) + R_h = analysis_util.half_light_radius( + lens_light, x_grid, y_grid, center_x, center_y + ) return R_h - def radial_light_profile(self, r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None): + def radial_light_profile( + self, r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None + ): """ :param r_list: list of radii to compute the spherically averaged lens light profile @@ -78,43 +118,80 @@ def radial_light_profile(self, r_list, kwargs_light, center_x=None, center_y=Non :param model_bool_list: bool list or None, indicating which profiles to sum over :return: flux amplitudes at r_list radii spherically averaged """ - center_x, center_y = analysis_util.profile_center(kwargs_light, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_light, center_x, center_y + ) f_list = [] for r in r_list: x, y = util.points_on_circle(r, num_points=20) - f_r = self._light_model.surface_brightness(x + center_x, y + center_y, kwargs_list=kwargs_light, k=model_bool_list) + f_r = self._light_model.surface_brightness( + x + center_x, y + center_y, kwargs_list=kwargs_light, k=model_bool_list + ) f_list.append(np.average(f_r)) return f_list - def multi_gaussian_decomposition(self, kwargs_light, model_bool_list=None, n_comp=20, center_x=None, center_y=None, - r_h=None, grid_spacing=0.02, grid_num=200): - """ - multi-gaussian decomposition of the lens light profile (in 1-dimension) + def multi_gaussian_decomposition( + self, + kwargs_light, + model_bool_list=None, + n_comp=20, + center_x=None, + center_y=None, + r_h=None, + grid_spacing=0.02, + grid_num=200, + ): + """Multi-gaussian decomposition of the lens light profile (in 1-dimension) :param kwargs_light: keyword argument list of profiles - :param center_x: center of profile, if None takes it from the first profile in kwargs_light - :param center_y: center of profile, if None takes it from the first profile in kwargs_light + :param center_x: center of profile, if None takes it from the first profile in + kwargs_light + :param center_y: center of profile, if None takes it from the first profile in + kwargs_light :param model_bool_list: list of booleans to select subsets of the profile - 
:param grid_spacing: grid spacing over which the moments are computed for the half-light radius + :param grid_spacing: grid spacing over which the moments are computed for the + half-light radius :param grid_num: grid size over which the moments are computed :param n_comp: maximum number of Gaussian's in the MGE - :param r_h: float, half light radius to be used for MGE (optional, otherwise using a numerical grid) + :param r_h: float, half light radius to be used for MGE (optional, otherwise + using a numerical grid) :return: amplitudes, sigmas, center_x, center_y """ - center_x, center_y = analysis_util.profile_center(kwargs_light, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_light, center_x, center_y + ) if r_h is None: - r_h = self.half_light_radius(kwargs_light, center_x=center_x, center_y=center_y, - model_bool_list=model_bool_list, grid_spacing=grid_spacing, grid_num=grid_num) + r_h = self.half_light_radius( + kwargs_light, + center_x=center_x, + center_y=center_y, + model_bool_list=model_bool_list, + grid_spacing=grid_spacing, + grid_num=grid_num, + ) r_array = np.logspace(-3, 2, 200) * r_h * 2 - flux_r = self.radial_light_profile(r_array, kwargs_light, center_x=center_x, center_y=center_y, - model_bool_list=model_bool_list) + flux_r = self.radial_light_profile( + r_array, + kwargs_light, + center_x=center_x, + center_y=center_y, + model_bool_list=model_bool_list, + ) amplitudes, sigmas, norm = mge.mge_1d(r_array, flux_r, N=n_comp) return amplitudes, sigmas, center_x, center_y - def multi_gaussian_decomposition_ellipse(self, kwargs_light, model_bool_list=None, - center_x=None, center_y=None, grid_num=100, grid_spacing=0.05, n_comp=20): + def multi_gaussian_decomposition_ellipse( + self, + kwargs_light, + model_bool_list=None, + center_x=None, + center_y=None, + grid_num=100, + grid_spacing=0.05, + n_comp=20, + ): """ MGE with ellipticity estimate. Attention: numerical grid settings for ellipticity estimate and radial MGE may not necessarily be the same! 
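The 1D MGE step used in multi_gaussian_decomposition above can be sketched in isolation (the exponential test profile and component count are arbitrary):

import numpy as np
import lenstronomy.Util.multi_gauss_expansion as mge

# radial profile to decompose into concentric Gaussians (here an exponential disk)
r_array = np.logspace(-2, 1, 200)
flux_r = np.exp(-r_array / 0.5)
amplitudes, sigmas, norm = mge.mge_1d(r_array, flux_r, N=20)
# amplitudes and sigmas parameterize a sum of concentric Gaussians approximating flux_r
# over r_array (the amplitude normalization follows lenstronomy's MGE convention)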
@@ -129,25 +206,41 @@ def multi_gaussian_decomposition_ellipse(self, kwargs_light, model_bool_list=Non :return: keyword arguments of the elliptical multi Gaussian profile in lenstronomy conventions """ # estimate center - center_x, center_y = analysis_util.profile_center(kwargs_light, center_x, center_y) + center_x, center_y = analysis_util.profile_center( + kwargs_light, center_x, center_y + ) - e1, e2 = self.ellipticity(kwargs_light, center_x=center_x, center_y=center_y, - model_bool_list=model_bool_list, grid_spacing=grid_spacing * 2, grid_num=grid_num) + e1, e2 = self.ellipticity( + kwargs_light, + center_x=center_x, + center_y=center_y, + model_bool_list=model_bool_list, + grid_spacing=grid_spacing * 2, + grid_num=grid_num, + ) # MGE around major axis - amplitudes, sigmas, center_x, center_y = self.multi_gaussian_decomposition(kwargs_light, - model_bool_list=model_bool_list, - n_comp=n_comp, grid_spacing=grid_spacing, - grid_num=grid_num, center_x=center_x, - center_y=center_y) - kwargs_mge = {'amp': amplitudes, 'sigma': sigmas, 'center_x': center_x, 'center_y': center_y} - kwargs_mge['e1'] = e1 - kwargs_mge['e2'] = e2 + amplitudes, sigmas, center_x, center_y = self.multi_gaussian_decomposition( + kwargs_light, + model_bool_list=model_bool_list, + n_comp=n_comp, + grid_spacing=grid_spacing, + grid_num=grid_num, + center_x=center_x, + center_y=center_y, + ) + kwargs_mge = { + "amp": amplitudes, + "sigma": sigmas, + "center_x": center_x, + "center_y": center_y, + } + kwargs_mge["e1"] = e1 + kwargs_mge["e2"] = e2 return kwargs_mge def flux_components(self, kwargs_light, grid_num=400, grid_spacing=0.01): - """ - computes the total flux in each component of the model + """Computes the total flux in each component of the model. :param kwargs_light: :param grid_num: @@ -159,11 +252,13 @@ def flux_components(self, kwargs_light, grid_num=400, grid_spacing=0.01): x_grid, y_grid = util.make_grid(numPix=grid_num, deltapix=grid_spacing) kwargs_copy = copy.deepcopy(kwargs_light) for k, kwargs in enumerate(kwargs_light): - if 'center_x' in kwargs_copy[k]: - kwargs_copy[k]['center_x'] = 0 - kwargs_copy[k]['center_y'] = 0 - light = self._light_model.surface_brightness(x_grid, y_grid, kwargs_copy, k=k) - flux = np.sum(light) * grid_spacing ** 2 + if "center_x" in kwargs_copy[k]: + kwargs_copy[k]["center_x"] = 0 + kwargs_copy[k]["center_y"] = 0 + light = self._light_model.surface_brightness( + x_grid, y_grid, kwargs_copy, k=k + ) + flux = np.sum(light) * grid_spacing**2 R_h = analysis_util.half_light_radius(light, x_grid, y_grid) flux_list.append(flux) R_h_list.append(R_h) diff --git a/lenstronomy/Analysis/multi_patch_reconstruction.py b/lenstronomy/Analysis/multi_patch_reconstruction.py index 1d7dcfd75..8a606b481 100644 --- a/lenstronomy/Analysis/multi_patch_reconstruction.py +++ b/lenstronomy/Analysis/multi_patch_reconstruction.py @@ -9,13 +9,19 @@ class MultiPatchReconstruction(MultiBandImageReconstruction): - """ - this class illustrates the model of disconnected multi-patch modeling with 'joint-linear' option in one single - array. 
- """ + """This class illustrates the model of disconnected multi-patch modeling with + 'joint-linear' option in one single array.""" - def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type='joint-linear', - kwargs_likelihood=None, kwargs_pixel_grid=None, verbose=True): + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="joint-linear", + kwargs_likelihood=None, + kwargs_pixel_grid=None, + verbose=True, + ): """ :param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] @@ -32,12 +38,20 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type This can de-activated for speedup purposes (does not run linear inversion again), and reduces the number of prints. """ self._multi_band_list = multi_band_list - if not multi_band_type == 'joint-linear': - raise ValueError('MultiPatchPlot only works with multi_band_type="joint_linear". ' - 'Setting choice was %s. ' % multi_band_type) - MultiBandImageReconstruction.__init__(self, multi_band_list, kwargs_model, kwargs_params, - multi_band_type=multi_band_type, kwargs_likelihood=kwargs_likelihood, - verbose=verbose) + if not multi_band_type == "joint-linear": + raise ValueError( + 'MultiPatchPlot only works with multi_band_type="joint_linear". ' + "Setting choice was %s. " % multi_band_type + ) + MultiBandImageReconstruction.__init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type=multi_band_type, + kwargs_likelihood=kwargs_likelihood, + verbose=verbose, + ) if kwargs_pixel_grid is not None: self._pixel_grid_joint = PixelGrid(**kwargs_pixel_grid) else: @@ -54,9 +68,8 @@ def pixel_grid_joint(self): @staticmethod def _joint_pixel_grid(multi_band_list): - """ - Joint PixelGrid() class instance. - This routine only works when the individual patches have the same coordinate system orientation and pixel scale. + """Joint PixelGrid() class instance. This routine only works when the individual + patches have the same coordinate system orientation and pixel scale. 
:param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] :return: PixelGrid() class instance covering the entire window of the sky including all individual patches @@ -64,10 +77,13 @@ def _joint_pixel_grid(multi_band_list): nx, ny = 0, 0 kwargs_data = copy.deepcopy(multi_band_list[0][0]) - kwargs_pixel_grid = {'nx': 0, 'ny': 0, - 'transform_pix2angle': kwargs_data['transform_pix2angle'], - 'ra_at_xy_0': kwargs_data['ra_at_xy_0'], - 'dec_at_xy_0': kwargs_data['dec_at_xy_0']} + kwargs_pixel_grid = { + "nx": 0, + "ny": 0, + "transform_pix2angle": kwargs_data["transform_pix2angle"], + "ra_at_xy_0": kwargs_data["ra_at_xy_0"], + "dec_at_xy_0": kwargs_data["dec_at_xy_0"], + } pixel_grid = PixelGrid(**kwargs_pixel_grid) Mpix2a = pixel_grid.transform_pix2angle @@ -88,17 +104,18 @@ def _joint_pixel_grid(multi_band_list): nx, ny = _update_frame_size(nx, ny, x_min, y_min, nx_i, ny_i) # select minimum in x- and y-axis # transform back in RA/DEC and make this the new zero point of the base coordinate system - ra_at_xy_0_new, dec_at_xy_0_new = pixel_grid.map_pix2coord(np.minimum(x_min, 0), np.minimum(y_min, 0)) - kwargs_pixel_grid['ra_at_xy_0'] = ra_at_xy_0_new - kwargs_pixel_grid['dec_at_xy_0'] = dec_at_xy_0_new - kwargs_pixel_grid['nx'] = nx - kwargs_pixel_grid['ny'] = ny + ra_at_xy_0_new, dec_at_xy_0_new = pixel_grid.map_pix2coord( + np.minimum(x_min, 0), np.minimum(y_min, 0) + ) + kwargs_pixel_grid["ra_at_xy_0"] = ra_at_xy_0_new + kwargs_pixel_grid["dec_at_xy_0"] = dec_at_xy_0_new + kwargs_pixel_grid["nx"] = nx + kwargs_pixel_grid["ny"] = ny pixel_grid = PixelGrid(**kwargs_pixel_grid) return pixel_grid def image_joint(self): - """ - patch together the individual patches of data and models + """Patch together the individual patches of data and models. 
:return: image_joint, model_joint, norm_residuals_joint """ @@ -110,7 +127,9 @@ def image_joint(self): if model_band is not None: image_model = model_band.image_model_class kwargs_params = model_band.kwargs_model - model = image_model._image(**kwargs_params) # TODO: avoid using private definitions uses sub-set of the model parameters + model = image_model._image( + **kwargs_params + ) # TODO: avoid using private definitions uses sub-set of the model parameters data_class_i = image_model.Data # evaluate pixel of zero point with the base coordinate system ra0, dec0 = data_class_i.radec_at_xy_0 @@ -118,16 +137,22 @@ def image_joint(self): y_min = int(y_min) x_min = int(x_min) nx_i, ny_i = data_class_i.num_pixel_axes - image_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = data_class_i.data - model_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = model - norm_residuals_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = model_band.norm_residuals + image_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = data_class_i.data + model_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = model + norm_residuals_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = model_band.norm_residuals return image_joint, model_joint, norm_residuals_joint def lens_model_joint(self): - """ - patch together the individual patches of the lens model (can be discontinues) + """Patch together the individual patches of the lens model (can be discontinues) - :return: 2d numpy arrays of kappa_joint, magnification_joint, alpha_x_joint, alpha_y_joint + :return: 2d numpy arrays of kappa_joint, magnification_joint, alpha_x_joint, + alpha_y_joint """ nx, ny = self._pixel_grid_joint.num_pixel_axes kappa_joint = np.zeros((ny, nx)) @@ -137,7 +162,7 @@ def lens_model_joint(self): if model_band is not None: image_model = model_band.image_model_class kwargs_params = model_band.kwargs_model - kwargs_lens = kwargs_params['kwargs_lens'] + kwargs_lens = kwargs_params["kwargs_lens"] lens_model = image_model.LensModel x_grid, y_grid = image_model.Data.pixel_coordinates kappa = lens_model.kappa(x_grid, y_grid, kwargs_lens) @@ -151,44 +176,65 @@ def lens_model_joint(self): y_min = int(y_min) x_min = int(x_min) nx_i, ny_i = data_class_i.num_pixel_axes - kappa_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = kappa - magnification_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = magnification - alpha_x_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = alpha_x - alpha_y_joint[int(y_min):int(y_min + ny_i), int(x_min):int(x_min + nx_i)] = alpha_y + kappa_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = kappa + magnification_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = magnification + alpha_x_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = alpha_x + alpha_y_joint[ + int(y_min) : int(y_min + ny_i), int(x_min) : int(x_min + nx_i) + ] = alpha_y return kappa_joint, magnification_joint, alpha_x_joint, alpha_y_joint def source(self, num_pix, delta_pix, center=None): - """ - source in the same coordinate system as the image + """Source in the same coordinate system as the image. 
:param num_pix: number of pixels per axes :param delta_pix: pixel size :param center: list with two entries [center_x, center_y] (optional) - :return: 2d surface brightness grid of the reconstructed source and PixelGrid() instance of source grid + :return: 2d surface brightness grid of the reconstructed source and PixelGrid() + instance of source grid """ - Mpix2coord = self._pixel_grid_joint.transform_pix2angle * delta_pix / self._pixel_grid_joint.pixel_width - x_grid_source, y_grid_source = util.make_grid_transformed(num_pix, Mpix2Angle=Mpix2coord) + Mpix2coord = ( + self._pixel_grid_joint.transform_pix2angle + * delta_pix + / self._pixel_grid_joint.pixel_width + ) + x_grid_source, y_grid_source = util.make_grid_transformed( + num_pix, Mpix2Angle=Mpix2coord + ) ra_at_xy_0, dec_at_xy_0 = x_grid_source[0], y_grid_source[0] image_model = self.model_band_list[0].image_model_class kwargs_model = self.model_band_list[0].kwargs_model - kwargs_source = kwargs_model['kwargs_source'] + kwargs_source = kwargs_model["kwargs_source"] center_x = 0 center_y = 0 if center is not None: center_x, center_y = center[0], center[1] elif len(kwargs_source) > 0: - center_x = kwargs_source[0]['center_x'] - center_y = kwargs_source[0]['center_y'] + center_x = kwargs_source[0]["center_x"] + center_y = kwargs_source[0]["center_y"] x_grid_source += center_x y_grid_source += center_y - pixel_grid = PixelGrid(nx=num_pix, ny=num_pix,transform_pix2angle=Mpix2coord, ra_at_xy_0=ra_at_xy_0 + center_x, - dec_at_xy_0=dec_at_xy_0 + center_y) + pixel_grid = PixelGrid( + nx=num_pix, + ny=num_pix, + transform_pix2angle=Mpix2coord, + ra_at_xy_0=ra_at_xy_0 + center_x, + dec_at_xy_0=dec_at_xy_0 + center_y, + ) - source = image_model.SourceModel.surface_brightness(x_grid_source, y_grid_source, kwargs_source) - source = util.array2image(source) * delta_pix ** 2 + source = image_model.SourceModel.surface_brightness( + x_grid_source, y_grid_source, kwargs_source + ) + source = util.array2image(source) * delta_pix**2 return source, pixel_grid diff --git a/lenstronomy/Analysis/td_cosmography.py b/lenstronomy/Analysis/td_cosmography.py index 407b07941..2eab763a8 100644 --- a/lenstronomy/Analysis/td_cosmography.py +++ b/lenstronomy/Analysis/td_cosmography.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -8,22 +8,32 @@ from lenstronomy.Cosmo.lens_cosmo import LensCosmo from lenstronomy.Analysis.kinematics_api import KinematicsAPI -__all__ = ['TDCosmography'] +__all__ = ["TDCosmography"] class TDCosmography(KinematicsAPI): - """ - class equipped to perform a cosmographic analysis from a lens model with added measurements of time delays and - kinematics. - This class does not require any cosmological knowledge and can return angular diameter distance estimates - self-consistently integrating the kinematics routines and time delay estimates in the lens modeling. - This description follows Birrer et al. 2016, 2019. - + """Class equipped to perform a cosmographic analysis from a lens model with added + measurements of time delays and kinematics. + This class does not require any cosmological knowledge and can return angular + diameter distance estimates self-consistently integrating the kinematics routines + and time delay estimates in the lens modeling. This description follows Birrer et + al. 2016, 2019. 
""" - def __init__(self, z_lens, z_source, kwargs_model, cosmo_fiducial=None, lens_model_kinematics_bool=None, - light_model_kinematics_bool=None, kwargs_seeing=None, kwargs_aperture=None, anisotropy_model=None, - **kwargs_kin_api): + + def __init__( + self, + z_lens, + z_source, + kwargs_model, + cosmo_fiducial=None, + lens_model_kinematics_bool=None, + light_model_kinematics_bool=None, + kwargs_seeing=None, + kwargs_aperture=None, + anisotropy_model=None, + **kwargs_kin_api + ): """ :param z_lens: redshift of deflector @@ -49,136 +59,210 @@ def __init__(self, z_lens, z_source, kwargs_model, cosmo_fiducial=None, lens_mod self._z_lens = z_lens self._z_source = z_source self._cosmo_fiducial = cosmo_fiducial - self._lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial) - self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, extinction_class = class_creator.create_class_instances(all_models=True, **kwargs_model) - super(TDCosmography, self).__init__(z_lens=z_lens, z_source=z_source, kwargs_model=kwargs_model, - cosmo=cosmo_fiducial, lens_model_kinematics_bool=lens_model_kinematics_bool, - light_model_kinematics_bool=light_model_kinematics_bool, - kwargs_seeing=kwargs_seeing, kwargs_aperture=kwargs_aperture, - anisotropy_model=anisotropy_model, **kwargs_kin_api) - - def time_delays(self, kwargs_lens, kwargs_ps, kappa_ext=0, original_ps_position=False): - """ - predicts the time delays of the image positions given the fiducial cosmology relative to a straight line - without lensing. - Negative values correspond to images arriving earlier, and positive signs correspond to images arriving later. + self._lens_cosmo = LensCosmo( + z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial + ) + ( + self.LensModel, + self.SourceModel, + self.LensLightModel, + self.PointSource, + extinction_class, + ) = class_creator.create_class_instances(all_models=True, **kwargs_model) + super(TDCosmography, self).__init__( + z_lens=z_lens, + z_source=z_source, + kwargs_model=kwargs_model, + cosmo=cosmo_fiducial, + lens_model_kinematics_bool=lens_model_kinematics_bool, + light_model_kinematics_bool=light_model_kinematics_bool, + kwargs_seeing=kwargs_seeing, + kwargs_aperture=kwargs_aperture, + anisotropy_model=anisotropy_model, + **kwargs_kin_api + ) + + def time_delays( + self, kwargs_lens, kwargs_ps, kappa_ext=0, original_ps_position=False + ): + """Predicts the time delays of the image positions given the fiducial cosmology + relative to a straight line without lensing. Negative values correspond to + images arriving earlier, and positive signs correspond to images arriving later. 
:param kwargs_lens: lens model parameters :param kwargs_ps: point source parameters :param kappa_ext: external convergence (optional) - :param original_ps_position: boolean (only applies when first point source model is of type 'LENSED_POSITION'), - uses the image positions in the model parameters and does not re-compute images (which might be differently ordered) - in case of the lens equation solver + :param original_ps_position: boolean (only applies when first point source model + is of type 'LENSED_POSITION'), uses the image positions in the model + parameters and does not re-compute images (which might be differently + ordered) in case of the lens equation solver :return: time delays at image positions for the fixed cosmology in units of days """ - fermat_pot = self.fermat_potential(kwargs_lens, kwargs_ps, original_ps_position=original_ps_position) + fermat_pot = self.fermat_potential( + kwargs_lens, kwargs_ps, original_ps_position=original_ps_position + ) time_delay = self._lens_cosmo.time_delay_units(fermat_pot, kappa_ext) return time_delay def fermat_potential(self, kwargs_lens, kwargs_ps, original_ps_position=False): - """ - Fermat potential (negative sign means earlier arrival time) + """Fermat potential (negative sign means earlier arrival time) :param kwargs_lens: lens model keyword argument list :param kwargs_ps: point source keyword argument list - :param original_ps_position: boolean (only applies when first point source model is of type 'LENSED_POSITION'), - uses the image positions in the model parameters and does not re-compute images (which might be differently ordered) - in case of the lens equation solver - :return: Fermat potential of all the image positions in the first point source list entry + :param original_ps_position: boolean (only applies when first point source model + is of type 'LENSED_POSITION'), uses the image positions in the model + parameters and does not re-compute images (which might be differently + ordered) in case of the lens equation solver + :return: Fermat potential of all the image positions in the first point source + list entry """ - ra_pos, dec_pos = self.PointSource.image_position(kwargs_ps, kwargs_lens, original_position=original_ps_position) + ra_pos, dec_pos = self.PointSource.image_position( + kwargs_ps, kwargs_lens, original_position=original_ps_position + ) ra_pos = ra_pos[0] dec_pos = dec_pos[0] - ra_source, dec_source = self.LensModel.ray_shooting(ra_pos, dec_pos, kwargs_lens) + ra_source, dec_source = self.LensModel.ray_shooting( + ra_pos, dec_pos, kwargs_lens + ) sigma_source = np.sqrt(np.var(ra_source) + np.var(dec_source)) if sigma_source > 0.001: - Warning('Source position computed from the different image positions do not trace back to the same position! ' - 'The error is %s mas and may be larger than what is required for an accurate relative time delay estimate!' - 'See e.g. Birrer & Treu 2019.' % sigma_source * 1000) + Warning( + "Source position computed from the different image positions do not trace back to the same position! " + "The error is %s mas and may be larger than what is required for an accurate relative time delay estimate!" + "See e.g. Birrer & Treu 2019." 
% sigma_source * 1000 + ) ra_source = np.mean(ra_source) dec_source = np.mean(dec_source) - fermat_pot = self.LensModel.fermat_potential(ra_pos, dec_pos, kwargs_lens, ra_source, dec_source) + fermat_pot = self.LensModel.fermat_potential( + ra_pos, dec_pos, kwargs_lens, ra_source, dec_source + ) return fermat_pot - def velocity_dispersion_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None, - theta_E=None, gamma=None): - """ - sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy) - (Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and - cosmological independent quantity only depending on angular units. This function returns J given the lens - and light parameters and the anisotropy choice without an external mass sheet correction. + def velocity_dispersion_dimension_less( + self, + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=None, + theta_E=None, + gamma=None, + ): + """Sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy) (Equation + 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a + dimensionless and cosmological independent quantity only depending on angular + units. This function returns J given the lens and light parameters and the + anisotropy choice without an external mass sheet correction. :param kwargs_lens: lens model keyword arguments :param kwargs_lens_light: lens light model keyword arguments :param kwargs_anisotropy: stellar anisotropy keyword arguments - :param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional, - if set to None will be computed in this function with default settings that may not be accurate. + :param r_eff: projected half-light radius of the stellar light associated with + the deflector galaxy, optional, if set to None will be computed in this + function with default settings that may not be accurate. :param theta_E: pre-computed Einstein radius (optional) :param gamma: pre-computed power-law slope of mass profile :return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019) """ - sigma_v = self.velocity_dispersion(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, - kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E, - gamma=gamma) - sigma_v *= 1000 # convert from [km/s] to [m/s] - J = sigma_v ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2 + sigma_v = self.velocity_dispersion( + kwargs_lens=kwargs_lens, + kwargs_lens_light=kwargs_lens_light, + kwargs_anisotropy=kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + gamma=gamma, + ) + sigma_v *= 1000 # convert from [km/s] to [m/s] + J = sigma_v**2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c**2 return J - def velocity_dispersion_map_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None, - theta_E=None, gamma=None): - """ - sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy) - (Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and - cosmological independent quantity only depending on angular units. This function returns J given the lens - and light parameters and the anisotropy choice without an external mass sheet correction. - This routine computes the IFU map of the kinematic quantities. 
+ def velocity_dispersion_map_dimension_less( + self, + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=None, + theta_E=None, + gamma=None, + ): + """Sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy) (Equation + 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a + dimensionless and cosmological independent quantity only depending on angular + units. This function returns J given the lens and light parameters and the + anisotropy choice without an external mass sheet correction. This routine + computes the IFU map of the kinematic quantities. :param kwargs_lens: lens model keyword arguments :param kwargs_lens_light: lens light model keyword arguments :param kwargs_anisotropy: stellar anisotropy keyword arguments - :param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional, - if set to None will be computed in this function with default settings that may not be accurate. + :param r_eff: projected half-light radius of the stellar light associated with + the deflector galaxy, optional, if set to None will be computed in this + function with default settings that may not be accurate. :return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019) """ - sigma_v_map = self.velocity_dispersion_map(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, - kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E, - gamma=gamma) + sigma_v_map = self.velocity_dispersion_map( + kwargs_lens=kwargs_lens, + kwargs_lens_light=kwargs_lens_light, + kwargs_anisotropy=kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + gamma=gamma, + ) sigma_v_map *= 1000 # convert from [km/s] to [m/s] - J_map = sigma_v_map ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2 + J_map = ( + sigma_v_map**2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c**2 + ) return J_map @staticmethod - def ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=0, kappa_ds=0, kappa_d=0): - """ - Time-delay distance in units of Mpc from the modeled Fermat potential and measured time delay from an image pair. + def ddt_from_time_delay( + d_fermat_model, dt_measured, kappa_s=0, kappa_ds=0, kappa_d=0 + ): + """Time-delay distance in units of Mpc from the modeled Fermat potential and + measured time delay from an image pair. 
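Because ddt_from_time_delay is a static method, it can be called without building a class instance (and without any cosmology); an illustrative inversion with arbitrary numbers:

from lenstronomy.Analysis.td_cosmography import TDCosmography

ddt = TDCosmography.ddt_from_time_delay(d_fermat_model=0.5, dt_measured=56.0)
# roughly 4.0e3 Mpc for these inputs, with all external convergences left at zero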
- :param d_fermat_model: relative Fermat potential between two images from the same source in units arcsec^2 - :param dt_measured: measured time delay between the same image pair in units of days + :param d_fermat_model: relative Fermat potential between two images from the + same source in units arcsec^2 + :param dt_measured: measured time delay between the same image pair in units of + days :param kappa_s: external convergence from observer to source :param kappa_ds: external convergence from lens to source :param kappa_d: external convergence form observer to lens :return: D_dt, time-delay distance """ - D_dt_model = dt_measured * const.day_s * const.c / const.Mpc / d_fermat_model / const.arcsec ** 2 - D_dt = D_dt_model * (1-kappa_ds) / (1 - kappa_s) / (1 - kappa_d) + D_dt_model = ( + dt_measured + * const.day_s + * const.c + / const.Mpc + / d_fermat_model + / const.arcsec**2 + ) + D_dt = D_dt_model * (1 - kappa_ds) / (1 - kappa_s) / (1 - kappa_d) return D_dt @staticmethod def ds_dds_from_kinematics(sigma_v, J, kappa_s=0, kappa_ds=0): - """ - computes the estimate of the ratio of angular diameter distances Ds/Dds from the kinematic estimate of the lens - and the measured dispersion. + """Computes the estimate of the ratio of angular diameter distances Ds/Dds from + the kinematic estimate of the lens and the measured dispersion. :param sigma_v: velocity dispersion [km/s] :param J: dimensionless kinematic constraint (see Birrer et al. 2016, 2019) :return: Ds/Dds """ - ds_dds_model = (sigma_v * 1000) ** 2 / const.c ** 2 / J + ds_dds_model = (sigma_v * 1000) ** 2 / const.c**2 / J ds_dds = ds_dds_model * (1 - kappa_ds) / (1 - kappa_s) return ds_dds - def ddt_dd_from_time_delay_and_kinematics(self, d_fermat_model, dt_measured, sigma_v_measured, J, kappa_s=0, - kappa_ds=0, kappa_d=0): + def ddt_dd_from_time_delay_and_kinematics( + self, + d_fermat_model, + dt_measured, + sigma_v_measured, + J, + kappa_s=0, + kappa_ds=0, + kappa_d=0, + ): """ :param d_fermat_model: relative Fermat potential in units arcsec^2 @@ -190,7 +274,15 @@ def ddt_dd_from_time_delay_and_kinematics(self, d_fermat_model, dt_measured, sig :param kappa_d: LOS convergence from observer to deflector :return: D_dt, D_d """ - ddt = self.ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=kappa_s, kappa_ds=kappa_ds, kappa_d=kappa_d) - ds_dds = self.ds_dds_from_kinematics(sigma_v_measured, J, kappa_s=kappa_s, kappa_ds=kappa_ds) + ddt = self.ddt_from_time_delay( + d_fermat_model, + dt_measured, + kappa_s=kappa_s, + kappa_ds=kappa_ds, + kappa_d=kappa_d, + ) + ds_dds = self.ds_dds_from_kinematics( + sigma_v_measured, J, kappa_s=kappa_s, kappa_ds=kappa_ds + ) dd = ddt / ds_dds / (1 + self._z_lens) return ddt, dd diff --git a/lenstronomy/Conf/config_loader.py b/lenstronomy/Conf/config_loader.py index 58a15fe4f..cca3148be 100644 --- a/lenstronomy/Conf/config_loader.py +++ b/lenstronomy/Conf/config_loader.py @@ -9,12 +9,12 @@ try: from xdg.BaseDirectory import xdg_config_home except ImportError: - xdg_config_home = '~/.config' + xdg_config_home = "~/.config" user_config_file = os.path.join(xdg_config_home, "lenstronomy", "config.yaml") module_path = os.path.dirname(lenstronomy.__file__) -default_config_file = os.path.join(module_path, 'Conf', 'conf_default.yaml') +default_config_file = os.path.join(module_path, "Conf", "conf_default.yaml") if os.path.exists(user_config_file): conf_file = user_config_file @@ -26,13 +26,13 @@ # scalar values to the Python the dictionary format conf = yaml.safe_load(file) # conf = yaml.load(file, 
Loader=yaml.FullLoader) - numba_conf = conf['numba'] - nopython = numba_conf['nopython'] - cache = numba_conf['cache'] - parallel = numba_conf['parallel'] - numba_enabled = numba_conf['enable'] - fastmath = numba_conf['fastmath'] - error_model = numba_conf['error_model'] + numba_conf = conf["numba"] + nopython = numba_conf["nopython"] + cache = numba_conf["cache"] + parallel = numba_conf["parallel"] + numba_enabled = numba_conf["enable"] + fastmath = numba_conf["fastmath"] + error_model = numba_conf["error_model"] def numba_conf(): @@ -45,7 +45,7 @@ def numba_conf(): # scalar values to the Python the dictionary format conf = yaml.safe_load(file) # conf = yaml.load(file, Loader=yaml.FullLoader) - numba_conf = conf['numba'] + numba_conf = conf["numba"] return numba_conf @@ -56,5 +56,5 @@ def conventions_conf(): """ with open(conf_file) as file: conf = yaml.safe_load(file) - conventions_conf = conf['conventions'] + conventions_conf = conf["conventions"] return conventions_conf diff --git a/lenstronomy/Cosmo/__init__.py b/lenstronomy/Cosmo/__init__.py index c3412e3c2..d34951c17 100644 --- a/lenstronomy/Cosmo/__init__.py +++ b/lenstronomy/Cosmo/__init__.py @@ -1,4 +1,4 @@ -__author__ = 'Simon Birrer' -__email__ = 'sibirrer@gmail.com' -__version__ = '0.1.0' -__credits__ = 'ETH Zurich, UCLA' \ No newline at end of file +__author__ = "Simon Birrer" +__email__ = "sibirrer@gmail.com" +__version__ = "0.1.0" +__credits__ = "ETH Zurich, UCLA" diff --git a/lenstronomy/Cosmo/_cosmo_interp_astropy_v4.py b/lenstronomy/Cosmo/_cosmo_interp_astropy_v4.py index 12840b939..e6aea0c87 100644 --- a/lenstronomy/Cosmo/_cosmo_interp_astropy_v4.py +++ b/lenstronomy/Cosmo/_cosmo_interp_astropy_v4.py @@ -1,18 +1,21 @@ import astropy + if float(astropy.__version__[0]) < 5.0: from astropy.cosmology.core import vectorize_if_needed else: - Warning('This routines are only supported for astropy version <5. Current version is %s.' - % astropy.__version__) + Warning( + "This routines are only supported for astropy version <5. Current version is %s." + % astropy.__version__ + ) # from scipy.integrate import quad class CosmoInterp(object): - """ - class which interpolates the comoving transfer distance and then computes angular diameter distances from it - This class is modifying the astropy.cosmology routines - """ + """Class which interpolates the comoving transfer distance and then computes angular + diameter distances from it This class is modifying the astropy.cosmology + routines.""" + def __init__(self, cosmo): """ @@ -21,8 +24,8 @@ def __init__(self, cosmo): self._cosmo = cosmo def _integral_comoving_distance_z1z2(self, z1, z2): - """ Comoving line-of-sight distance in Mpc between objects at - redshifts z1 and z2. + """Comoving line-of-sight distance in Mpc between objects at redshifts z1 and + z2. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble @@ -39,5 +42,10 @@ def _integral_comoving_distance_z1z2(self, z1, z2): Comoving distance in Mpc between each input redshift. 
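The quantity wrapped here is the standard comoving-distance integral D_C(z1, z2) = D_H * integral from z1 to z2 of dz / E(z); a standalone flat-LambdaCDM version for orientation (plain scipy, independent of the astropy internals used in this class):

import numpy as np
from scipy.integrate import quad

H0, Om0 = 70.0, 0.3                  # km/s/Mpc and matter density for flat LambdaCDM
hubble_distance = 299792.458 / H0    # Mpc

def inv_efunc(z):
    """1 / E(z) for flat LambdaCDM."""
    return 1.0 / np.sqrt(Om0 * (1 + z) ** 3 + (1 - Om0))

def comoving_distance_z1z2(z1, z2):
    """Comoving line-of-sight distance between z1 and z2 in Mpc."""
    return hubble_distance * quad(inv_efunc, z1, z2)[0]

d_c = comoving_distance_z1z2(0.0, 0.5)  # roughly 1.9e3 Mpc for these parameters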
""" - f = lambda z1, z2: quad(self._cosmo._inv_efunc_scalar, z1, z2, args=self._cosmo._inv_efunc_scalar_args)[0] + f = lambda z1, z2: quad( + self._cosmo._inv_efunc_scalar, + z1, + z2, + args=self._cosmo._inv_efunc_scalar_args, + )[0] return self._cosmo._hubble_distance * vectorize_if_needed(f, z1, z2) diff --git a/lenstronomy/Cosmo/_cosmo_interp_astropy_v5.py b/lenstronomy/Cosmo/_cosmo_interp_astropy_v5.py index 63230a730..3310e4498 100644 --- a/lenstronomy/Cosmo/_cosmo_interp_astropy_v5.py +++ b/lenstronomy/Cosmo/_cosmo_interp_astropy_v5.py @@ -1,17 +1,20 @@ import astropy from scipy.integrate import quad + if float(astropy.__version__[0]) < 5.0: - Warning('This routines are only supported for astropy version >=5. Current version is %s.' - % astropy.__version__) + Warning( + "This routines are only supported for astropy version >=5. Current version is %s." + % astropy.__version__ + ) else: from astropy.cosmology.utils import vectorize_redshift_method class CosmoInterp(object): - """ - class which interpolates the comoving transfer distance and then computes angular diameter distances from it - This class is modifying the astropy.cosmology routines - """ + """Class which interpolates the comoving transfer distance and then computes angular + diameter distances from it This class is modifying the astropy.cosmology + routines.""" + def __init__(self, cosmo): """ @@ -20,11 +23,9 @@ def __init__(self, cosmo): self._cosmo = cosmo def _integral_comoving_distance_z1z2(self, z1, z2): - """ - Comoving line-of-sight distance in Mpc between objects at redshifts - ``z1`` and ``z2``. The comoving distance along the line-of-sight - between two objects remains constant with time for objects in the - Hubble flow. + """Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` + and ``z2``. The comoving distance along the line-of-sight between two objects + remains constant with time for objects in the Hubble flow. Parameters ---------- @@ -37,12 +38,14 @@ def _integral_comoving_distance_z1z2(self, z1, z2): Comoving distance in Mpc between each input redshift. """ - return self._cosmo._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) + return ( + self._cosmo._hubble_distance + * self._integral_comoving_distance_z1z2_scalar(z1, z2) + ) @vectorize_redshift_method(nin=2) def _integral_comoving_distance_z1z2_scalar(self, z1, z2): - """ - Comoving line-of-sight distance between objects at redshifts ``z1`` and + """Comoving line-of-sight distance between objects at redshifts ``z1`` and ``z2``. Value in Mpc. The comoving distance along the line-of-sight between two objects @@ -59,4 +62,9 @@ def _integral_comoving_distance_z1z2_scalar(self, z1, z2): Comoving distance in Mpc between each input redshift. Returns `float` if input scalar, `~numpy.ndarray` otherwise. 
""" - return quad(self._cosmo._inv_efunc_scalar, z1, z2, args=self._cosmo._inv_efunc_scalar_args)[0] + return quad( + self._cosmo._inv_efunc_scalar, + z1, + z2, + args=self._cosmo._inv_efunc_scalar_args, + )[0] diff --git a/lenstronomy/Cosmo/background.py b/lenstronomy/Cosmo/background.py index 30e5df32a..b168c88cc 100644 --- a/lenstronomy/Cosmo/background.py +++ b/lenstronomy/Cosmo/background.py @@ -1,16 +1,15 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import lenstronomy.Util.constants as const from lenstronomy.Cosmo.cosmo_interp import CosmoInterp -__all__ = ['Background'] +__all__ = ["Background"] class Background(object): - """ - class to compute cosmological distances - """ + """Class to compute cosmological distances.""" + def __init__(self, cosmo=None, interp=False, **kwargs_interp): """ @@ -23,6 +22,7 @@ def __init__(self, cosmo=None, interp=False, **kwargs_interp): if cosmo is None: from astropy.cosmology import default_cosmology + cosmo = default_cosmology.get() if interp: self.cosmo = CosmoInterp(cosmo, **kwargs_interp) @@ -31,13 +31,12 @@ def __init__(self, cosmo=None, interp=False, **kwargs_interp): @staticmethod def a_z(z): - """ - returns scale factor (a_0 = 1) for given redshift + """Returns scale factor (a_0 = 1) for given redshift. :param z: redshift :return: scale factor """ - return 1./(1+z) + return 1.0 / (1 + z) def d_xy(self, z_observer, z_source): """ @@ -50,14 +49,18 @@ def d_xy(self, z_observer, z_source): return D_xy.value def ddt(self, z_lens, z_source): - """ - time-delay distance + """Time-delay distance. :param z_lens: redshift of lens :param z_source: redshift of source :return: time-delay distance in units of proper Mpc """ - return self.d_xy(0, z_lens) * self.d_xy(0, z_source) / self.d_xy(z_lens, z_source) * (1 + z_lens) + return ( + self.d_xy(0, z_lens) + * self.d_xy(0, z_source) + / self.d_xy(z_lens, z_source) + * (1 + z_lens) + ) def T_xy(self, z_observer, z_source): """ @@ -72,10 +75,9 @@ def T_xy(self, z_observer, z_source): @property def rho_crit(self): - """ - critical density + """Critical density. :return: value in M_sol/Mpc^3 """ - h = self.cosmo.H(0).value / 100. - return 3 * h ** 2 / (8 * np.pi * const.G) * 10 ** 10 * const.Mpc / const.M_sun + h = self.cosmo.H(0).value / 100.0 + return 3 * h**2 / (8 * np.pi * const.G) * 10**10 * const.Mpc / const.M_sun diff --git a/lenstronomy/Cosmo/cosmo_interp.py b/lenstronomy/Cosmo/cosmo_interp.py index d029a5f46..0e1765383 100644 --- a/lenstronomy/Cosmo/cosmo_interp.py +++ b/lenstronomy/Cosmo/cosmo_interp.py @@ -1,9 +1,13 @@ import astropy + if float(astropy.__version__[0]) < 5.0: from astropy.cosmology.core import isiterable - DeprecationWarning('Astropy<5 is going to be deprecated soon. This is in combination with Python version<3.8.' - 'We recommend you to update astropy to the latest versionbut keep supporting your settings for ' - 'the time being.') + + DeprecationWarning( + "Astropy<5 is going to be deprecated soon. This is in combination with Python version<3.8." + "We recommend you to update astropy to the latest versionbut keep supporting your settings for " + "the time being." 
+ ) else: from astropy.cosmology.utils import isiterable # @@ -15,10 +19,10 @@ class CosmoInterp(object): - """ - class which interpolates the comoving transfer distance and then computes angular diameter distances from it - This class is modifying the astropy.cosmology routines - """ + """Class which interpolates the comoving transfer distance and then computes angular + diameter distances from it This class is modifying the astropy.cosmology + routines.""" + def __init__(self, cosmo, z_stop, num_interp): """ @@ -28,13 +32,22 @@ def __init__(self, cosmo, z_stop, num_interp): """ self._cosmo = cosmo if float(astropy.__version__[0]) < 5.0: - from lenstronomy.Cosmo._cosmo_interp_astropy_v4 import CosmoInterp as CosmoInterp_ + from lenstronomy.Cosmo._cosmo_interp_astropy_v4 import ( + CosmoInterp as CosmoInterp_, + ) + self._comoving_interp = CosmoInterp_(cosmo) else: - from lenstronomy.Cosmo._cosmo_interp_astropy_v5 import CosmoInterp as CosmoInterp_ + from lenstronomy.Cosmo._cosmo_interp_astropy_v5 import ( + CosmoInterp as CosmoInterp_, + ) + self._comoving_interp = CosmoInterp_(cosmo) - self._comoving_distance_interpolation_func = self._interpolate_comoving_distance(z_start=0, z_stop=z_stop, - num_interp=num_interp) + self._comoving_distance_interpolation_func = ( + self._interpolate_comoving_distance( + z_start=0, z_stop=z_stop, num_interp=num_interp + ) + ) def _comoving_distance_interp(self, z): """ @@ -45,7 +58,7 @@ def _comoving_distance_interp(self, z): return self._comoving_distance_interpolation_func(z) * units.Mpc def angular_diameter_distance(self, z): - """ Angular diameter distance in Mpc at a given redshift. + """Angular diameter distance in Mpc at a given redshift. This gives the proper (sometimes called 'physical') transverse distance corresponding to an angle of 1 radian for an object @@ -68,11 +81,11 @@ def angular_diameter_distance(self, z): if isiterable(z): z = np.asarray(z) - return self.comoving_transverse_distance(z) / (1. + z) + return self.comoving_transverse_distance(z) / (1.0 + z) def angular_diameter_distance_z1z2(self, z1, z2): - """ Angular diameter distance between objects at 2 redshifts. - Useful for gravitational lensing. + """Angular diameter distance between objects at 2 redshifts. Useful for + gravitational lensing. Parameters ---------- @@ -84,15 +97,14 @@ def angular_diameter_distance_z1z2(self, z1, z2): d : `~astropy.units.Quantity`, shape (N,) or single if input scalar The angular diameter distance between each input redshift pair. - """ z1 = np.asanyarray(z1) z2 = np.asanyarray(z2) - return self._comoving_transverse_distance_z1z2(z1, z2) / (1. + z2) + return self._comoving_transverse_distance_z1z2(z1, z2) / (1.0 + z2) def comoving_transverse_distance(self, z): - """ Comoving transverse distance in Mpc at a given redshift. + """Comoving transverse distance in Mpc at a given redshift. This value is the transverse comoving distance at redshift ``z`` corresponding to an angular separation of 1 radian. This is @@ -140,7 +152,6 @@ def _comoving_transverse_distance_z1z2(self, z1, z2): ----- This quantity is also called the 'proper motion distance' in some texts. - """ Ok0 = self._cosmo._Ok0 @@ -155,8 +166,8 @@ def _comoving_transverse_distance_z1z2(self, z1, z2): return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value) def _comoving_distance_z1z2(self, z1, z2): - """ Comoving line-of-sight distance in Mpc between objects at - redshifts z1 and z2. + """Comoving line-of-sight distance in Mpc between objects at redshifts z1 and + z2. 
The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble @@ -175,19 +186,20 @@ def _comoving_distance_z1z2(self, z1, z2): return self._comoving_distance_interp(z2) - self._comoving_distance_interp(z1) def _interpolate_comoving_distance(self, z_start, z_stop, num_interp): - """ - interpolates the comoving distance + """Interpolates the comoving distance. :param z_start: starting redshift range (should be zero) :param z_stop: highest redshift to which to compute the comoving distance :param num_interp: number of steps uniformly spread in redshift :return: interpolation object in this class """ - z_steps = np.linspace(start=z_start, stop=z_stop, num=num_interp+1) + z_steps = np.linspace(start=z_start, stop=z_stop, num=num_interp + 1) running_dist = 0 - ang_dist = np.zeros(num_interp+1) + ang_dist = np.zeros(num_interp + 1) for i in range(num_interp): - delta_dist = self._comoving_interp._integral_comoving_distance_z1z2(z_steps[i], z_steps[i+1]) + delta_dist = self._comoving_interp._integral_comoving_distance_z1z2( + z_steps[i], z_steps[i + 1] + ) running_dist += delta_dist.value - ang_dist[i+1] = copy.deepcopy(running_dist) + ang_dist[i + 1] = copy.deepcopy(running_dist) return interp1d(z_steps, ang_dist) diff --git a/lenstronomy/Cosmo/cosmo_solver.py b/lenstronomy/Cosmo/cosmo_solver.py index e7e6b8cdb..198d421c7 100644 --- a/lenstronomy/Cosmo/cosmo_solver.py +++ b/lenstronomy/Cosmo/cosmo_solver.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import scipy.optimize import scipy.interpolate as interpolate @@ -8,6 +8,7 @@ from lenstronomy.Cosmo.lens_cosmo import LensCosmo from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -21,24 +22,24 @@ def cosmo2angular_diameter_distances(H_0, omega_m, z_lens, z_source): :param z_source: source redshift :return: angular diameter distances Dd and Ds/Dds """ - cosmo = FlatLambdaCDM(H0=H_0, Om0=omega_m, Ob0=0.) + cosmo = FlatLambdaCDM(H0=H_0, Om0=omega_m, Ob0=0.0) lensCosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) Dd = lensCosmo.dd Ds = lensCosmo.ds Dds = lensCosmo.dds - return Dd, Ds/Dds + return Dd, Ds / Dds @export def ddt2h0(ddt, z_lens, z_source, cosmo): - """ - converts time-delay distance to H0 for a given expansion history + """Converts time-delay distance to H0 for a given expansion history. 
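# Sketch of the interpolation strategy used in cosmo_interp.py above: accumulate the
# comoving distance over small redshift steps once, then evaluate it cheaply with a
# 1-d interpolator. Step count, maximum redshift and cosmology are illustrative.
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from scipy.integrate import quad
from scipy.interpolate import interp1d

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
num_interp, z_stop = 100, 5.0
z_steps = np.linspace(0.0, z_stop, num_interp + 1)
dist = np.zeros(num_interp + 1)
for i in range(num_interp):
    seg, _ = quad(lambda z: 1.0 / cosmo.efunc(z), z_steps[i], z_steps[i + 1])
    dist[i + 1] = dist[i] + seg * cosmo.hubble_distance.value  # cumulative D_C [Mpc]
comoving_interp = interp1d(z_steps, dist)
print(comoving_interp(1.3))  # fast approximation to cosmo.comoving_distance(1.3)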
:param ddt: time-delay distance in Mpc :param z_lens: deflector redshift :param z_source: source redshift :param cosmo: astropy.cosmology class instance - :return: h0 value which matches the cosmology class effectively replacing the h0 value used in the creation of this class + :return: h0 value which matches the cosmology class effectively replacing the h0 + value used in the creation of this class """ h0_fiducial = cosmo.H0.value lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) @@ -49,10 +50,10 @@ def ddt2h0(ddt, z_lens, z_source, cosmo): @export class SolverFlatLCDM(object): - """ - class to solve multidimensional non-linear equations to determine the cosmological parameters H0 and omega_m given - the angular diameter distance relations - """ + """Class to solve multidimensional non-linear equations to determine the + cosmological parameters H0 and omega_m given the angular diameter distance + relations.""" + def __init__(self, z_d, z_s): self.z_d = z_d self.z_s = z_s @@ -64,15 +65,19 @@ def F(self, x, Dd, Ds_Dds): :return: """ [H_0, omega_m] = x - omega_m = abs(omega_m)%1 - Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances(H_0, omega_m, self.z_d, self.z_s) + omega_m = abs(omega_m) % 1 + Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances( + H_0, omega_m, self.z_d, self.z_s + ) y = np.zeros(2) y[0] = Dd - Dd_new y[1] = Ds_Dds - Ds_Dds_new return y def solve(self, init, dd, ds_dds): - x = scipy.optimize.fsolve(self.F, init, args=(dd, ds_dds), xtol=1.49012e-08, factor=0.1) + x = scipy.optimize.fsolve( + self.F, init, args=(dd, ds_dds), xtol=1.49012e-08, factor=0.1 + ) x[1] = abs(x[1]) % 1 y = self.F(x, dd, ds_dds) if abs(y[0]) >= 0.1 or abs(y[1]) > 0.1: @@ -82,9 +87,9 @@ def solve(self, init, dd, ds_dds): @export class InvertCosmo(object): - """ - class to do an interpolation and call the inverse of this interpolation to get H_0 and omega_m - """ + """Class to do an interpolation and call the inverse of this interpolation to get + H_0 and omega_m.""" + def __init__(self, z_d, z_s, H0_range=None, omega_m_range=None): self.z_d = z_d self.z_s = z_s @@ -96,42 +101,61 @@ def __init__(self, z_d, z_s, H0_range=None, omega_m_range=None): self._omega_m_range = omega_m_range def _make_interpolation(self): - """ - creates an interpolation grid in H_0, omega_m and computes quantities in Dd and Ds_Dds + """Creates an interpolation grid in H_0, omega_m and computes quantities in Dd + and Ds_Dds. 
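# Round-trip sketch of the solver defined above: generate (Dd, Ds/Dds) for a known
# flat LCDM cosmology and recover H0 and Omega_m from them. Module path and call
# signatures follow this diff; the fiducial numbers are arbitrary.
from lenstronomy.Cosmo.cosmo_solver import (
    SolverFlatLCDM,
    cosmo2angular_diameter_distances,
)

z_d, z_s = 0.5, 2.0
dd_true, ds_dds_true = cosmo2angular_diameter_distances(72.0, 0.31, z_d, z_s)
solver = SolverFlatLCDM(z_d=z_d, z_s=z_s)
h0_fit, om_fit = solver.solve(init=[70.0, 0.3], dd=dd_true, ds_dds=ds_dds_true)
print(h0_fit, om_fit)  # should land close to 72.0 and 0.31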
:return: """ - grid2d = np.dstack(np.meshgrid(self._H0_range, self._omega_m_range)).reshape(-1, 2) + grid2d = np.dstack(np.meshgrid(self._H0_range, self._omega_m_range)).reshape( + -1, 2 + ) H0_grid = grid2d[:, 0] omega_m_grid = grid2d[:, 1] Dd_grid = np.zeros_like(H0_grid) Ds_Dds_grid = np.zeros_like(H0_grid) for i in range(len(H0_grid)): - Dd, Ds_Dds = cosmo2angular_diameter_distances(H0_grid[i], omega_m_grid[i], self.z_d, self.z_s) + Dd, Ds_Dds = cosmo2angular_diameter_distances( + H0_grid[i], omega_m_grid[i], self.z_d, self.z_s + ) Dd_grid[i] = Dd Ds_Dds_grid[i] = Ds_Dds - self._f_H0 = interpolate.interp2d(Dd_grid, Ds_Dds_grid, H0_grid, kind='linear', copy=False, bounds_error=False, - fill_value=-1) + self._f_H0 = interpolate.interp2d( + Dd_grid, + Ds_Dds_grid, + H0_grid, + kind="linear", + copy=False, + bounds_error=False, + fill_value=-1, + ) print("H0 interpolation done") - self._f_omega_m = interpolate.interp2d(Dd_grid, Ds_Dds_grid, omega_m_grid, kind='linear', copy=False, - bounds_error=False, fill_value=0) + self._f_omega_m = interpolate.interp2d( + Dd_grid, + Ds_Dds_grid, + omega_m_grid, + kind="linear", + copy=False, + bounds_error=False, + fill_value=0, + ) print("omega_m interpolation done") def get_cosmo(self, Dd, Ds_Dds): - """ - return the values of H0 and omega_m computed with an interpolation + """Return the values of H0 and omega_m computed with an interpolation. :param Dd: flat :param Ds_Dds: float :return: """ - if not hasattr(self, '_f_H0') or not hasattr(self, '_f_omega_m'): + if not hasattr(self, "_f_H0") or not hasattr(self, "_f_omega_m"): self._make_interpolation() H0 = self._f_H0(Dd, Ds_Dds) - print(H0, 'H0') + print(H0, "H0") omega_m = self._f_omega_m(Dd, Ds_Dds) - Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances(H0[0], omega_m[0], self.z_d, self.z_s) - if abs(Dd - Dd_new)/Dd > 0.01 or abs(Ds_Dds - Ds_Dds_new)/Ds_Dds > 0.01: + Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances( + H0[0], omega_m[0], self.z_d, self.z_s + ) + if abs(Dd - Dd_new) / Dd > 0.01 or abs(Ds_Dds - Ds_Dds_new) / Ds_Dds > 0.01: return -1, -1 else: return H0[0], omega_m[0] diff --git a/lenstronomy/Cosmo/kde_likelihood.py b/lenstronomy/Cosmo/kde_likelihood.py index dbed4d4d0..588b68d0f 100644 --- a/lenstronomy/Cosmo/kde_likelihood.py +++ b/lenstronomy/Cosmo/kde_likelihood.py @@ -1,15 +1,16 @@ import numpy as np from scipy import stats -__all__ = ['KDELikelihood'] +__all__ = ["KDELikelihood"] class KDELikelihood(object): - """ - class that samples the cosmographic likelihood given a distribution of points in the 2-dimensional distribution - of D_d and D_delta_t - """ - def __init__(self, D_d_sample, D_delta_t_sample, kde_type='scipy_gaussian', bandwidth=1): + """Class that samples the cosmographic likelihood given a distribution of points in + the 2-dimensional distribution of D_d and D_delta_t.""" + + def __init__( + self, D_d_sample, D_delta_t_sample, kde_type="scipy_gaussian", bandwidth=1 + ): """ :param D_d_sample: 1-d numpy array of angular diameter distances to the lens plane @@ -22,25 +23,25 @@ def __init__(self, D_d_sample, D_delta_t_sample, kde_type='scipy_gaussian', band :param bandwidth: width of kernel (in same units as the angular diameter quantities) """ values = np.vstack([D_d_sample, D_delta_t_sample]) - if kde_type == 'scipy_gaussian': + if kde_type == "scipy_gaussian": self._PDF_kernel = stats.gaussian_kde(values) else: from sklearn.neighbors import KernelDensity + self._kde = KernelDensity(bandwidth=bandwidth, kernel=kde_type) values = np.vstack([D_d_sample, 
D_delta_t_sample]) self._kde.fit(values.T) self._kde_type = kde_type def logLikelihood(self, D_d, D_delta_t): - """ - likelihood of the data (represented in the distribution of this class) given a model with predicted angular - diameter distances. + """Likelihood of the data (represented in the distribution of this class) given + a model with predicted angular diameter distances. :param D_d: model predicted angular diameter distance :param D_delta_t: model predicted time-delay distance :return: loglikelihood (log of KDE value) """ - if self._kde_type == 'scipy_gaussian': + if self._kde_type == "scipy_gaussian": density = self._PDF_kernel([D_d, D_delta_t]) logL = np.log(density) else: diff --git a/lenstronomy/Cosmo/lcdm.py b/lenstronomy/Cosmo/lcdm.py index 618f5d21e..62d57bb77 100644 --- a/lenstronomy/Cosmo/lcdm.py +++ b/lenstronomy/Cosmo/lcdm.py @@ -1,15 +1,14 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from astropy.cosmology import FlatLambdaCDM, LambdaCDM from lenstronomy.Cosmo.lens_cosmo import LensCosmo -__all__ = ['LCDM'] +__all__ = ["LCDM"] class LCDM(object): - """ - Flat LCDM cosmology background with free Hubble parameter and Omega_m at fixed lens redshift configuration - """ + """Flat LCDM cosmology background with free Hubble parameter and Omega_m at fixed + lens redshift configuration.""" def __init__(self, z_lens, z_source, flat=True): """ @@ -38,8 +37,7 @@ def _get_cosom(self, H_0, Om0, Ode0=None): return lensCosmo def D_d(self, H_0, Om0, Ode0=None): - """ - angular diameter to deflector + """Angular diameter to deflector. :param H_0: Hubble parameter [km/s/Mpc] :param Om0: normalized matter density at present time @@ -49,8 +47,7 @@ def D_d(self, H_0, Om0, Ode0=None): return lensCosmo.dd def D_s(self, H_0, Om0, Ode0=None): - """ - angular diameter to source + """Angular diameter to source. :param H_0: Hubble parameter [km/s/Mpc] :param Om0: normalized matter density at present time @@ -60,8 +57,7 @@ def D_s(self, H_0, Om0, Ode0=None): return lensCosmo.ds def D_ds(self, H_0, Om0, Ode0=None): - """ - angular diameter from deflector to source + """Angular diameter from deflector to source. :param H_0: Hubble parameter [km/s/Mpc] :param Om0: normalized matter density at present time @@ -71,8 +67,7 @@ def D_ds(self, H_0, Om0, Ode0=None): return lensCosmo.dds def D_dt(self, H_0, Om0, Ode0=None): - """ - time-delay distance + """Time-delay distance. :param H_0: Hubble parameter [km/s/Mpc] :param Om0: normalized matter density at present time diff --git a/lenstronomy/Cosmo/lens_cosmo.py b/lenstronomy/Cosmo/lens_cosmo.py index b72083fce..5017de236 100644 --- a/lenstronomy/Cosmo/lens_cosmo.py +++ b/lenstronomy/Cosmo/lens_cosmo.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to convert lensing and physical units @@ -7,13 +7,13 @@ from lenstronomy.Cosmo.background import Background from lenstronomy.Cosmo.nfw_param import NFWParam -__all__ = ['LensCosmo'] +__all__ = ["LensCosmo"] class LensCosmo(object): - """ - class to manage the physical units and distances present in a single plane lens with fixed input cosmology - """ + """Class to manage the physical units and distances present in a single plane lens + with fixed input cosmology.""" + def __init__(self, z_lens, z_source, cosmo=None): """ @@ -29,7 +29,7 @@ def __init__(self, z_lens, z_source, cosmo=None): @property def h(self): - return self.background.cosmo.H(0).value / 100. 
+ return self.background.cosmo.H(0).value / 100.0 @property def dd(self): @@ -65,36 +65,41 @@ def ddt(self): @property def sigma_crit(self): - """ - returns the critical projected lensing mass density in units of M_sun/Mpc^2 + """Returns the critical projected lensing mass density in units of M_sun/Mpc^2. :return: critical projected lensing mass density """ - if not hasattr(self, '_sigma_crit_mpc'): - const_SI = const.c ** 2 / (4 * np.pi * const.G) # c^2/(4*pi*G) in units of [kg/m] + if not hasattr(self, "_sigma_crit_mpc"): + const_SI = const.c**2 / ( + 4 * np.pi * const.G + ) # c^2/(4*pi*G) in units of [kg/m] conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc] - factor = const_SI*conversion # c^2/(4*pi*G) in units of [M_sun/Mpc] - self._sigma_crit_mpc = self.ds / (self.dd * self.dds) * factor # [M_sun/Mpc^2] + factor = const_SI * conversion # c^2/(4*pi*G) in units of [M_sun/Mpc] + self._sigma_crit_mpc = ( + self.ds / (self.dd * self.dds) * factor + ) # [M_sun/Mpc^2] return self._sigma_crit_mpc @property def sigma_crit_angle(self): - """ - returns the critical surface density in units of M_sun/arcsec^2 (in physical solar mass units) - when provided a physical mass per physical Mpc^2 + """Returns the critical surface density in units of M_sun/arcsec^2 (in physical + solar mass units) when provided a physical mass per physical Mpc^2. :return: critical projected mass density """ - if not hasattr(self, '_sigma_crit_arcsec'): - const_SI = const.c ** 2 / (4 * np.pi * const.G) # c^2/(4*pi*G) in units of [kg/m] + if not hasattr(self, "_sigma_crit_arcsec"): + const_SI = const.c**2 / ( + 4 * np.pi * const.G + ) # c^2/(4*pi*G) in units of [kg/m] conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc] factor = const_SI * conversion # c^2/(4*pi*G) in units of [M_sun/Mpc] - self._sigma_crit_arcsec = self.ds / (self.dd * self.dds) * factor * (self.dd * const.arcsec) ** 2 # [M_sun/arcsec^2] + self._sigma_crit_arcsec = ( + self.ds / (self.dd * self.dds) * factor * (self.dd * const.arcsec) ** 2 + ) # [M_sun/arcsec^2] return self._sigma_crit_arcsec def phys2arcsec_lens(self, phys): - """ - convert physical Mpc into arc seconds + """Convert physical Mpc into arc seconds. :param phys: physical distance [Mpc] :return: angular diameter [arcsec] @@ -102,8 +107,7 @@ def phys2arcsec_lens(self, phys): return phys / self.dd / const.arcsec def arcsec2phys_lens(self, arcsec): - """ - convert angular to physical quantities for lens plane + """Convert angular to physical quantities for lens plane. :param arcsec: angular size at lens plane [arcsec] :return: physical size at lens plane [Mpc] @@ -111,8 +115,7 @@ def arcsec2phys_lens(self, arcsec): return arcsec * const.arcsec * self.dd def arcsec2phys_source(self, arcsec): - """ - convert angular to physical quantities for source plane + """Convert angular to physical quantities for source plane. :param arcsec: angular size at source plane [arcsec] :return: physical size at source plane [Mpc] @@ -120,8 +123,7 @@ def arcsec2phys_source(self, arcsec): return arcsec * const.arcsec * self.ds def kappa2proj_mass(self, kappa): - """ - convert convergence to projected mass M_sun/Mpc^2 + """Convert convergence to projected mass M_sun/Mpc^2. 
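# The critical surface density returned by sigma_crit above, written out with astropy
# units as a cross-check: Sigma_crit = c^2 / (4 pi G) * D_s / (D_d * D_ds).
# Cosmology and redshifts are illustrative.
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
z_lens, z_source = 0.5, 2.0
d_d = cosmo.angular_diameter_distance(z_lens)
d_s = cosmo.angular_diameter_distance(z_source)
d_ds = cosmo.angular_diameter_distance_z1z2(z_lens, z_source)
sigma_crit = const.c**2 / (4 * np.pi * const.G) * d_s / (d_d * d_ds)
print(sigma_crit.to(u.Msun / u.Mpc**2))  # same units as LensCosmo.sigma_crit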
:param kappa: lensing convergence :return: projected mass [M_sun/Mpc^2] @@ -129,8 +131,7 @@ def kappa2proj_mass(self, kappa): return kappa * self.sigma_crit def mass_in_theta_E(self, theta_E): - """ - mass within Einstein radius (area * epsilon crit) [M_sun] + """Mass within Einstein radius (area * epsilon crit) [M_sun] :param theta_E: Einstein radius [arcsec] :return: mass within Einstein radius [M_sun] @@ -146,7 +147,14 @@ def mass_in_coin(self, theta_E): """ chi_L = self.background.T_xy(0, self.z_lens) chi_S = self.background.T_xy(0, self.z_source) - return 1./3 * np.pi * (chi_L * theta_E * const.arcsec) ** 2 * chi_S * self.background.rho_crit #[M_sun/Mpc**3] + return ( + 1.0 + / 3 + * np.pi + * (chi_L * theta_E * const.arcsec) ** 2 + * chi_S + * self.background.rho_crit + ) # [M_sun/Mpc**3] def time_delay_units(self, fermat_pot, kappa_ext=0): """ @@ -155,8 +163,10 @@ def time_delay_units(self, fermat_pot, kappa_ext=0): :param kappa_ext: unit-less external shear not accounted for in the Fermat potential :return: time delay in days """ - D_dt = self.ddt * (1. - kappa_ext) * const.Mpc # eqn 7 in Suyu et al. - return D_dt / const.c * fermat_pot / const.day_s * const.arcsec ** 2 # * self.arcsec2phys_lens(1.)**2 + D_dt = self.ddt * (1.0 - kappa_ext) * const.Mpc # eqn 7 in Suyu et al. + return ( + D_dt / const.c * fermat_pot / const.day_s * const.arcsec**2 + ) # * self.arcsec2phys_lens(1.)**2 def time_delay2fermat_pot(self, dt): """ @@ -165,11 +175,10 @@ def time_delay2fermat_pot(self, dt): :return: Fermat potential in units arcsec**2 for a given cosmology """ D_dt = self.ddt * const.Mpc - return dt * const.c * const.day_s / D_dt / const.arcsec ** 2 + return dt * const.c * const.day_s / D_dt / const.arcsec**2 def nfw_angle2physical(self, Rs_angle, alpha_Rs): - """ - converts the angular parameters into the physical ones for an NFW profile + """Converts the angular parameters into the physical ones for an NFW profile. :param alpha_Rs: observed bending angle at the scale radius in units of arcsec :param Rs_angle: scale radius in units of arcsec @@ -177,7 +186,7 @@ def nfw_angle2physical(self, Rs_angle, alpha_Rs): """ Rs = Rs_angle * const.arcsec * self.dd theta_scaled = alpha_Rs * self.sigma_crit * self.dd * const.arcsec - rho0 = theta_scaled / (4 * Rs ** 2 * (1 + np.log(1. / 2.))) + rho0 = theta_scaled / (4 * Rs**2 * (1 + np.log(1.0 / 2.0))) rho0_com = rho0 / self.h**2 c = self.nfw_param.c_rho0(rho0_com, self.z_lens) r200 = c * Rs @@ -185,84 +194,94 @@ def nfw_angle2physical(self, Rs_angle, alpha_Rs): return rho0, Rs, c, r200, M200 def nfw_physical2angle(self, M, c): - """ - converts the physical mass and concentration parameter of an NFW profile into the lensing quantities + """Converts the physical mass and concentration parameter of an NFW profile into + the lensing quantities. - :param M: mass enclosed 200 rho_crit in units of M_sun (physical units, meaning no little h) + :param M: mass enclosed 200 rho_crit in units of M_sun (physical units, meaning + no little h) :param c: NFW concentration parameter (r200/r_s) - :return: Rs_angle (angle at scale radius) (in units of arcsec), alpha_Rs (observed bending angle at the scale radius + :return: Rs_angle (angle at scale radius) (in units of arcsec), alpha_Rs + (observed bending angle at the scale radius """ rho0, Rs, r200 = self.nfwParam_physical(M, c) Rs_angle = Rs / self.dd / const.arcsec # Rs in arcsec - alpha_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. 
/ 2.))) + alpha_Rs = rho0 * (4 * Rs**2 * (1 + np.log(1.0 / 2.0))) return Rs_angle, alpha_Rs / self.sigma_crit / self.dd / const.arcsec def nfwParam_physical(self, M, c): - """ - returns the NFW parameters in physical units + """Returns the NFW parameters in physical units. :param M: physical mass in M_sun in definition m200 :param c: concentration :return: rho0 [Msun/Mpc^3], Rs [Mpc], r200 [Mpc] """ - r200 = self.nfw_param.r200_M(M * self.h, self.z_lens) / self.h # physical radius r200 - rho0 = self.nfw_param.rho0_c(c, self.z_lens) * self.h**2 # physical density in M_sun/Mpc**3 - Rs = r200/c + r200 = ( + self.nfw_param.r200_M(M * self.h, self.z_lens) / self.h + ) # physical radius r200 + rho0 = ( + self.nfw_param.rho0_c(c, self.z_lens) * self.h**2 + ) # physical density in M_sun/Mpc**3 + Rs = r200 / c return rho0, Rs, r200 def nfw_M_theta_r200(self, M): - """ - returns r200 radius in angular units of arc seconds on the sky + """Returns r200 radius in angular units of arc seconds on the sky. :param M: physical mass in M_sun :return: angle (in arc seconds) of the r200 radius """ - r200 = self.nfw_param.r200_M(M * self.h, self.z_lens) / self.h # physical radius r200 + r200 = ( + self.nfw_param.r200_M(M * self.h, self.z_lens) / self.h + ) # physical radius r200 theta_r200 = r200 / self.dd / const.arcsec return theta_r200 def sis_theta_E2sigma_v(self, theta_E): - """ - converts the lensing Einstein radius into a physical velocity dispersion + """Converts the lensing Einstein radius into a physical velocity dispersion. :param theta_E: Einstein radius (in arcsec) :return: velocity dispersion in units (km/s) """ - v_sigma_c2 = theta_E * const.arcsec / (4*np.pi) * self.ds / self.dds - return np.sqrt(v_sigma_c2)*const.c / 1000 + v_sigma_c2 = theta_E * const.arcsec / (4 * np.pi) * self.ds / self.dds + return np.sqrt(v_sigma_c2) * const.c / 1000 def sis_sigma_v2theta_E(self, v_sigma): - """ - converts the velocity dispersion into an Einstein radius for a SIS profile + """Converts the velocity dispersion into an Einstein radius for a SIS profile. :param v_sigma: velocity dispersion (km/s) :return: theta_E (arcsec) """ - theta_E = 4 * np.pi * (v_sigma * 1000./const.c) ** 2 * self.dds / self.ds / const.arcsec + theta_E = ( + 4 + * np.pi + * (v_sigma * 1000.0 / const.c) ** 2 + * self.dds + / self.ds + / const.arcsec + ) return theta_E def hernquist_phys2angular(self, mass, rs): - """ - Translates physical mass definitions of the Hernquist profile to the angular units used in the Hernquist lens - profile of lenstronomy. + """Translates physical mass definitions of the Hernquist profile to the angular + units used in the Hernquist lens profile of lenstronomy. 
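# The singular-isothermal-sphere relation used by sis_sigma_v2theta_E above:
# theta_E = 4 pi (sigma_v / c)^2 * D_ds / D_s. Velocity dispersion, redshifts and
# cosmology are illustrative.
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
z_lens, z_source = 0.5, 2.0
d_s = cosmo.angular_diameter_distance(z_source)
d_ds = cosmo.angular_diameter_distance_z1z2(z_lens, z_source)
sigma_v = 250 * u.km / u.s
theta_e = 4 * np.pi * (sigma_v / const.c) ** 2 * d_ds / d_s * u.rad
print(theta_e.to(u.arcsec))  # Einstein radius of the SIS, of order an arcsecond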
- 'sigma0' is defined such that the deflection at projected RS leads to - alpha = 2./3 * Rs * sigma0 + 'sigma0' is defined such that the deflection at projected RS leads to alpha = + 2./3 * Rs * sigma0 - :param mass: A spherical overdensity mass in M_sun corresponding to the mass definition mdef at redshift z + :param mass: A spherical overdensity mass in M_sun corresponding to the mass + definition mdef at redshift z :param rs: rs in units of physical Mpc :return: sigma0, Rs_angle """ rs_angle = rs / self.dd / const.arcsec # Rs in arcsec - rhos = mass / (2 * np.pi) / rs ** 3 # units of M_sun / Mpc^3 + rhos = mass / (2 * np.pi) / rs**3 # units of M_sun / Mpc^3 sigma0 = rhos * rs # units of M_sun / Mpc^2 sigma0 /= self.sigma_crit return sigma0, rs_angle def hernquist_angular2phys(self, sigma0, rs_angle): - """ - 'sigma0' is defined such that the deflection at projected RS leads to - alpha = 2./3 * Rs * sigma0 + """'sigma0' is defined such that the deflection at projected RS leads to alpha = + 2./3 * Rs * sigma0. :param sigma0: convergence normalization :param rs_angle: rs in angular units [arcseconds] @@ -270,50 +289,51 @@ def hernquist_angular2phys(self, sigma0, rs_angle): """ rs = rs_angle * self.dd * const.arcsec # units of Mpc rhos = sigma0 / rs * self.sigma_crit - m_tot = 2*np.pi*rhos*rs**3 + m_tot = 2 * np.pi * rhos * rs**3 return m_tot, rs def uldm_angular2phys(self, kappa_0, theta_c): - """ - converts the anguar parameters entering the LensModel Uldm() (Ultra Light - Dark Matter) class in physical masses, i.e. the total soliton mass and the - mass of the particle + """Converts the anguar parameters entering the LensModel Uldm() (Ultra Light + Dark Matter) class in physical masses, i.e. the total soliton mass and the mass + of the particle. :param kappa_0: central convergence of profile :param theta_c: core radius (in arcseconds) - :return: m_eV_log10, M_sol_log10, the log10 of the masses, m in eV and M in M_sun + :return: m_eV_log10, M_sol_log10, the log10 of the masses, m in eV and M in + M_sun """ - D_Lens = self.dd * 10**6 # in parsec - Sigma_c = self.sigma_crit * 10**(-12) # in M_sun / parsec^2 + D_Lens = self.dd * 10**6 # in parsec + Sigma_c = self.sigma_crit * 10 ** (-12) # in M_sun / parsec^2 r_c = theta_c * const.arcsec * D_Lens rho0 = 2048 * np.sqrt(0.091) * kappa_0 * Sigma_c / (429 * np.pi * r_c) - m_log10 = -22 + 0.5*np.log10(190 / rho0 * (r_c / 100)**(-4)) + m_log10 = -22 + 0.5 * np.log10(190 / rho0 * (r_c / 100) ** (-4)) M_log10 = 9 + np.log10(160 * 1.4 / r_c) - 2 * (m_log10 + 22) return m_log10, M_log10 def uldm_mphys2angular(self, m_log10, M_log10): - """ - converts physical ULDM mass in the ones, in angular units, that enter - the LensModel Uldm() class + """Converts physical ULDM mass in the ones, in angular units, that enter the + LensModel Uldm() class. 
:param m_log10: exponent of ULDM mass in eV :param M_log10: exponent of soliton mass in M_sun - :return: kappa_0, theta_c, the central convergence and core radius (in arcseconds) - """ - D_Lens = self.dd * 10**6 # in parsec - Sigma_c = self.sigma_crit * 10**(-12) # in M_sun/parsec^2 - m22 = 10**(m_log10 + 22) - M9 = 10**(M_log10 -9) - r_c = 160 * 1.4 * m22**(-2) * M9**(-1) # core radius in parsec - rho0 = 190 * m22**(-2) * (r_c / 100)**(-4) # central density in M_sun/parsec^3 + :return: kappa_0, theta_c, the central convergence and core radius (in + arcseconds) + """ + D_Lens = self.dd * 10**6 # in parsec + Sigma_c = self.sigma_crit * 10 ** (-12) # in M_sun/parsec^2 + m22 = 10 ** (m_log10 + 22) + M9 = 10 ** (M_log10 - 9) + r_c = 160 * 1.4 * m22 ** (-2) * M9 ** (-1) # core radius in parsec + rho0 = ( + 190 * m22 ** (-2) * (r_c / 100) ** (-4) + ) # central density in M_sun/parsec^3 kappa_0 = 429 * np.pi * rho0 * r_c / (2048 * np.sqrt(0.091) * Sigma_c) theta_c = r_c / D_Lens / const.arcsec return kappa_0, theta_c def sersic_m_star2k_eff(self, m_star, R_sersic, n_sersic): - """ - translates a total stellar mass into 'k_eff', the convergence at - 'R_sersic' (effective radius or half-light radius) for a Sersic profile + """Translates a total stellar mass into 'k_eff', the convergence at 'R_sersic' + (effective radius or half-light radius) for a Sersic profile. :param m_star: total stellar mass in physical Msun :param R_sersic: half-light radius in arc seconds @@ -322,8 +342,11 @@ def sersic_m_star2k_eff(self, m_star, R_sersic, n_sersic): """ # compute mass integral from lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil + sersic_util = SersicUtil() - norm_integral = sersic_util.total_flux(amp=1, R_sersic=R_sersic, n_sersic=n_sersic) + norm_integral = sersic_util.total_flux( + amp=1, R_sersic=R_sersic, n_sersic=n_sersic + ) # compute total kappa normalization and re k_eff = m_star / self.sigma_crit_angle # renormalize @@ -331,8 +354,8 @@ def sersic_m_star2k_eff(self, m_star, R_sersic, n_sersic): return k_eff def sersic_k_eff2m_star(self, k_eff, R_sersic, n_sersic): - """ - translates convergence at half-light radius to total integrated physical stellar mass for a Sersic profile + """Translates convergence at half-light radius to total integrated physical + stellar mass for a Sersic profile. :param k_eff: lensing convergence at half-light radius :param R_sersic: half-light radius in arc seconds @@ -340,7 +363,10 @@ def sersic_k_eff2m_star(self, k_eff, R_sersic, n_sersic): :return: stellar mass in physical Msun """ from lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil + sersic_util = SersicUtil() - norm_integral = sersic_util.total_flux(amp=1, R_sersic=R_sersic, n_sersic=n_sersic) - m_star = k_eff *self.sigma_crit_angle * norm_integral + norm_integral = sersic_util.total_flux( + amp=1, R_sersic=R_sersic, n_sersic=n_sersic + ) + m_star = k_eff * self.sigma_crit_angle * norm_integral return m_star diff --git a/lenstronomy/Cosmo/micro_lensing.py b/lenstronomy/Cosmo/micro_lensing.py index c544ca520..6f14347d0 100644 --- a/lenstronomy/Cosmo/micro_lensing.py +++ b/lenstronomy/Cosmo/micro_lensing.py @@ -5,8 +5,7 @@ def einstein_radius(mass, d_l, d_s): - """ - Einstein radius for a given point mass and distances to lens and source + """Einstein radius for a given point mass and distances to lens and source. 
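# The point-mass Einstein radius implemented by micro_lensing.einstein_radius here,
# written with astropy units: theta_E = sqrt(4 G M / c^2 * (D_s - D_l) / (D_s * D_l)).
# A solar-mass lens halfway to an 8 kpc source is an illustrative microlensing setup.
import numpy as np
from astropy import constants as const
from astropy import units as u

mass = 1 * u.Msun
d_l, d_s = 4 * u.kpc, 8 * u.kpc
ratio = (4 * const.G * mass / const.c**2 * (d_s - d_l) / (d_s * d_l)).to(
    u.dimensionless_unscaled
)
theta_e = np.sqrt(ratio) * u.rad
print(theta_e.to(u.arcsec))  # of order a milliarcsecond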
:param mass: point source mass [M_sun] :param d_l: distance to lens [pc] @@ -17,7 +16,9 @@ def einstein_radius(mass, d_l, d_s): dl_m = d_l * constants.pc ds_m = d_s * constants.pc # Einstein radius in radian - theta_e = np.sqrt(4 * constants.G * mass_kg / constants.c**2 * (ds_m - dl_m)/(ds_m * dl_m)) + theta_e = np.sqrt( + 4 * constants.G * mass_kg / constants.c**2 * (ds_m - dl_m) / (ds_m * dl_m) + ) theta_e /= constants.arcsec # arc seconds return theta_e diff --git a/lenstronomy/Cosmo/nfw_param.py b/lenstronomy/Cosmo/nfw_param.py index 56436b3fe..d68e54b8f 100644 --- a/lenstronomy/Cosmo/nfw_param.py +++ b/lenstronomy/Cosmo/nfw_param.py @@ -1,14 +1,15 @@ import numpy as np -__all__ = ['NFWParam'] +__all__ = ["NFWParam"] class NFWParam(object): - """ - class which contains a halo model parameters dependent on cosmology for NFW profile - All distances are given in physical units. Mass definitions are relative to 200 crit including redshift evolution. - The redshift evolution is cosmology dependent (dark energy). - The H0 dependence is propagated into the input and return units. + """Class which contains a halo model parameters dependent on cosmology for NFW + profile All distances are given in physical units. + + Mass definitions are relative to 200 crit including redshift evolution. The redshift + evolution is cosmology dependent (dark energy). The H0 dependence is propagated into + the input and return units. """ rhoc = 2.77536627e11 # critical density [h^2 M_sun Mpc^-3] @@ -35,8 +36,7 @@ def rhoc_z(self, z): @staticmethod def M200(rs, rho0, c): - """ - Calculation of the mass enclosed r_200 for NFW profile defined as + """Calculation of the mass enclosed r_200 for NFW profile defined as. .. math:: M_{200} = 4 \\pi \\rho_0^{3} * \\left(\\log(1+c) - c / (1 + c) \\right)) @@ -49,11 +49,10 @@ def M200(rs, rho0, c): :type c: float [4,40] :return: M(R_200) mass in units of rho0 * rs^3 """ - return 4 * np.pi * rho0 * rs ** 3 * (np.log(1. + c) - c / (1. + c)) + return 4 * np.pi * rho0 * rs**3 * (np.log(1.0 + c) - c / (1.0 + c)) def r200_M(self, M, z): - """ - computes the radius R_200 crit of a halo of mass M in physical mass M/h + """Computes the radius R_200 crit of a halo of mass M in physical mass M/h. :param M: halo mass in M_sun/h :type M: float or numpy array @@ -61,7 +60,7 @@ def r200_M(self, M, z): :type z: float :return: radius R_200 in physical Mpc/h """ - return (3*M/(4*np.pi*self.rhoc_z(z)*200))**(1./3.) + return (3 * M / (4 * np.pi * self.rhoc_z(z) * 200)) ** (1.0 / 3.0) def M_r200(self, r200, z): """ @@ -70,32 +69,33 @@ def M_r200(self, r200, z): :param z: redshift :return: M200 in M_sun/h """ - return self.rhoc_z(z)*200 * r200**3 * 4*np.pi/3. + return self.rhoc_z(z) * 200 * r200**3 * 4 * np.pi / 3.0 def rho0_c(self, c, z): - """ - computes density normalization as a function of concentration parameter + """Computes density normalization as a function of concentration parameter. 
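# The M200 <-> r200 relation used by NFWParam above: a halo of mass M200 encloses 200
# times the critical density, so r200 = (3 M / (4 pi * 200 * rho_crit))^(1/3).
# rhoc matches the class constant at z = 0; the halo mass is illustrative.
import numpy as np

rhoc = 2.77536627e11  # critical density [h^2 M_sun / Mpc^3]
M = 1e13              # halo mass [M_sun/h]
r200 = (3 * M / (4 * np.pi * rhoc * 200)) ** (1.0 / 3.0)  # [Mpc/h]
print(r200)  # about 0.35 Mpc/h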
:param c: concentration :param z: redshift :return: density normalization in h^2/Mpc^3 (physical) """ - return 200./3*self.rhoc_z(z)*c**3/(np.log(1.+c)-c/(1.+c)) + return 200.0 / 3 * self.rhoc_z(z) * c**3 / (np.log(1.0 + c) - c / (1.0 + c)) def c_rho0(self, rho0, z): - """ - computes the concentration given density normalization rho_0 in h^2/Mpc^3 (physical) (inverse of function rho0_c) + """Computes the concentration given density normalization rho_0 in h^2/Mpc^3 + (physical) (inverse of function rho0_c) :param rho0: density normalization in h^2/Mpc^3 (physical) :param z: redshift :return: concentration parameter c """ - if not hasattr(self, '_c_rho0_interp'): + if not hasattr(self, "_c_rho0_interp"): c_array = np.linspace(0.1, 30, 100) rho0_array = self.rho0_c(c_array, z) from scipy import interpolate - self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline(rho0_array, c_array, w=None, - bbox=[None, None], k=3) + + self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline( + rho0_array, c_array, w=None, bbox=[None, None], k=3 + ) return self._c_rho0_interp(rho0) @staticmethod @@ -114,16 +114,13 @@ def c_M_z(M, z): A = 5.22 B = -0.072 C = -0.42 - M_pivot = 2.*10**12 - return A*(M/M_pivot)**B*(1+z)**C + M_pivot = 2.0 * 10**12 + return A * (M / M_pivot) ** B * (1 + z) ** C def nfw_Mz(self, M, z): - """ - returns all needed parameter (in physical units modulo h) to draw the profile of the main halo - r200 in physical Mpc/h - rho_s in h^2/Mpc^3 (physical) - Rs in Mpc/h physical - c unit less + """Returns all needed parameter (in physical units modulo h) to draw the profile + of the main halo r200 in physical Mpc/h rho_s in h^2/Mpc^3 (physical) Rs in + Mpc/h physical c unit less. :param M: Mass in physical M_sun/h :param z: redshift @@ -131,5 +128,5 @@ def nfw_Mz(self, M, z): c = self.c_M_z(M, z) r200 = self.r200_M(M, z) rho0 = self.rho0_c(c, z) - Rs = r200/c + Rs = r200 / c return r200, rho0, c, Rs diff --git a/lenstronomy/Data/angular_sensitivity.py b/lenstronomy/Data/angular_sensitivity.py index 720b5b47e..4ba317f15 100644 --- a/lenstronomy/Data/angular_sensitivity.py +++ b/lenstronomy/Data/angular_sensitivity.py @@ -1,31 +1,31 @@ -__all__ = ['AngularSensitivity'] +__all__ = ["AngularSensitivity"] class AngularSensitivity(object): + """Telescope Angular Sensitivity class. This class provides functions describing the + EM radiation sensitivity along different directions of some specific telescopes, + including radio antennae of an interferometric array. + + A general reference of telescope angular sensitivity can be found in Section 5.4 of + Bradt, H. (2004). Astronomy methods: A physical approach to astronomical + observations. Cambridge University Press. """ - Telescope Angular Sensitivity class. - This class provides functions describing the EM radiation sensitivity along different directions of - some specific telescopes, including radio antennae of an interferometric array. - - A general reference of telescope angular sensitivity can be found in Section 5.4 of - Bradt, H. (2004). Astronomy methods: A physical approach to astronomical observations. Cambridge University Press. - """ - - def __init__(self, antenna_primary_beam = None): + + def __init__(self, antenna_primary_beam=None): """ - :param antenna_primary_beam: 2d numpy array. + :param antenna_primary_beam: 2d numpy array. Primary beam is the angular power sensitivity of EM radiation antennae of some specific telescopes. 
- Usually the radiation sensitivity is largest at the center of the Field of View (FOV), and decays as the distance increases from the center. - If the primary beam applies, it should be multiplied on the unconvolved model images, as regions with less EM sensitivity + Usually the radiation sensitivity is largest at the center of the Field of View (FOV), and decays as the distance increases from the center. + If the primary beam applies, it should be multiplied on the unconvolved model images, as regions with less EM sensitivity get less flux from the model image after the multiplication with the corresponding primary beam. For interferometric users, the primary beam should be provided by data reduction softwares like CASA. """ self._pb = antenna_primary_beam - + @property def primary_beam(self): """ :return: 2d numpy array of primary beam """ - return self._pb \ No newline at end of file + return self._pb diff --git a/lenstronomy/Data/coord_transforms.py b/lenstronomy/Data/coord_transforms.py index a6f2a14d1..0b0cee77d 100644 --- a/lenstronomy/Data/coord_transforms.py +++ b/lenstronomy/Data/coord_transforms.py @@ -3,17 +3,16 @@ import lenstronomy.Util.util as util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Coordinates(object): - """ - class to handle linear coordinate transformations of a square pixel image - """ + """Class to handle linear coordinate transformations of a square pixel image.""" + def __init__(self, transform_pix2angle, ra_at_xy_0, dec_at_xy_0): - """ - initialize the coordinate-to-pixel transform and their inverse + """Initialize the coordinate-to-pixel transform and their inverse. :param transform_pix2angle: 2x2 matrix, mapping of pixel to coordinate :param ra_at_xy_0: ra coordinate at pixel (0,0) @@ -23,8 +22,9 @@ def __init__(self, transform_pix2angle, ra_at_xy_0, dec_at_xy_0): self._Ma2pix = linalg.inv(self._Mpix2a) self._ra_at_xy_0 = ra_at_xy_0 self._dec_at_xy_0 = dec_at_xy_0 - self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix(-self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0, - self._Ma2pix) + self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix( + -self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0, self._Ma2pix + ) @property def transform_angle2pix(self): @@ -59,30 +59,34 @@ def radec_at_xy_0(self): return self._ra_at_xy_0, self._dec_at_xy_0 def map_coord2pix(self, ra, dec): - """ - maps the (ra,dec) coordinates of the system into the pixel coordinate of the image + """Maps the (ra,dec) coordinates of the system into the pixel coordinate of the + image. :param ra: relative RA coordinate as defined by the coordinate frame :param dec: relative DEC coordinate as defined by the coordinate frame :return: (x, y) pixel coordinates """ - return util.map_coord2pix(ra, dec, self._x_at_radec_0, self._y_at_radec_0, self._Ma2pix) + return util.map_coord2pix( + ra, dec, self._x_at_radec_0, self._y_at_radec_0, self._Ma2pix + ) def map_pix2coord(self, x, y): - """ - maps the (x,y) pixel coordinates of the image into the system coordinates + """Maps the (x,y) pixel coordinates of the image into the system coordinates. 
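# Usage sketch for the Coordinates class above: a square grid with 0.05"/pixel and no
# rotation. Module path and constructor follow this diff; the pixel scale and the
# coordinate of pixel (0, 0) are arbitrary.
import numpy as np
from lenstronomy.Data.coord_transforms import Coordinates

pixel_scale = 0.05  # arcsec per pixel
transform_pix2angle = np.array([[pixel_scale, 0], [0, pixel_scale]])
coords = Coordinates(transform_pix2angle, ra_at_xy_0=-2.5, dec_at_xy_0=-2.5)
ra, dec = coords.map_pix2coord(x=50, y=50)  # pixel -> relative (RA, DEC)
x, y = coords.map_coord2pix(ra, dec)        # and back
print(ra, dec, x, y, coords.pixel_width)    # pixel_width == 0.05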
- :param x: pixel coordinate (can be 1d numpy array), defined in the center of the pixel - :param y: pixel coordinate (can be 1d numpy array), defined in the center of the pixel + :param x: pixel coordinate (can be 1d numpy array), defined in the center of the + pixel + :param y: pixel coordinate (can be 1d numpy array), defined in the center of the + pixel :return: relative (RA, DEC) coordinates of the system """ - return util.map_coord2pix(x, y, self._ra_at_xy_0, self._dec_at_xy_0, self._Mpix2a) + return util.map_coord2pix( + x, y, self._ra_at_xy_0, self._dec_at_xy_0, self._Mpix2a + ) @property def pixel_area(self): - """ - angular area of a pixel in the image + """Angular area of a pixel in the image. :return: area [arcsec^2] """ @@ -90,8 +94,7 @@ def pixel_area(self): @property def pixel_width(self): - """ - size of pixel + """Size of pixel. :return: sqrt(pixel_area) """ @@ -104,15 +107,15 @@ def coordinate_grid(self, nx, ny): :param ny: number of pixels in y-direction :return: 2d arrays with coordinates in RA/DEC with ra_coord[y-axis, x-axis] """ - ra_coords, dec_coords = util.grid_from_coordinate_transform(nx, ny, self._Mpix2a, self._ra_at_xy_0, - self._dec_at_xy_0) + ra_coords, dec_coords = util.grid_from_coordinate_transform( + nx, ny, self._Mpix2a, self._ra_at_xy_0, self._dec_at_xy_0 + ) ra_coords = util.array2image(ra_coords, nx, ny) # new dec_coords = util.array2image(dec_coords, nx, ny) # new return ra_coords, dec_coords def shift_coordinate_system(self, x_shift, y_shift, pixel_unit=False): - """ - shifts the coordinate system + """Shifts the coordinate system. :param x_shift: shift in x (or RA) :param y_shift: shift in y (or DEC) @@ -122,9 +125,7 @@ def shift_coordinate_system(self, x_shift, y_shift, pixel_unit=False): self._shift_coordinates(x_shift, y_shift, pixel_unit) def _shift_coordinates(self, x_shift, y_shift, pixel_unit=False): - """ - - shifts the coordinate system + """Shifts the coordinate system. 
:param x_shift: shift in x (or RA) :param y_shift: shift in y (or DEC) @@ -135,20 +136,20 @@ def _shift_coordinates(self, x_shift, y_shift, pixel_unit=False): ra_shift, dec_shift = self.map_pix2coord(x_shift, y_shift) ra_shift -= self._ra_at_xy_0 dec_shift -= self._dec_at_xy_0 - print(ra_shift, dec_shift, 'test') + print(ra_shift, dec_shift, "test") else: ra_shift, dec_shift = x_shift, y_shift self._ra_at_xy_0 += ra_shift self._dec_at_xy_0 += dec_shift - self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix(-self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0, - self._Ma2pix) + self._x_at_radec_0, self._y_at_radec_0 = util.map_coord2pix( + -self._ra_at_xy_0, -self._dec_at_xy_0, 0, 0, self._Ma2pix + ) @export class Coordinates1D(Coordinates): - """ - coordinate grid described in 1-d arrays - """ + """Coordinate grid described in 1-d arrays.""" + def coordinate_grid(self, nx, ny): """ @@ -156,6 +157,7 @@ def coordinate_grid(self, nx, ny): :param ny: number of pixels in y-direction :return: 2d arrays with coordinates in RA/DEC with ra_coord[y-axis, x-axis] """ - ra_coords, dec_coords = util.grid_from_coordinate_transform(nx, ny, self._Mpix2a, self._ra_at_xy_0, - self._dec_at_xy_0) + ra_coords, dec_coords = util.grid_from_coordinate_transform( + nx, ny, self._Mpix2a, self._ra_at_xy_0, self._dec_at_xy_0 + ) return ra_coords, dec_coords diff --git a/lenstronomy/Data/image_noise.py b/lenstronomy/Data/image_noise.py index 90eedbb23..7a396d39c 100644 --- a/lenstronomy/Data/image_noise.py +++ b/lenstronomy/Data/image_noise.py @@ -3,17 +3,24 @@ from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class ImageNoise(object): - """ - class that deals with noise properties of imaging data - """ - - def __init__(self, image_data, exposure_time=None, background_rms=None, noise_map=None, - gradient_boost_factor=None, verbose=True, flux_scaling=1): + """Class that deals with noise properties of imaging data.""" + + def __init__( + self, + image_data, + exposure_time=None, + background_rms=None, + noise_map=None, + gradient_boost_factor=None, + verbose=True, + flux_scaling=1, + ): """ :param image_data: numpy array, pixel data values @@ -44,11 +51,12 @@ def __init__(self, image_data, exposure_time=None, background_rms=None, noise_ma assert np.shape(noise_map) == np.shape(image_data) else: if background_rms is not None and exposure_time is not None: - if np.any(background_rms * exposure_time) < 1 and \ - verbose is True: - print("WARNING! sigma_b*f %s < 1 count may introduce unstable error estimates with a Gaussian" - " error function for a Poisson distribution with mean < 1." % ( - background_rms * np.max(exposure_time))) + if np.any(background_rms * exposure_time) < 1 and verbose is True: + print( + "WARNING! sigma_b*f %s < 1 count may introduce unstable error estimates with a Gaussian" + " error function for a Poisson distribution with mean < 1." + % (background_rms * np.max(exposure_time)) + ) self._data = image_data self._gradient_boost_factor = gradient_boost_factor @@ -60,7 +68,9 @@ def background_rms(self): """ if self._background_rms is None: if self._noise_map is None: - raise ValueError("rms background value as 'background_rms' not specified!") + raise ValueError( + "rms background value as 'background_rms' not specified!" 
+ ) self._background_rms = np.median(self._noise_map) return self._background_rms @@ -74,7 +84,9 @@ def exposure_map(self): """ if self._exp_map is None: if self._noise_map is None: - raise ValueError("Exposure map has not been specified in Noise() class!") + raise ValueError( + "Exposure map has not been specified in Noise() class!" + ) return self._exp_map @property @@ -86,12 +98,16 @@ def C_D(self): :return: covariance matrix of all pixel values in 2d numpy array (only diagonal component). """ - if not hasattr(self, '_C_D'): + if not hasattr(self, "_C_D"): if self._noise_map is not None: - self._C_D = self._noise_map ** 2 + self._C_D = self._noise_map**2 else: - self._C_D = covariance_matrix(self._data, self.background_rms, self.exposure_map, - self._gradient_boost_factor) + self._C_D = covariance_matrix( + self._data, + self.background_rms, + self.exposure_map, + self._gradient_boost_factor, + ) return self._C_D def C_D_model(self, model): @@ -101,15 +117,17 @@ def C_D_model(self, model): :return: estimate of the noise per pixel based on the model flux """ if self._noise_map is not None: - return self._noise_map ** 2 + return self._noise_map**2 else: - return covariance_matrix(model, self._background_rms, self._exp_map, self._gradient_boost_factor) + return covariance_matrix( + model, self._background_rms, self._exp_map, self._gradient_boost_factor + ) @export def covariance_matrix(data, background_rms, exposure_map, gradient_boost_factor=None): - """ - returns a diagonal matrix for the covariance estimation which describes the error + """Returns a diagonal matrix for the covariance estimation which describes the + error. Notes: @@ -133,5 +151,5 @@ def covariance_matrix(data, background_rms, exposure_map, gradient_boost_factor= gradient_map = 0 d_pos = np.zeros_like(data) d_pos[data >= 0] = data[data >= 0] - sigma = d_pos / exposure_map + background_rms ** 2 + gradient_map ** 2 + sigma = d_pos / exposure_map + background_rms**2 + gradient_map**2 return sigma diff --git a/lenstronomy/Data/imaging_data.py b/lenstronomy/Data/imaging_data.py index 49047b423..80b8fdac1 100644 --- a/lenstronomy/Data/imaging_data.py +++ b/lenstronomy/Data/imaging_data.py @@ -3,12 +3,12 @@ from lenstronomy.Data.pixel_grid import PixelGrid from lenstronomy.Data.image_noise import ImageNoise -__all__ = ['ImageData'] +__all__ = ["ImageData"] class ImageData(PixelGrid, ImageNoise): - """ - class to handle the data, coordinate system and masking, including convolution with various numerical precisions + """Class to handle the data, coordinate system and masking, including convolution + with various numerical precisions. The Data() class is initialized with keyword arguments: @@ -44,12 +44,26 @@ class to handle the data, coordinate system and masking, including convolution w the likelihood for the data given model P(data|model) is defined in the function below. Please make sure that your definitions and units of 'exposure_map', 'background_rms' and 'image_data' are in accordance with the likelihood function. In particular, make sure that the Poisson noise contribution is defined in the count rate. 
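# The per-pixel noise model behind covariance_matrix above: Gaussian background plus
# Poisson shot noise of the non-negative counts, expressed as a variance. The data
# values, exposure time and background rms are synthetic.
import numpy as np

data = np.array([[10.0, -0.5], [2.0, 0.0]])  # image in counts per second
exposure_time = 100.0                        # seconds
background_rms = 0.1                         # background rms (counts per second)
d_pos = np.clip(data, 0, None)               # negative pixels carry no shot noise
variance = d_pos / exposure_time + background_rms**2
print(variance)  # diagonal of the noise covariance, as in ImageNoise.C_D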
- - """ - def __init__(self, image_data, exposure_time=None, background_rms=None, noise_map=None, gradient_boost_factor=None, - ra_at_xy_0=0, dec_at_xy_0=0, transform_pix2angle=None, ra_shift=0, dec_shift=0, phi_rot=0, - log_likelihood_constant=0, antenna_primary_beam=None, likelihood_method='diagonal', flux_scaling=1): + + def __init__( + self, + image_data, + exposure_time=None, + background_rms=None, + noise_map=None, + gradient_boost_factor=None, + ra_at_xy_0=0, + dec_at_xy_0=0, + transform_pix2angle=None, + ra_shift=0, + dec_shift=0, + phi_rot=0, + log_likelihood_constant=0, + antenna_primary_beam=None, + likelihood_method="diagonal", + flux_scaling=1, + ): """ :param image_data: 2d numpy array of the image data @@ -82,30 +96,52 @@ def __init__(self, image_data, exposure_time=None, background_rms=None, noise_ma cos_phi, sin_phi = np.cos(phi_rot), np.sin(phi_rot) rot_matrix = np.array([[cos_phi, -sin_phi], [sin_phi, cos_phi]]) transform_pix2angle_rot = np.dot(transform_pix2angle, rot_matrix) - PixelGrid.__init__(self, nx, ny, transform_pix2angle_rot, ra_at_xy_0 + ra_shift, dec_at_xy_0 + dec_shift, - antenna_primary_beam) - ImageNoise.__init__(self, image_data, exposure_time=exposure_time, background_rms=background_rms, - noise_map=noise_map, gradient_boost_factor=gradient_boost_factor, verbose=False, - flux_scaling=flux_scaling) + PixelGrid.__init__( + self, + nx, + ny, + transform_pix2angle_rot, + ra_at_xy_0 + ra_shift, + dec_at_xy_0 + dec_shift, + antenna_primary_beam, + ) + ImageNoise.__init__( + self, + image_data, + exposure_time=exposure_time, + background_rms=background_rms, + noise_map=noise_map, + gradient_boost_factor=gradient_boost_factor, + verbose=False, + flux_scaling=flux_scaling, + ) self._logL_constant = log_likelihood_constant self._logL_method = likelihood_method - if self._logL_method != 'diagonal' and self._logL_method != 'interferometry_natwt': - raise ValueError("likelihood_method %s not supported! likelihood_method can only be 'diagonal' or 'interferometry_natwt'!" % self._logL_method) + if ( + self._logL_method != "diagonal" + and self._logL_method != "interferometry_natwt" + ): + raise ValueError( + "likelihood_method %s not supported! likelihood_method can only be 'diagonal' or 'interferometry_natwt'!" + % self._logL_method + ) def update_data(self, image_data): - """ - - update the data as well as the error matrix estimated from it when done so using the data + """Update the data as well as the error matrix estimated from it when done so + using the data. :param image_data: 2d numpy array of same size as nx, ny :return: None """ nx, ny = np.shape(image_data) if not self._nx == nx and not self._ny == ny: - raise ValueError("shape of new data %s %s must equal old data %s %s!" % (nx, ny, self._nx, self._ny)) + raise ValueError( + "shape of new data %s %s must equal old data %s %s!" + % (nx, ny, self._nx, self._ny) + ) self._data = image_data - if hasattr(self, '_C_D') and self._noise_map is None: + if hasattr(self, "_C_D") and self._noise_map is None: del self._C_D @property @@ -117,32 +153,31 @@ def data(self): return self._data def log_likelihood(self, model, mask, additional_error_map=0): - """ - - computes the likelihood of the data given the model p(data|model) - The Gaussian errors are estimated with the covariance matrix, based on the model image. The errors include the - background rms value and the exposure time to compute the Poisson noise level (in Gaussian approximation). 
+ """Computes the likelihood of the data given the model p(data|model) The + Gaussian errors are estimated with the covariance matrix, based on the model + image. The errors include the background rms value and the exposure time to + compute the Poisson noise level (in Gaussian approximation). :param model: the model (same dimensions and units as data) - :param mask: bool (1, 0) values per pixel. If =0, the pixel is ignored in the likelihood - :param additional_error_map: additional error term (in same units as covariance matrix). - This can e.g. come from model errors in the PSF estimation. + :param mask: bool (1, 0) values per pixel. If =0, the pixel is ignored in the + likelihood + :param additional_error_map: additional error term (in same units as covariance + matrix). This can e.g. come from model errors in the PSF estimation. :return: the natural logarithm of the likelihood p(data|model) """ # if the likelihood method is assigned to be 'interferometry_natwt', it will return logL computed using the interfermetric likelihood function - if self._logL_method == 'interferometry_natwt': + if self._logL_method == "interferometry_natwt": return self.log_likelihood_interferometry(model) c_d = self.C_D_model(model) chi2 = (model - self._data) ** 2 / (c_d + np.abs(additional_error_map)) * mask chi2 = np.array(chi2) - log_likelihood = - np.sum(chi2) / 2 + log_likelihood = -np.sum(chi2) / 2 return log_likelihood def log_likelihood_interferometry(self, model): - """ - log_likelihood function for natural weighting interferometric images, - based on (placeholder for Nan Zhang's paper). + """log_likelihood function for natural weighting interferometric images, based + on (placeholder for Nan Zhang's paper). For the interferometry case, the model should be in the form [array1, array2], where array1 and array2 are unconvolved and convolved model images respectively. @@ -167,14 +202,13 @@ def log_likelihood_interferometry(self, model): xd = np.sum(model[0] * self._data) xAx = np.sum(model[0] * model[1]) - logL = - (xAx - 2 * xd) / (2 * self._background_rms ** 2) + self._logL_constant + logL = -(xAx - 2 * xd) / (2 * self._background_rms**2) + self._logL_constant return logL def likelihood_method(self): - """ + """Pass the likelihood_method to the ImageModel and will be used to identify the + method of likelihood computation in ImageLinearFit. - pass the likelihood_method to the ImageModel and will be used to identify the method of - likelihood computation in ImageLinearFit. 
:return: string, likelihood method """ return self._logL_method diff --git a/lenstronomy/Data/pixel_grid.py b/lenstronomy/Data/pixel_grid.py index b227c5617..0e1db505a 100644 --- a/lenstronomy/Data/pixel_grid.py +++ b/lenstronomy/Data/pixel_grid.py @@ -2,15 +2,22 @@ from lenstronomy.Data.coord_transforms import Coordinates from lenstronomy.Data.angular_sensitivity import AngularSensitivity -__all__ = ['PixelGrid'] +__all__ = ["PixelGrid"] class PixelGrid(Coordinates, AngularSensitivity): - """ - class that manages a specified pixel grid (rectangular at the moment) and its coordinates - """ - - def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, antenna_primary_beam=None): + """Class that manages a specified pixel grid (rectangular at the moment) and its + coordinates.""" + + def __init__( + self, + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + antenna_primary_beam=None, + ): """ :param nx: number of pixels in x-axis @@ -28,7 +35,9 @@ def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, antenna if antenna_primary_beam is not None: pbx, pby = np.shape(antenna_primary_beam) if (pbx, pby) != (nx, ny): - raise ValueError("The primary beam should have the same size with the image data!") + raise ValueError( + "The primary beam should have the same size with the image data!" + ) AngularSensitivity.__init__(self, antenna_primary_beam) @property @@ -64,13 +73,10 @@ def center(self): return np.mean(self._x_grid), np.mean(self._y_grid) def shift_coordinate_system(self, x_shift, y_shift, pixel_unit=False): - """ - shifts the coordinate system - :param x_shift: shift in x (or RA) - :param y_shift: shift in y (or DEC) - :param pixel_unit: bool, if True, units of pixels in input, otherwise RA/DEC - :return: updated data class with change in coordinate system - """ + """Shifts the coordinate system :param x_shift: shift in x (or RA) :param + y_shift: shift in y (or DEC) :param pixel_unit: bool, if True, units of pixels + in input, otherwise RA/DEC :return: updated data class with change in coordinate + system.""" self._shift_coordinates(x_shift, y_shift, pixel_unit=pixel_unit) self._x_grid, self._y_grid = self.coordinate_grid(self._nx, self._ny) @@ -80,4 +86,4 @@ def pixel_coordinates(self): :return: RA coords, DEC coords """ - return self._x_grid, self._y_grid \ No newline at end of file + return self._x_grid, self._y_grid diff --git a/lenstronomy/Data/psf.py b/lenstronomy/Data/psf.py index 89203cf24..8f4c6f95c 100644 --- a/lenstronomy/Data/psf.py +++ b/lenstronomy/Data/psf.py @@ -3,19 +3,29 @@ import lenstronomy.Util.util as util import warnings -__all__ = ['PSF'] +__all__ = ["PSF"] class PSF(object): - """ - Point Spread Function class. - This class describes and manages products used to perform the PSF modeling (convolution for extended surface - brightness and painting of PSF's for point sources). + """Point Spread Function class. + + This class describes and manages products used to perform the PSF modeling + (convolution for extended surface brightness and painting of PSF's for point + sources). 
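# Usage sketch for the PSF class whose constructor is reformatted just below: a
# Gaussian PSF needs fwhm and pixel_size, a pixelized one an odd-sized kernel.
# Module path and keyword names follow this diff; the numbers are arbitrary.
import numpy as np
from lenstronomy.Data.psf import PSF

psf_gaussian = PSF(psf_type="GAUSSIAN", fwhm=0.1, pixel_size=0.05)
kernel = np.ones((5, 5)) / 25.0  # odd axis length is required for PIXEL type
psf_pixel = PSF(psf_type="PIXEL", kernel_point_source=kernel)
print(psf_gaussian.kernel_point_source.shape, psf_pixel.kernel_point_source.shape)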
""" - def __init__(self, psf_type='NONE', fwhm=None, truncation=5, pixel_size=None, kernel_point_source=None, - psf_error_map=None, point_source_supersampling_factor=1, kernel_point_source_init=None, - kernel_point_source_normalisation=True): + def __init__( + self, + psf_type="NONE", + fwhm=None, + truncation=5, + pixel_size=None, + kernel_point_source=None, + psf_error_map=None, + point_source_supersampling_factor=1, + kernel_point_source_init=None, + kernel_point_source_normalisation=True, + ): """ :param psf_type: string, type of PSF: options are 'NONE', 'PIXEL', 'GAUSSIAN' @@ -39,30 +49,42 @@ def __init__(self, psf_type='NONE', fwhm=None, truncation=5, pixel_size=None, ke self.psf_type = psf_type self._pixel_size = pixel_size self.kernel_point_source_init = kernel_point_source_init - if self.psf_type == 'GAUSSIAN': + if self.psf_type == "GAUSSIAN": if fwhm is None: - raise ValueError('fwhm must be set for GAUSSIAN psf type!') + raise ValueError("fwhm must be set for GAUSSIAN psf type!") self._fwhm = fwhm self._sigma_gaussian = util.fwhm2sigma(self._fwhm) self._truncation = truncation self._point_source_supersampling_factor = 0 - elif self.psf_type == 'PIXEL': + elif self.psf_type == "PIXEL": if kernel_point_source is None: - raise ValueError('kernel_point_source needs to be specified for PIXEL PSF type!') + raise ValueError( + "kernel_point_source needs to be specified for PIXEL PSF type!" + ) if len(kernel_point_source) % 2 == 0: - raise ValueError('kernel needs to have odd axis number, not ', np.shape(kernel_point_source)) + raise ValueError( + "kernel needs to have odd axis number, not ", + np.shape(kernel_point_source), + ) if point_source_supersampling_factor > 1: self._kernel_point_source_supersampled = kernel_point_source - self._point_source_supersampling_factor = point_source_supersampling_factor - kernel_point_source = kernel_util.degrade_kernel(self._kernel_point_source_supersampled, self._point_source_supersampling_factor) + self._point_source_supersampling_factor = ( + point_source_supersampling_factor + ) + kernel_point_source = kernel_util.degrade_kernel( + self._kernel_point_source_supersampled, + self._point_source_supersampling_factor, + ) # making sure the PSF is positive semi-definite and do the normalisation if kernel_point_source_normalisation is true if np.min(kernel_point_source) < 0: - warnings.warn('Input PSF model has at least one negative element, which is unphysical except for a PSF of an interferometric array.') + warnings.warn( + "Input PSF model has at least one negative element, which is unphysical except for a PSF of an interferometric array." + ) self._kernel_point_source = kernel_point_source if kernel_point_source_normalisation is not False: self._kernel_point_source /= np.sum(kernel_point_source) - elif self.psf_type == 'NONE': + elif self.psf_type == "NONE": self._kernel_point_source = np.zeros((3, 3)) self._kernel_point_source[1, 1] = 1 else: @@ -70,86 +92,113 @@ def __init__(self, psf_type='NONE', fwhm=None, truncation=5, pixel_size=None, ke if psf_error_map is not None: n_kernel = len(self.kernel_point_source) self._psf_error_map = kernel_util.match_kernel_size(psf_error_map, n_kernel) - if self.psf_type == 'PIXEL' and point_source_supersampling_factor > 1: + if self.psf_type == "PIXEL" and point_source_supersampling_factor > 1: if len(psf_error_map) == len(self._kernel_point_source_supersampled): - Warning('psf_error_map has the same size as the super-sampled kernel. 
Make sure the units in the' - 'psf_error_map are on the down-sampled pixel scale.') + Warning( + "psf_error_map has the same size as the super-sampled kernel. Make sure the units in the" + "psf_error_map are on the down-sampled pixel scale." + ) self.psf_error_map_bool = True else: self.psf_error_map_bool = False @property def kernel_point_source(self): - if not hasattr(self, '_kernel_point_source'): - if self.psf_type == 'GAUSSIAN': - kernel_num_pix = min(round(self._truncation * self._fwhm / self._pixel_size), 201) + if not hasattr(self, "_kernel_point_source"): + if self.psf_type == "GAUSSIAN": + kernel_num_pix = min( + round(self._truncation * self._fwhm / self._pixel_size), 201 + ) if kernel_num_pix % 2 == 0: kernel_num_pix += 1 - self._kernel_point_source = kernel_util.kernel_gaussian(kernel_num_pix, self._pixel_size, self._fwhm) + self._kernel_point_source = kernel_util.kernel_gaussian( + kernel_num_pix, self._pixel_size, self._fwhm + ) return self._kernel_point_source @property def kernel_pixel(self): - """ - returns the convolution kernel for a uniform surface brightness on a pixel size + """Returns the convolution kernel for a uniform surface brightness on a pixel + size. :return: 2d numpy array """ - if not hasattr(self, '_kernel_pixel'): - self._kernel_pixel = kernel_util.pixel_kernel(self.kernel_point_source, subgrid_res=1) + if not hasattr(self, "_kernel_pixel"): + self._kernel_pixel = kernel_util.pixel_kernel( + self.kernel_point_source, subgrid_res=1 + ) return self._kernel_pixel def kernel_point_source_supersampled(self, supersampling_factor, updata_cache=True): - """ - generates (if not already available) a supersampled PSF with ood numbers of pixels centered - - :param supersampling_factor: int >=1, supersampling factor relative to pixel resolution - :param updata_cache: boolean, if True, updates the cached supersampling PSF if generated. - Attention, this will overwrite a previously used supersampled PSF if the resolution is changing. + """Generates (if not already available) a supersampled PSF with ood numbers of + pixels centered. + + :param supersampling_factor: int >=1, supersampling factor relative to pixel + resolution + :param updata_cache: boolean, if True, updates the cached supersampling PSF if + generated. Attention, this will overwrite a previously used supersampled PSF + if the resolution is changing. :return: super-sampled PSF as 2d numpy array """ - if hasattr(self, '_kernel_point_source_supersampled') and self._point_source_supersampling_factor == supersampling_factor: + if ( + hasattr(self, "_kernel_point_source_supersampled") + and self._point_source_supersampling_factor == supersampling_factor + ): kernel_point_source_supersampled = self._kernel_point_source_supersampled else: - if self.psf_type == 'GAUSSIAN': - kernel_numPix = self._truncation / self._pixel_size * supersampling_factor + if self.psf_type == "GAUSSIAN": + kernel_numPix = ( + self._truncation / self._pixel_size * supersampling_factor + ) kernel_numPix = int(round(kernel_numPix)) if kernel_numPix > 10000: - raise ValueError('The pixelized Gaussian kernel has a grid of %s pixels with a truncation at ' - '%s times the sigma of the Gaussian, exceeding the limit allowed.' % (kernel_numPix, self._truncation)) + raise ValueError( + "The pixelized Gaussian kernel has a grid of %s pixels with a truncation at " + "%s times the sigma of the Gaussian, exceeding the limit allowed." 
+ % (kernel_numPix, self._truncation) + ) if kernel_numPix % 2 == 0: kernel_numPix += 1 - kernel_point_source_supersampled = kernel_util.kernel_gaussian(kernel_numPix, self._pixel_size / supersampling_factor, self._fwhm) - - elif self.psf_type == 'PIXEL': - - kernel = kernel_util.subgrid_kernel(self.kernel_point_source, supersampling_factor, odd=True, num_iter=5) + kernel_point_source_supersampled = kernel_util.kernel_gaussian( + kernel_numPix, self._pixel_size / supersampling_factor, self._fwhm + ) + + elif self.psf_type == "PIXEL": + kernel = kernel_util.subgrid_kernel( + self.kernel_point_source, supersampling_factor, odd=True, num_iter=5 + ) n = len(self.kernel_point_source) n_new = n * supersampling_factor if n_new % 2 == 0: n_new -= 1 - if hasattr(self, '_kernel_point_source_supersampled'): - warnings.warn("Super-sampled point source kernel over-written due to different subsampling" - " size requested.", Warning) - kernel_point_source_supersampled = kernel_util.cut_psf(kernel, psf_size=n_new) - elif self.psf_type == 'NONE': + if hasattr(self, "_kernel_point_source_supersampled"): + warnings.warn( + "Super-sampled point source kernel over-written due to different subsampling" + " size requested.", + Warning, + ) + kernel_point_source_supersampled = kernel_util.cut_psf( + kernel, psf_size=n_new + ) + elif self.psf_type == "NONE": kernel_point_source_supersampled = self._kernel_point_source else: - raise ValueError('psf_type %s not valid!' % self.psf_type) + raise ValueError("psf_type %s not valid!" % self.psf_type) if updata_cache is True: - self._kernel_point_source_supersampled = kernel_point_source_supersampled + self._kernel_point_source_supersampled = ( + kernel_point_source_supersampled + ) self._point_source_supersampling_factor = supersampling_factor return kernel_point_source_supersampled def set_pixel_size(self, deltaPix): - """ - update pixel size + """Update pixel size. :param deltaPix: pixel size in angular units (arc seconds) :return: None """ self._pixel_size = deltaPix - if self.psf_type == 'GAUSSIAN': + if self.psf_type == "GAUSSIAN": try: del self._kernel_point_source except: @@ -157,15 +206,15 @@ def set_pixel_size(self, deltaPix): @property def psf_error_map(self): - """ - error variance of the normalized PSF. + """Error variance of the normalized PSF. + This error will be added to the pixel error around the position of point sources as follows: sigma^2_i += 'psf_error_map'_j * **2 :return: error variance of the normalized PSF. 
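For reference, the PSF hunks above preserve the existing construction logic (odd-sized kernels, optional normalisation, optional supersampling); only the formatting changes. A short sketch of the two main configurations, with illustrative numbers:

import numpy as np
from lenstronomy.Data.psf import PSF

# Gaussian PSF: 'fwhm' is required; the kernel extent is set by the 'truncation' parameter (default 5)
psf_gaussian = PSF(psf_type="GAUSSIAN", fwhm=0.7, pixel_size=0.1)
kernel = psf_gaussian.kernel_point_source  # built lazily, odd-sized, capped at 201 pixels per axis

# Pixelised PSF: the kernel needs an odd number of pixels per axis and is normalised by default
kernel_input = np.ones((11, 11)) / 11**2
psf_pixel = PSF(psf_type="PIXEL", kernel_point_source=kernel_input)
kernel_fine = psf_pixel.kernel_point_source_supersampled(supersampling_factor=3)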
Variance of :rtype: 2d numpy array of size of the PSF in pixel size (not supersampled) """ - if not hasattr(self, '_psf_error_map'): + if not hasattr(self, "_psf_error_map"): self._psf_error_map = np.zeros_like(self.kernel_point_source) return self._psf_error_map @@ -175,7 +224,7 @@ def fwhm(self): :return: full width at half maximum of kernel (in units of pixel) """ - if self.psf_type == 'GAUSSIAN': + if self.psf_type == "GAUSSIAN": return self._fwhm else: return kernel_util.fwhm_kernel(self.kernel_point_source) * self._pixel_size diff --git a/lenstronomy/GalKin/__init__.py b/lenstronomy/GalKin/__init__.py index fdfe3cff4..0f29aeab2 100644 --- a/lenstronomy/GalKin/__init__.py +++ b/lenstronomy/GalKin/__init__.py @@ -1,3 +1,3 @@ -__author__ = 'Simon Birrer' -__email__ = 'sibirrer@gmail.com' -__version__ = '0.1.0' +__author__ = "Simon Birrer" +__email__ = "sibirrer@gmail.com" +__version__ = "0.1.0" diff --git a/lenstronomy/GalKin/analytic_kinematics.py b/lenstronomy/GalKin/analytic_kinematics.py index 78835b37c..b7abb6162 100644 --- a/lenstronomy/GalKin/analytic_kinematics.py +++ b/lenstronomy/GalKin/analytic_kinematics.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from scipy.interpolate import interp1d @@ -9,12 +9,11 @@ import lenstronomy.Util.constants as const import math -__all__ = ['AnalyticKinematics'] +__all__ = ["AnalyticKinematics"] class AnalyticKinematics(Anisotropy): - """ - class to compute eqn 20 in Suyu+2010 with a Monte-Carlo from rendering from the + """Class to compute eqn 20 in Suyu+2010 with a Monte-Carlo from rendering from the light profile distribution and displacing them with a Gaussian seeing convolution. This class assumes spherical symmetry in light and mass distribution and @@ -29,10 +28,16 @@ class to compute eqn 20 in Suyu+2010 with a Monte-Carlo from rendering from the All units are meant to be in angular arc seconds. 
The physical units are fold in through the angular diameter distances - """ - def __init__(self, kwargs_cosmo, interpol_grid_num=100, log_integration=False, max_integrate=100, - min_integrate=0.001): + + def __init__( + self, + kwargs_cosmo, + interpol_grid_num=100, + log_integration=False, + max_integrate=100, + min_integrate=0.001, + ): """ :param kwargs_cosmo: keyword argument with angular diameter distances entering the Galkin.cosmo class @@ -44,19 +49,31 @@ def __init__(self, kwargs_cosmo, interpol_grid_num=100, log_integration=False, m self._interp_grid_num = interpol_grid_num self._log_int = log_integration - self._max_integrate = max_integrate # maximal integration (and interpolation) in units of arcsecs - self._min_integrate = min_integrate # min integration (and interpolation) in units of arcsecs + self._max_integrate = ( + max_integrate # maximal integration (and interpolation) in units of arcsecs + ) + self._min_integrate = ( + min_integrate # min integration (and interpolation) in units of arcsecs + ) self._max_interpolate = max_integrate # we chose to set the interpolation range to the integration range self._min_interpolate = min_integrate # we chose to set the interpolation range to the integration range self._cosmo = Cosmo(**kwargs_cosmo) self._spp = SPP() - Anisotropy.__init__(self, anisotropy_type='OM') + Anisotropy.__init__(self, anisotropy_type="OM") def _rho0_r0_gamma(self, theta_E, gamma): # equation (14) in Suyu+ 2010 - return -1 * math.gamma(gamma/2) / (np.sqrt(np.pi)*math.gamma((gamma-3)/2.)) * theta_E ** gamma / \ - self._cosmo.arcsec2phys_lens(theta_E) * self._cosmo.epsilon_crit * const.M_sun / const.Mpc ** 3 + return ( + -1 + * math.gamma(gamma / 2) + / (np.sqrt(np.pi) * math.gamma((gamma - 3) / 2.0)) + * theta_E**gamma + / self._cosmo.arcsec2phys_lens(theta_E) + * self._cosmo.epsilon_crit + * const.M_sun + / const.Mpc**3 + ) @staticmethod def draw_light(kwargs_light): @@ -65,45 +82,48 @@ def draw_light(kwargs_light): :param kwargs_light: keyword argument (list) of the light model :return: 3d radius (if possible), 2d projected radius, x-projected coordinate, y-projected coordinate """ - if 'a' not in kwargs_light: - kwargs_light['a'] = 0.551 * kwargs_light['r_eff'] - a = kwargs_light['a'] + if "a" not in kwargs_light: + kwargs_light["a"] = 0.551 * kwargs_light["r_eff"] + a = kwargs_light["a"] r = vel_util.draw_hernquist(a) R, x, y = vel_util.project2d_random(r) return r, R, x, y def _sigma_s2(self, r, R, r_ani, a, gamma, rho0_r0_gamma): - """ - projected velocity dispersion - :param r: 3d radius of the light tracer particle - :param R: 2d projected radius of the light tracer particle - :param r_ani: anisotropy radius - :param a: scale of the Hernquist light profile - :param gamma: power-law slope of the mass profile - :param rho0_r0_gamma: combination of Einstein radius and power-law slope as equation (14) in Suyu+ 2010 - :return: projected velocity dispersion - """ - beta = self.beta_r(r, **{'r_ani': r_ani}) - return (1 - beta * R**2/r**2) * self._sigma_r2_interp(r, a, gamma, rho0_r0_gamma, r_ani) + """Projected velocity dispersion :param r: 3d radius of the light tracer + particle :param R: 2d projected radius of the light tracer particle :param + r_ani: anisotropy radius :param a: scale of the Hernquist light profile :param + gamma: power-law slope of the mass profile :param rho0_r0_gamma: combination of + Einstein radius and power-law slope as equation (14) in Suyu+ 2010 :return: + projected velocity dispersion.""" + beta = self.beta_r(r, **{"r_ani": 
r_ani}) + return (1 - beta * R**2 / r**2) * self._sigma_r2_interp( + r, a, gamma, rho0_r0_gamma, r_ani + ) def sigma_s2(self, r, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - returns unweighted los velocity dispersion for a specified projected radius, with weight 1 + """Returns unweighted los velocity dispersion for a specified projected radius, + with weight 1. :param r: 3d radius (not needed for this calculation) :param R: 2d projected radius (in angular units of arcsec) - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. - :return: line-of-sight projected velocity dispersion at projected radius R from 3d radius r - """ - a, gamma, rho0_r0_gamma, r_ani = self._read_out_params(kwargs_mass, kwargs_light, kwargs_anisotropy) + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. + :return: line-of-sight projected velocity dispersion at projected radius R from + 3d radius r + """ + a, gamma, rho0_r0_gamma, r_ani = self._read_out_params( + kwargs_mass, kwargs_light, kwargs_anisotropy + ) return self._sigma_s2(r, R, r_ani, a, gamma, rho0_r0_gamma), 1 def sigma_r2(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - equation (19) in Suyu+ 2010 + """Equation (19) in Suyu+ 2010. :param r: 3d radius :param kwargs_mass: mass profile keyword arguments @@ -111,41 +131,46 @@ def sigma_r2(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): :param kwargs_anisotropy: anisotropy keyword arguments :return: velocity dispersion in [m/s] """ - a, gamma, rho0_r0_gamma, r_ani = self._read_out_params(kwargs_mass, kwargs_light, kwargs_anisotropy) + a, gamma, rho0_r0_gamma, r_ani = self._read_out_params( + kwargs_mass, kwargs_light, kwargs_anisotropy + ) return self._sigma_r2(r, a, gamma, rho0_r0_gamma, r_ani) def _read_out_params(self, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - reads the relevant parameters out of the keyword arguments and transforms them to the conventions used in this - class + """Reads the relevant parameters out of the keyword arguments and transforms + them to the conventions used in this class. 
:param kwargs_mass: mass profile keyword arguments :param kwargs_light: light profile keyword arguments :param kwargs_anisotropy: anisotropy keyword arguments :return: a (Rs of Hernquist profile), gamma, rho0_r0_gamma, r_ani """ - if 'a' not in kwargs_light: - kwargs_light['a'] = 0.551 * kwargs_light['r_eff'] - if 'rho0_r0_gamma' not in kwargs_mass: - kwargs_mass['rho0_r0_gamma'] = self._rho0_r0_gamma(kwargs_mass['theta_E'], kwargs_mass['gamma']) - a = kwargs_light['a'] - gamma = kwargs_mass['gamma'] - rho0_r0_gamma = kwargs_mass['rho0_r0_gamma'] - r_ani = kwargs_anisotropy['r_ani'] + if "a" not in kwargs_light: + kwargs_light["a"] = 0.551 * kwargs_light["r_eff"] + if "rho0_r0_gamma" not in kwargs_mass: + kwargs_mass["rho0_r0_gamma"] = self._rho0_r0_gamma( + kwargs_mass["theta_E"], kwargs_mass["gamma"] + ) + a = kwargs_light["a"] + gamma = kwargs_mass["gamma"] + rho0_r0_gamma = kwargs_mass["rho0_r0_gamma"] + r_ani = kwargs_anisotropy["r_ani"] return a, gamma, rho0_r0_gamma, r_ani def _sigma_r2(self, r, a, gamma, rho0_r0_gamma, r_ani): - """ - equation (19) in Suyu+ 2010 - """ + """Equation (19) in Suyu+ 2010.""" # first term - prefac1 = 4*np.pi * const.G * a**(-gamma) * rho0_r0_gamma / (3-gamma) - prefac2 = r * (r + a)**3/(r**2 + r_ani**2) + prefac1 = 4 * np.pi * const.G * a ** (-gamma) * rho0_r0_gamma / (3 - gamma) + prefac2 = r * (r + a) ** 3 / (r**2 + r_ani**2) # TODO check whether interpolation functions can speed this up - hyp1 = vel_util.hyp_2F1(a=2+gamma, b=gamma, c=3+gamma, z=1./(1+r/a)) - hyp2 = vel_util.hyp_2F1(a=3, b=gamma, c=1+gamma, z=-a/r) - fac = r_ani**2/a**2 * hyp1 / ((2+gamma) * (r/a + 1)**(2+gamma)) + hyp2 / (gamma*(r/a)**gamma) - return prefac1 * prefac2 * fac * (const.arcsec * self._cosmo.dd * const.Mpc) ** 2 + hyp1 = vel_util.hyp_2F1(a=2 + gamma, b=gamma, c=3 + gamma, z=1.0 / (1 + r / a)) + hyp2 = vel_util.hyp_2F1(a=3, b=gamma, c=1 + gamma, z=-a / r) + fac = r_ani**2 / a**2 * hyp1 / ( + (2 + gamma) * (r / a + 1) ** (2 + gamma) + ) + hyp2 / (gamma * (r / a) ** gamma) + return ( + prefac1 * prefac2 * fac * (const.arcsec * self._cosmo.dd * const.Mpc) ** 2 + ) def _sigma_r2_interp(self, r, a, gamma, rho0_r0_gamma, r_ani): """ @@ -157,37 +182,47 @@ def _sigma_r2_interp(self, r, a, gamma, rho0_r0_gamma, r_ani): :param r_ani: :return: """ - if not hasattr(self, '_interp_sigma_r2'): + if not hasattr(self, "_interp_sigma_r2"): min_log = np.log10(self._min_integrate) max_log = np.log10(self._max_integrate) r_array = np.logspace(min_log, max_log, self._interp_grid_num) I_R_sigma2_array = [] for r_i in r_array: - I_R_sigma2_array.append(self._sigma_r2(r_i, a, gamma, rho0_r0_gamma, r_ani)) - self._interp_sigma_r2 = interp1d(np.log(r_array), np.array(I_R_sigma2_array), fill_value="extrapolate") + I_R_sigma2_array.append( + self._sigma_r2(r_i, a, gamma, rho0_r0_gamma, r_ani) + ) + self._interp_sigma_r2 = interp1d( + np.log(r_array), np.array(I_R_sigma2_array), fill_value="extrapolate" + ) return self._interp_sigma_r2(np.log(r)) def grav_potential(self, r, kwargs_mass): - """ - Gravitational potential in SI units + """Gravitational potential in SI units. 
:param r: radius (arc seconds) :param kwargs_mass: :return: gravitational potential """ - theta_E = kwargs_mass['theta_E'] - gamma = kwargs_mass['gamma'] + theta_E = kwargs_mass["theta_E"] + gamma = kwargs_mass["gamma"] mass_dimless = self._spp.mass_3d_lens(r, theta_E, gamma) - mass_dim = mass_dimless * const.arcsec ** 2 * self._cosmo.dd * self._cosmo.ds / self._cosmo.dds * const.Mpc * \ - const.c ** 2 / (4 * np.pi * const.G) + mass_dim = ( + mass_dimless + * const.arcsec**2 + * self._cosmo.dd + * self._cosmo.ds + / self._cosmo.dds + * const.Mpc + * const.c**2 + / (4 * np.pi * const.G) + ) grav_pot = -const.G * mass_dim / (r * const.arcsec * self._cosmo.dd * const.Mpc) return grav_pot def delete_cache(self): - """ - deletes temporary cache tight to a specific model + """Deletes temporary cache tight to a specific model. :return: """ - if hasattr(self, '_interp_sigma_r2'): + if hasattr(self, "_interp_sigma_r2"): del self._interp_sigma_r2 diff --git a/lenstronomy/GalKin/anisotropy.py b/lenstronomy/GalKin/anisotropy.py index 917c9aab4..7b740ec33 100644 --- a/lenstronomy/GalKin/anisotropy.py +++ b/lenstronomy/GalKin/anisotropy.py @@ -4,6 +4,7 @@ from scipy.interpolate import interp1d from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -24,24 +25,23 @@ def __init__(self, anisotropy_type): :param anisotropy_type: string, anisotropy model type """ self._type = anisotropy_type - if self._type == 'const': + if self._type == "const": self._model = Const() - elif self._type == 'radial': + elif self._type == "radial": self._model = Radial() - elif self._type == 'isotropic': + elif self._type == "isotropic": self._model = Isotropic() - elif self._type == 'OM': + elif self._type == "OM": self._model = OsipkovMerritt() - elif self._type == 'GOM': + elif self._type == "GOM": self._model = GeneralizedOM() - elif self._type == 'Colin': + elif self._type == "Colin": self._model = Colin() else: - raise ValueError('anisotropy type %s not supported!' % self._type) + raise ValueError("anisotropy type %s not supported!" % self._type) def beta_r(self, r, **kwargs): - """ - returns the anisotropy parameter at a given radius + """Returns the anisotropy parameter at a given radius. :param r: 3d radius :param kwargs: parameters of the specified anisotropy model @@ -50,8 +50,7 @@ def beta_r(self, r, **kwargs): return self._model.beta_r(r, **kwargs) def K(self, r, R, **kwargs): - """ - equation A16 im Mamon & Lokas for Osipkov&Merrit anisotropy + """Equation A16 im Mamon & Lokas for Osipkov&Merrit anisotropy. :param r: 3d radius :param R: projected 2d radius @@ -61,9 +60,7 @@ def K(self, r, R, **kwargs): return self._model.K(r, R, **kwargs) def anisotropy_solution(self, r, **kwargs): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) + """The solution to d ln(f)/ d ln(r) = 2 beta(r) :param r: 3d radius :param kwargs: parameters of the specified anisotropy model @@ -72,28 +69,24 @@ def anisotropy_solution(self, r, **kwargs): return self._model.anisotropy_solution(r, **kwargs) def delete_anisotropy_cache(self): - """ - deletes cached interpolations for a fixed anisotropy model + """Deletes cached interpolations for a fixed anisotropy model. 
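For reference, the AnalyticKinematics changes above are likewise cosmetic. A usage sketch with illustrative values; the distance keywords mirror the Cosmo(d_d, d_s, d_ds) constructor reformatted further down in this diff:

from lenstronomy.GalKin.analytic_kinematics import AnalyticKinematics

# angular diameter distances in Mpc, passed through to the GalKin Cosmo class
kwargs_cosmo = {"d_d": 1000.0, "d_s": 1500.0, "d_ds": 800.0}
kin = AnalyticKinematics(kwargs_cosmo)

kwargs_mass = {"theta_E": 1.2, "gamma": 2.0}  # power-law mass: Einstein radius and logarithmic slope
kwargs_light = {"r_eff": 0.8}                 # Hernquist light; the scale defaults to a = 0.551 * r_eff
kwargs_anisotropy = {"r_ani": 1.0}            # Osipkov-Merritt anisotropy radius

# squared radial velocity dispersion at r = 1 arcsec, equation (19) in Suyu+ 2010
sigma_r2 = kin.sigma_r2(1.0, kwargs_mass, kwargs_light, kwargs_anisotropy)
sigma_r_kms = sigma_r2**0.5 / 1000.0  # convert from m/s to km/s
kin.delete_cache()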
:return: None """ - if hasattr(self._model, 'delete_cache'): + if hasattr(self._model, "delete_cache"): self._model.delete_cache() @export class Const(object): - """ - constant anisotropy model class - See Mamon & Lokas 2005 for details - """ + """Constant anisotropy model class See Mamon & Lokas 2005 for details.""" + def __init__(self): pass @staticmethod def K(r, R, beta): - """ - equation A16 im Mamon & Lokas for constant anisotropy + """Equation A16 im Mamon & Lokas for constant anisotropy. :param r: 3d radius :param R: projected 2d radius @@ -101,15 +94,20 @@ def K(r, R, beta): :return: K(r, R, beta) """ u = r / R - k = np.sqrt(1 - 1. / u ** 2) / (1. - 2 * beta) + np.sqrt(np.pi) / 2 * special.gamma( - beta - 1. / 2) / special.gamma(beta) \ - * (3. / 2 - beta) * u ** (2 * beta - 1.) * (1 - special.betainc(beta + 1. / 2, 1. / 2, 1. / u ** 2)) + k = np.sqrt(1 - 1.0 / u**2) / (1.0 - 2 * beta) + np.sqrt( + np.pi + ) / 2 * special.gamma(beta - 1.0 / 2) / special.gamma(beta) * ( + 3.0 / 2 - beta + ) * u ** ( + 2 * beta - 1.0 + ) * ( + 1 - special.betainc(beta + 1.0 / 2, 1.0 / 2, 1.0 / u**2) + ) return k @staticmethod def beta_r(r, beta): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :param beta: anisotropy @@ -118,55 +116,47 @@ def beta_r(r, beta): return beta def anisotropy_solution(self, r, **kwargs): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) + """The solution to d ln(f)/ d ln(r) = 2 beta(r) :param r: 3d radius :param kwargs: parameters of the specified anisotropy model :return: f(r) """ - raise ValueError('routine not supported yet for constant anisotropy model!') + raise ValueError("routine not supported yet for constant anisotropy model!") @export class Isotropic(object): - """ - class for isotropic (beta=0) stellar orbits - See Mamon & Lokas 2005 for details - """ + """Class for isotropic (beta=0) stellar orbits See Mamon & Lokas 2005 for + details.""" + def __init__(self): pass @staticmethod def K(r, R): - """ - equation A16 im Mamon & Lokas for constant anisotropy + """Equation A16 im Mamon & Lokas for constant anisotropy. :param r: 3d radius :param R: projected 2d radius :return: K(r, R) """ u = r / R - k = np.sqrt(1 - 1. / u ** 2) + k = np.sqrt(1 - 1.0 / u**2) return k @staticmethod def beta_r(r): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :return: beta """ - return 0. + return 0.0 @staticmethod def anisotropy_solution(r, **kwargs): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) - See e.g. A3 in Mamon & Lokas + """The solution to d ln(f)/ d ln(r) = 2 beta(r) See e.g. A3 in Mamon & Lokas. :param r: 3d radius :param kwargs: parameters of the specified anisotropy model @@ -177,42 +167,39 @@ def anisotropy_solution(r, **kwargs): @export class Radial(object): - """ - class for radial (beta=1) stellar orbits - See Mamon & Lokas 2005 for details - """ + """Class for radial (beta=1) stellar orbits See Mamon & Lokas 2005 for details.""" + def __init__(self): pass @staticmethod def K(r, R): - """ - equation A16 im Mamon & Lokas for constant anisotropy + """Equation A16 im Mamon & Lokas for constant anisotropy. :param r: 3d radius :param R: projected 2d radius :return: K(r, R) """ u = r / R - k = np.pi / 4 * u - 1. / 2 * np.sqrt(1 - 1. / u ** 2) - u / 2. * np.arcsin(1. 
/ u) + k = ( + np.pi / 4 * u + - 1.0 / 2 * np.sqrt(1 - 1.0 / u**2) + - u / 2.0 * np.arcsin(1.0 / u) + ) return k @staticmethod def beta_r(r): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :return: beta """ - return 1. + return 1.0 @staticmethod def anisotropy_solution(r): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) - See e.g. A4 in Mamon & Lokas + """The solution to d ln(f)/ d ln(r) = 2 beta(r) See e.g. A4 in Mamon & Lokas. :param r: 3d radius :return: f(r) @@ -222,17 +209,14 @@ def anisotropy_solution(r): @export class OsipkovMerritt(object): - """ - class for Osipkov&Merrit stellar orbits - See Mamon & Lokas 2005 for details - """ + """Class for Osipkov&Merrit stellar orbits See Mamon & Lokas 2005 for details.""" + def __init__(self): pass @staticmethod def K(r, R, r_ani): - """ - equation A16 im Mamon & Lokas 2005 for Osipkov&Merrit anisotropy + """Equation A16 im Mamon & Lokas 2005 for Osipkov&Merrit anisotropy. :param r: 3d radius :param R: projected 2d radius @@ -241,27 +225,28 @@ def K(r, R, r_ani): """ u = r / R ua = r_ani / R - k = (ua ** 2 + 1. / 2) / (ua ** 2 + 1) ** (3. / 2) * (u ** 2 + ua ** 2) / u * np.arctan( - np.sqrt((u ** 2 - 1) / (ua ** 2 + 1))) - 1. / 2 / (ua ** 2 + 1) * np.sqrt(1 - 1. / u ** 2) + k = (ua**2 + 1.0 / 2) / (ua**2 + 1) ** (3.0 / 2) * ( + u**2 + ua**2 + ) / u * np.arctan(np.sqrt((u**2 - 1) / (ua**2 + 1))) - 1.0 / 2 / ( + ua**2 + 1 + ) * np.sqrt( + 1 - 1.0 / u**2 + ) return k @staticmethod def beta_r(r, r_ani): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :param r_ani: anisotropy radius :return: beta """ - return r**2/(r_ani**2 + r**2) + return r**2 / (r_ani**2 + r**2) @staticmethod def anisotropy_solution(r, r_ani): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) - See e.g. A5 in Mamon & Lokas + """The solution to d ln(f)/ d ln(r) = 2 beta(r) See e.g. A5 in Mamon & Lokas. :param r: 3d radius :param r_ani: anisotropy radius @@ -272,26 +257,26 @@ def anisotropy_solution(r, r_ani): @export class GeneralizedOM(object): - """ - generalized Osipkov&Merrit profile + """Generalized Osipkov&Merrit profile. + see Agnello et al. 2014 https://arxiv.org/pdf/1401.4462.pdf b(r) = beta_inf * r^2 / (r^2 + r_ani^2) """ + def __init__(self): - self._z_interp = np.append(-np.flip(np.logspace(-1, 3, 200)**2), 0) + self._z_interp = np.append(-np.flip(np.logspace(-1, 3, 200) ** 2), 0) # self._z_interp = -np.linspace(-200, 0, 200)**2 # z = (R**2 - r**2) / (r_ani**2 + R**2) @staticmethod def beta_r(r, r_ani, beta_inf): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :param r_ani: anisotropy radius :param beta_inf: anisotropy at infinity :return: beta """ - return beta_inf * r**2/(r_ani**2 + r**2) + return beta_inf * r**2 / (r_ani**2 + r**2) def K(self, r, R, r_ani, beta_inf): """ @@ -308,10 +293,8 @@ def K(self, r, R, r_ani, beta_inf): @staticmethod def anisotropy_solution(r, r_ani, beta_inf): - """ - the solution to - d ln(f)/ d ln(r) = 2 beta(r) - See e.g. A5 in Mamon & Lokas with a scaling (nominator of Agnello et al. 2014 Equation (12) + """The solution to d ln(f)/ d ln(r) = 2 beta(r) See e.g. A5 in Mamon & Lokas + with a scaling (nominator of Agnello et al. 
2014 Equation (12) :param r: 3d radius :param r_ani: anisotropy radius @@ -321,14 +304,14 @@ def anisotropy_solution(r, r_ani, beta_inf): return (r**2 + r_ani**2) ** beta_inf def delete_cache(self): - """ - deletes the interpolation function of the hypergeometic function for a specific beta_inf + """Deletes the interpolation function of the hypergeometic function for a + specific beta_inf. :return: deleted self variables """ - if hasattr(self, '_f_12_interp'): + if hasattr(self, "_f_12_interp"): del self._f_12_interp - if hasattr(self, '_f_32_interp'): + if hasattr(self, "_f_32_interp"): del self._f_32_interp def _k_beta(self, r, R, r_ani, beta_inf): @@ -344,8 +327,15 @@ def _k_beta(self, r, R, r_ani, beta_inf): """ z = (R**2 - r**2) / (r_ani**2 + R**2) # ((r**2 + r_ani**2) / (R**2 + r_ani**2)) ** beta_inf - return - self.beta_r(R, r_ani, beta_inf) * self._j_beta(R, r, r_ani, beta_inf) *\ - np.sqrt(r**2 - R**2) * (self._F_12(z, beta_inf) + 2. * (1 - r**2/R**2) / 3 * self._F_32(z, beta_inf)) + return ( + -self.beta_r(R, r_ani, beta_inf) + * self._j_beta(R, r, r_ani, beta_inf) + * np.sqrt(r**2 - R**2) + * ( + self._F_12(z, beta_inf) + + 2.0 * (1 - r**2 / R**2) / 3 * self._F_32(z, beta_inf) + ) + ) def _F_12(self, z, beta_inf): """ @@ -354,9 +344,11 @@ def _F_12(self, z, beta_inf): :param beta_inf: anisotropy at infinity :return: _F(1/2, z, beta_inf) """ - if not hasattr(self, '_f_12_interp'): - f_12_interp = self._F(1 / 2., self._z_interp, beta_inf) - self._f_12_interp = interp1d(self._z_interp, f_12_interp, kind='cubic', fill_value="extrapolate") + if not hasattr(self, "_f_12_interp"): + f_12_interp = self._F(1 / 2.0, self._z_interp, beta_inf) + self._f_12_interp = interp1d( + self._z_interp, f_12_interp, kind="cubic", fill_value="extrapolate" + ) return self._f_12_interp(z) def _F_32(self, z, beta_inf): @@ -366,55 +358,56 @@ def _F_32(self, z, beta_inf): :param beta_inf: anisotropy at infinity :return: _F(3/2, z, beta_inf) """ - if not hasattr(self, '_f_32_interp'): - f_32_interp = self._F(3 / 2., self._z_interp, beta_inf) - self._f_32_interp = interp1d(self._z_interp, f_32_interp, kind='cubic', fill_value="extrapolate") + if not hasattr(self, "_f_32_interp"): + f_32_interp = self._F(3 / 2.0, self._z_interp, beta_inf) + self._f_32_interp = interp1d( + self._z_interp, f_32_interp, kind="cubic", fill_value="extrapolate" + ) return self._f_32_interp(z) @staticmethod def _j_beta(r, s, r_ani, beta_inf): - """ - equation (12) in Agnello et al. 2014 + """Equation (12) in Agnello et al. 2014. :param r: :param s: - :param r_ani: - :param beta_inf + :param r_ani: :param beta_inf :return: """ return ((s**2 + r_ani**2) / (r**2 + r_ani**2)) ** beta_inf @staticmethod def _F(a, z, beta_inf): - """ - the hypergeometric function 2F1 (a, 1 +beta_inf, a + 1, z) + """The hypergeometric function 2F1 (a, 1 +beta_inf, a + 1, z) :param a: :param z: :return: """ if isinstance(z, int) or isinstance(z, float): - return velocity_util.hyp_2F1(a=a, b=1+beta_inf, c=a+1, z=z) + return velocity_util.hyp_2F1(a=a, b=1 + beta_inf, c=a + 1, z=z) else: _F_array = [] for z_i in z: - _F_array.append(velocity_util.hyp_2F1(a=a, b=1+beta_inf, c=a+1, z=z_i)) + _F_array.append( + velocity_util.hyp_2F1(a=a, b=1 + beta_inf, c=a + 1, z=z_i) + ) return np.array(_F_array, dtype=float) @export class Colin(object): + """Class for stellar orbits anisotropy parameter based on Colin et al. + + (2000) See Mamon & Lokas 2005 for details """ - class for stellar orbits anisotropy parameter based on Colin et al. 
(2000) - See Mamon & Lokas 2005 for details - """ + def __init__(self): pass @staticmethod def K(r, R, r_ani): - """ - equation A16 im Mamon & Lokas for Osipkov&Merrit anisotropy + """Equation A16 im Mamon & Lokas for Osipkov&Merrit anisotropy. :param r: 3d radius :param R: projected 2d radius @@ -423,25 +416,44 @@ def K(r, R, r_ani): """ u = r / R if np.min(u) < 1: - raise ValueError("3d radius is smaller than projected radius! Does not make sense.") + raise ValueError( + "3d radius is smaller than projected radius! Does not make sense." + ) ua = r_ani / R if ua == 1: - k = (1 + 1. / u) * np.arccosh(u) - 1. / 6 * (8. / u + 7) * np.sqrt((u - 1.) / (u + 1.)) + k = (1 + 1.0 / u) * np.arccosh(u) - 1.0 / 6 * (8.0 / u + 7) * np.sqrt( + (u - 1.0) / (u + 1.0) + ) elif ua > 1: - k = 0.5 / (ua ** 2 - 1) * np.sqrt(1 - 1. / u ** 2) + (1. + ua / u) * np.arccosh(u) - np.sign(ua - 1) * ua *\ - (ua ** 2 - 0.5) / np.abs(ua ** 2 - 1) ** (3. / 2) * (1. + ua / u) * np.arccosh((ua * u + 1) / (u + ua)) + k = ( + 0.5 / (ua**2 - 1) * np.sqrt(1 - 1.0 / u**2) + + (1.0 + ua / u) * np.arccosh(u) + - np.sign(ua - 1) + * ua + * (ua**2 - 0.5) + / np.abs(ua**2 - 1) ** (3.0 / 2) + * (1.0 + ua / u) + * np.arccosh((ua * u + 1) / (u + ua)) + ) else: # ua < 1 - k = 0.5 / (ua ** 2 - 1) * np.sqrt(1 - 1. / u ** 2) + (1. + ua / u) * np.arccosh(u) - np.sign(ua - 1) * ua *\ - (ua ** 2 - 0.5) / np.abs(ua ** 2 - 1) ** (3. / 2) * (1. + ua / u) * np.arccos((ua * u + 1) / (u + ua)) + k = ( + 0.5 / (ua**2 - 1) * np.sqrt(1 - 1.0 / u**2) + + (1.0 + ua / u) * np.arccosh(u) + - np.sign(ua - 1) + * ua + * (ua**2 - 0.5) + / np.abs(ua**2 - 1) ** (3.0 / 2) + * (1.0 + ua / u) + * np.arccos((ua * u + 1) / (u + ua)) + ) return k @staticmethod def beta_r(r, r_ani): - """ - anisotropy as a function of radius + """Anisotropy as a function of radius. :param r: 3d radius :param r_ani: anisotropy radius :return: beta """ - return 1./2 * r / (r + r_ani) + return 1.0 / 2 * r / (r + r_ani) diff --git a/lenstronomy/GalKin/aperture.py b/lenstronomy/GalKin/aperture.py index b15d8bbab..33b9b0dc2 100644 --- a/lenstronomy/GalKin/aperture.py +++ b/lenstronomy/GalKin/aperture.py @@ -1,10 +1,8 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.GalKin.aperture_types import Shell, Slit, IFUShells, Frame -__all__ = ['Aperture'] - - +__all__ = ["Aperture"] """ class that defines the aperture of the measurement (e.g. slit, integral field spectroscopy regions etc) @@ -18,9 +16,8 @@ class that defines the aperture of the measurement (e.g. slit, integral field sp class Aperture(object): - """ - defines mask(s) of spectra, can handle IFU and single slit/box type data. - """ + """Defines mask(s) of spectra, can handle IFU and single slit/box type data.""" + def __init__(self, aperture_type, **kwargs_aperture): """ @@ -28,17 +25,19 @@ def __init__(self, aperture_type, **kwargs_aperture): :param kwargs_aperture: keyword arguments reflecting the aperture type chosen. We refer to the specific class instances for documentation. """ - if aperture_type == 'slit': + if aperture_type == "slit": self._aperture = Slit(**kwargs_aperture) - elif aperture_type == 'shell': + elif aperture_type == "shell": self._aperture = Shell(**kwargs_aperture) - elif aperture_type == 'IFU_shells': + elif aperture_type == "IFU_shells": self._aperture = IFUShells(**kwargs_aperture) - elif aperture_type == 'frame': + elif aperture_type == "frame": self._aperture = Frame(**kwargs_aperture) else: - raise ValueError("aperture type %s not implemented! 
Available are 'slit', 'shell', 'IFU_shells'. " - % aperture_type) + raise ValueError( + "aperture type %s not implemented! Available are 'slit', 'shell', 'IFU_shells'. " + % aperture_type + ) self.aperture_type = aperture_type def aperture_select(self, ra, dec): diff --git a/lenstronomy/GalKin/aperture_types.py b/lenstronomy/GalKin/aperture_types.py index ca4aa0863..b9ad1a787 100644 --- a/lenstronomy/GalKin/aperture_types.py +++ b/lenstronomy/GalKin/aperture_types.py @@ -1,16 +1,15 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Slit(object): - """ - Slit aperture description - """ + """Slit aperture description.""" def __init__(self, length, width, center_ra=0, center_dec=0, angle=0): """ @@ -33,12 +32,22 @@ def aperture_select(self, ra, dec): :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """ - return slit_select(ra, dec, self._length, self._width, self._center_ra, self._center_dec, self._angle), 0 + return ( + slit_select( + ra, + dec, + self._length, + self._width, + self._center_ra, + self._center_dec, + self._angle, + ), + 0, + ) @property def num_segments(self): - """ - number of segments with separate measurements of the velocity dispersion + """Number of segments with separate measurements of the velocity dispersion. :return: int """ @@ -61,9 +70,9 @@ def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0): ra_ = ra - center_ra dec_ = dec - center_dec x = np.cos(angle) * ra_ + np.sin(angle) * dec_ - y = - np.sin(angle) * ra_ + np.cos(angle) * dec_ + y = -np.sin(angle) * ra_ + np.cos(angle) * dec_ - if abs(x) < length / 2. and abs(y) < width / 2.: + if abs(x) < length / 2.0 and abs(y) < width / 2.0: return True else: return False @@ -71,9 +80,8 @@ def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0): @export class Frame(object): - """ - rectangular box with a hole in the middle (also rectangular), effectively a frame - """ + """Rectangular box with a hole in the middle (also rectangular), effectively a + frame.""" def __init__(self, width_outer, width_inner, center_ra=0, center_dec=0, angle=0): """ @@ -96,12 +104,22 @@ def aperture_select(self, ra, dec): :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """ - return frame_select(ra, dec, self._width_outer, self._width_inner, self._center_ra, self._center_dec, self._angle), 0 + return ( + frame_select( + ra, + dec, + self._width_outer, + self._width_inner, + self._center_ra, + self._center_dec, + self._angle, + ), + 0, + ) @property def num_segments(self): - """ - number of segments with separate measurements of the velocity dispersion + """Number of segments with separate measurements of the velocity dispersion. :return: int """ @@ -124,9 +142,9 @@ def frame_select(ra, dec, width_outer, width_inner, center_ra=0, center_dec=0, a ra_ = ra - center_ra dec_ = dec - center_dec x = np.cos(angle) * ra_ + np.sin(angle) * dec_ - y = - np.sin(angle) * ra_ + np.cos(angle) * dec_ - if abs(x) < width_outer / 2. and abs(y) < width_outer / 2.: - if abs(x) < width_inner / 2. 
and abs(y) < width_inner / 2.: + y = -np.sin(angle) * ra_ + np.cos(angle) * dec_ + if abs(x) < width_outer / 2.0 and abs(y) < width_outer / 2.0: + if abs(x) < width_inner / 2.0 and abs(y) < width_inner / 2.0: return False else: return True @@ -135,9 +153,7 @@ def frame_select(ra, dec, width_outer, width_inner, center_ra=0, center_dec=0, a @export class Shell(object): - """ - Shell aperture - """ + """Shell aperture.""" def __init__(self, r_in, r_out, center_ra=0, center_dec=0): """ @@ -157,12 +173,16 @@ def aperture_select(self, ra, dec): :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """ - return shell_select(ra, dec, self._r_in, self._r_out, self._center_ra, self._center_dec), 0 + return ( + shell_select( + ra, dec, self._r_in, self._r_out, self._center_ra, self._center_dec + ), + 0, + ) @property def num_segments(self): - """ - number of segments with separate measurements of the velocity dispersion + """Number of segments with separate measurements of the velocity dispersion. :return: int """ @@ -183,7 +203,7 @@ def shell_select(ra, dec, r_in, r_out, center_ra=0, center_dec=0): """ x = ra - center_ra y = dec - center_dec - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) if (r >= r_in) and (r < r_out): return True else: @@ -192,9 +212,9 @@ def shell_select(ra, dec, r_in, r_out, center_ra=0, center_dec=0): @export class IFUShells(object): - """ - class for an Integral Field Unit spectrograph with azimuthal shells where the kinematics are measured - """ + """Class for an Integral Field Unit spectrograph with azimuthal shells where the + kinematics are measured.""" + def __init__(self, r_bins, center_ra=0, center_dec=0): """ @@ -213,14 +233,14 @@ def aperture_select(self, ra, dec): :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise, index of shell """ - return shell_ifu_select(ra, dec, self._r_bins, self._center_ra, self._center_dec) + return shell_ifu_select( + ra, dec, self._r_bins, self._center_ra, self._center_dec + ) @property def num_segments(self): - """ - number of segments with separate measurements of the velocity dispersion - :return: int - """ + """Number of segments with separate measurements of the velocity dispersion + :return: int.""" return len(self._r_bins) - 1 @@ -238,8 +258,8 @@ def shell_ifu_select(ra, dec, r_bin, center_ra=0, center_dec=0): """ x = ra - center_ra y = dec - center_dec - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) for i in range(0, len(r_bin) - 1): - if (r >= r_bin[i]) and (r < r_bin[i+1]): + if (r >= r_bin[i]) and (r < r_bin[i + 1]): return True, i return False, None diff --git a/lenstronomy/GalKin/cosmo.py b/lenstronomy/GalKin/cosmo.py index ca6ccbf93..2d61d9aa7 100644 --- a/lenstronomy/GalKin/cosmo.py +++ b/lenstronomy/GalKin/cosmo.py @@ -1,13 +1,12 @@ import lenstronomy.Util.constants as const import numpy as np -__all__ = ['Cosmo'] +__all__ = ["Cosmo"] class Cosmo(object): - """ - cosmological quantities - """ + """Cosmological quantities.""" + def __init__(self, d_d, d_s, d_ds): """ @@ -16,15 +15,22 @@ def __init__(self, d_d, d_s, d_ds): :param d_ds: angular diameter distance between deflector and source """ if d_ds <= 0 or d_s <= 0 or d_d <= 0: - raise ValueError('input angular diameter distances Dd: %s, Ds: %s, Dds: %s are not suppored for a lens' - ' model!' 
% (d_d, d_s, d_ds)) - self.dd = float(d_d) # angular diameter distance from observer to deflector in physical Mpc - self.ds = float(d_s) # angular diameter distance from observer to source in physical Mpc - self.dds = float(d_ds) # angular diameter distance from deflector to source in physical Mpc + raise ValueError( + "input angular diameter distances Dd: %s, Ds: %s, Dds: %s are not suppored for a lens" + " model!" % (d_d, d_s, d_ds) + ) + self.dd = float( + d_d + ) # angular diameter distance from observer to deflector in physical Mpc + self.ds = float( + d_s + ) # angular diameter distance from observer to source in physical Mpc + self.dds = float( + d_ds + ) # angular diameter distance from deflector to source in physical Mpc def arcsec2phys_lens(self, theta): - """ - converts are seconds to physical units on the deflector + """Converts are seconds to physical units on the deflector. :param theta: angle observed on the sky in units of arc seconds :return: physical distance of the angle in units of Mpc @@ -33,10 +39,11 @@ def arcsec2phys_lens(self, theta): @property def epsilon_crit(self): - """ - returns the critical projected mass density in units of M_sun/Mpc^2 (physical units) - """ - const_si = const.c**2 / (4*np.pi * const.G) # c^2/(4*pi*G) in units of [kg/m] + """Returns the critical projected mass density in units of M_sun/Mpc^2 (physical + units)""" + const_si = const.c**2 / ( + 4 * np.pi * const.G + ) # c^2/(4*pi*G) in units of [kg/m] conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc] pre_const = const_si * conversion # c^2/(4*pi*G) in units of [M_sun/Mpc] epsilon_crit = self.ds / (self.dd * self.dds) * pre_const # [M_sun/Mpc^2] diff --git a/lenstronomy/GalKin/galkin.py b/lenstronomy/GalKin/galkin.py index 351c40a74..15d20aca1 100644 --- a/lenstronomy/GalKin/galkin.py +++ b/lenstronomy/GalKin/galkin.py @@ -3,12 +3,12 @@ import numpy as np -__all__ = ['Galkin'] +__all__ = ["Galkin"] class Galkin(GalkinModel, GalkinObservation): - """ - Major class to compute velocity dispersion measurements given light and mass models + """Major class to compute velocity dispersion measurements given light and mass + models. The class supports any mass and light distribution (and superposition thereof) that has a 3d correspondance in their 2d lens model distribution. For models that do not have this correspondance, you may want to apply a @@ -45,10 +45,17 @@ class Galkin(GalkinModel, GalkinObservation): These numerical options should be chosen to allow for a converged result (within your tolerance) but not too conservative to impact too much the computational cost. Reasonable values might depend on the specific problem. 
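For reference, a rough end-to-end sketch of the Galkin class whose constructor is reformatted below. The aperture, PSF and cosmology keyword names follow the GalKin conventions used elsewhere in the package and are assumptions here, as are all numerical values:

from lenstronomy.GalKin.galkin import Galkin

kwargs_model = {"anisotropy_model": "OM"}  # analytic kinematics requires the Osipkov-Merritt model
kwargs_aperture = {"aperture_type": "slit", "length": 1.0, "width": 0.7,
                   "center_ra": 0, "center_dec": 0, "angle": 0}
kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 0.7}            # seeing FWHM in arcsec
kwargs_cosmo = {"d_d": 1000.0, "d_s": 1500.0, "d_ds": 800.0}  # angular diameter distances in Mpc

galkin = Galkin(kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo,
                analytic_kinematics=True)
sigma_v = galkin.dispersion(kwargs_mass={"theta_E": 1.2, "gamma": 2.0},
                            kwargs_light={"r_eff": 0.8},
                            kwargs_anisotropy={"r_ani": 1.0},
                            sampling_number=1000)  # aperture-averaged LOS dispersion in km/s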
- """ - def __init__(self, kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwargs_numerics=None, - analytic_kinematics=False): + + def __init__( + self, + kwargs_model, + kwargs_aperture, + kwargs_psf, + kwargs_cosmo, + kwargs_numerics=None, + analytic_kinematics=False, + ): """ :param kwargs_model: keyword arguments describing the model components @@ -59,41 +66,63 @@ def __init__(self, kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwar :param kwargs_numerics: numerics keyword arguments :param analytic_kinematics: bool, if True uses the analytic kinematic model """ - GalkinModel.__init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=kwargs_numerics, - analytic_kinematics=analytic_kinematics) - GalkinObservation.__init__(self, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf) - - def dispersion(self, kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000): - """ - computes the averaged LOS velocity dispersion in the slit (convolved) - - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. - :param sampling_number: int, number of spectral sampling of the light distribution + GalkinModel.__init__( + self, + kwargs_model, + kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=analytic_kinematics, + ) + GalkinObservation.__init__( + self, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf + ) + + def dispersion( + self, kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ): + """Computes the averaged LOS velocity dispersion in the slit (convolved) + + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. + :param sampling_number: int, number of spectral sampling of the light + distribution :return: integrated LOS velocity dispersion in units [km/s] """ sigma2_IR_sum = 0 IR_sum = 0 for i in range(0, sampling_number): - sigma2_IR, IR = self._draw_one_sigma2(kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma2_IR, IR = self._draw_one_sigma2( + kwargs_mass, kwargs_light, kwargs_anisotropy + ) sigma2_IR_sum += sigma2_IR IR_sum += IR sigma_s2_average = sigma2_IR_sum / IR_sum # apply unit conversion from arc seconds and deflections to physical velocity dispersion in (km/s) self.numerics.delete_cache() - return np.sqrt(sigma_s2_average) / 1000. # in units of km/s - - def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_sampling=1000, num_psf_sampling=100): - """ - computes the velocity dispersion in each Integral Field Unit + return np.sqrt(sigma_s2_average) / 1000.0 # in units of km/s + + def dispersion_map( + self, + kwargs_mass, + kwargs_light, + kwargs_anisotropy, + num_kin_sampling=1000, + num_psf_sampling=100, + ): + """Computes the velocity dispersion in each Integral Field Unit. 
:param kwargs_mass: keyword arguments of the mass model :param kwargs_light: keyword argument of the light model :param kwargs_anisotropy: anisotropy keyword arguments - :param num_kin_sampling: int, number of draws from a kinematic prediction of a LOS - :param num_psf_sampling: int, number of displacements/render from a spectra to be displaced on the IFU + :param num_kin_sampling: int, number of draws from a kinematic prediction of a + LOS + :param num_psf_sampling: int, number of displacements/render from a spectra to + be displaced on the IFU :return: ordered array of velocity dispersions [km/s] for each unit """ # draw from light profile (3d and 2d option) @@ -108,7 +137,9 @@ def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_s for i in range(0, num_kin_sampling): r, R, x, y = self.numerics.draw_light(kwargs_light) - sigma2_IR, IR = self.numerics.sigma_s2(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma2_IR, IR = self.numerics.sigma_s2( + r, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) for k in range(0, num_psf_sampling): x_, y_ = self.displace_psf(x, y) bool_ap, ifu_index = self.aperture_select(x_, y_) @@ -119,7 +150,7 @@ def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_s sigma_s2_average = sigma2_IR_sum / count_draws # apply unit conversion from arc seconds and deflections to physical velocity dispersion in (km/s) self.numerics.delete_cache() - return np.sqrt(sigma_s2_average) / 1000. # in units of km/s + return np.sqrt(sigma_s2_average) / 1000.0 # in units of km/s def _draw_one_sigma2(self, kwargs_mass, kwargs_light, kwargs_anisotropy): """ @@ -137,5 +168,7 @@ def _draw_one_sigma2(self, kwargs_mass, kwargs_light, kwargs_anisotropy): bool_ap, _ = self.aperture_select(x_, y_) if bool_ap is True: break - sigma2_IR, IR = self.numerics.sigma_s2(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma2_IR, IR = self.numerics.sigma_s2( + r, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) return sigma2_IR, IR diff --git a/lenstronomy/GalKin/galkin_model.py b/lenstronomy/GalKin/galkin_model.py index a1cc608e4..642f0de1c 100644 --- a/lenstronomy/GalKin/galkin_model.py +++ b/lenstronomy/GalKin/galkin_model.py @@ -1,14 +1,13 @@ from lenstronomy.GalKin.numeric_kinematics import NumericKinematics from lenstronomy.GalKin.analytic_kinematics import AnalyticKinematics -__all__ = ['GalkinModel'] +__all__ = ["GalkinModel"] class GalkinModel(object): - """ - this class handles all the kinematic modeling aspects of Galkin - Excluded are observational conditions (seeing, aperture etc) - Major class to compute velocity dispersion measurements given light and mass models + """This class handles all the kinematic modeling aspects of Galkin Excluded are + observational conditions (seeing, aperture etc) Major class to compute velocity + dispersion measurements given light and mass models. The class supports any mass and light distribution (and superposition thereof) that has a 3d correspondance in their 2d lens model distribution. For models that do not have this correspondence, you may want to apply a @@ -39,9 +38,15 @@ class GalkinModel(object): These numerical options should be chosen to allow for a converged result (within your tolerance) but not too conservative to impact too much the computational cost. Reasonable values might depend on the specific problem. 
- """ - def __init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=None, analytic_kinematics=False): + + def __init__( + self, + kwargs_model, + kwargs_cosmo, + kwargs_numerics=None, + analytic_kinematics=False, + ): """ :param kwargs_model: keyword arguments describing the model components @@ -51,18 +56,26 @@ def __init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=None, analytic_ki :param analytic_kinematics: bool, if True uses the analytic kinematic model """ if kwargs_numerics is None: - kwargs_numerics = {'interpol_grid_num': 200, # numerical interpolation, should converge -> infinity - 'log_integration': True, - # log or linear interpolation of surface brightness and mass models - 'max_integrate': 100, - 'min_integrate': 0.001} # lower/upper bound of numerical integrals + kwargs_numerics = { + "interpol_grid_num": 200, # numerical interpolation, should converge -> infinity + "log_integration": True, + # log or linear interpolation of surface brightness and mass models + "max_integrate": 100, + "min_integrate": 0.001, + } # lower/upper bound of numerical integrals if analytic_kinematics is True: - anisotropy_model = kwargs_model.get('anisotropy_model') - if not anisotropy_model == 'OM': - raise ValueError('analytic kinematics only available for OsipkovMerritt ("OM") anisotropy model.') - self.numerics = AnalyticKinematics(kwargs_cosmo=kwargs_cosmo, **kwargs_numerics) + anisotropy_model = kwargs_model.get("anisotropy_model") + if not anisotropy_model == "OM": + raise ValueError( + 'analytic kinematics only available for OsipkovMerritt ("OM") anisotropy model.' + ) + self.numerics = AnalyticKinematics( + kwargs_cosmo=kwargs_cosmo, **kwargs_numerics + ) else: - self.numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics) + self.numerics = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics + ) self._analytic_kinematics = analytic_kinematics def check_df(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): @@ -80,8 +93,12 @@ def check_df(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): dr = 0.01 # finite differential in radial direction r_dr = r + dr - sigmar2 = self.numerics.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy) - sigmar2_dr = self.numerics.sigma_r2(r_dr, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigmar2 = self.numerics.sigma_r2( + r, kwargs_mass, kwargs_light, kwargs_anisotropy + ) + sigmar2_dr = self.numerics.sigma_r2( + r_dr, kwargs_mass, kwargs_light, kwargs_anisotropy + ) grav_pot = self.numerics.grav_potential(r, kwargs_mass) grav_pot_dr = self.numerics.grav_potential(r_dr, kwargs_mass) self.numerics.delete_cache() diff --git a/lenstronomy/GalKin/galkin_multiobservation.py b/lenstronomy/GalKin/galkin_multiobservation.py index a38d02663..bce7f6e3a 100644 --- a/lenstronomy/GalKin/galkin_multiobservation.py +++ b/lenstronomy/GalKin/galkin_multiobservation.py @@ -3,19 +3,27 @@ import numpy as np -__all__ = ['GalkinMultiObservation'] +__all__ = ["GalkinMultiObservation"] class GalkinMultiObservation(GalkinModel): - """ - class to efficiently model the velocity dispersion measurement of a set of different observations - with individual apertures and seeing conditions for a given lens + """Class to efficiently model the velocity dispersion measurement of a set of + different observations with individual apertures and seeing conditions for a given + lens. - The main difference to the Galkin main class is that it feeds in list of observational settings. 
- Does not work with IFU observations (yet) + The main difference to the Galkin main class is that it feeds in list of + observational settings. Does not work with IFU observations (yet) """ - def __init__(self, kwargs_model, kwargs_aperture_list, kwargs_psf_list, kwargs_cosmo, kwargs_numerics=None, - analytic_kinematics=False): + + def __init__( + self, + kwargs_model, + kwargs_aperture_list, + kwargs_psf_list, + kwargs_cosmo, + kwargs_numerics=None, + analytic_kinematics=False, + ): """ :param kwargs_model: keyword arguments describing the model components @@ -27,23 +35,40 @@ def __init__(self, kwargs_model, kwargs_aperture_list, kwargs_psf_list, kwargs_c :param kwargs_numerics: numerics keyword arguments - see GalkinModel :param analytic_kinematics: bool, if True uses the analytic kinematic model """ - GalkinModel.__init__(self, kwargs_model, kwargs_cosmo, kwargs_numerics=kwargs_numerics, - analytic_kinematics=analytic_kinematics) + GalkinModel.__init__( + self, + kwargs_model, + kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=analytic_kinematics, + ) self._observation_list = [] self._num_observations = len(kwargs_aperture_list) for i in range(self._num_observations): - self._observation_list.append(GalkinObservation(kwargs_aperture=kwargs_aperture_list[i], - kwargs_psf=kwargs_psf_list[i])) + self._observation_list.append( + GalkinObservation( + kwargs_aperture=kwargs_aperture_list[i], + kwargs_psf=kwargs_psf_list[i], + ) + ) - def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_sampling=1000, num_psf_sampling=100): - """ - computes the velocity dispersion in each Integral Field Unit + def dispersion_map( + self, + kwargs_mass, + kwargs_light, + kwargs_anisotropy, + num_kin_sampling=1000, + num_psf_sampling=100, + ): + """Computes the velocity dispersion in each Integral Field Unit. :param kwargs_mass: keyword arguments of the mass model :param kwargs_light: keyword argument of the light model :param kwargs_anisotropy: anisotropy keyword arguments - :param num_kin_sampling: int, number of draws from a kinematic prediction of a LOS - :param num_psf_sampling: int, number of displacements/render from a spectra to be displaced on the IFU + :param num_kin_sampling: int, number of draws from a kinematic prediction of a + LOS + :param num_psf_sampling: int, number of displacements/render from a spectra to + be displaced on the IFU :return: ordered array of velocity dispersions [km/s] for each observation """ # draw from light profile (3d and 2d option) @@ -57,7 +82,9 @@ def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_s for i in range(0, num_kin_sampling): r, R, x, y = self.numerics.draw_light(kwargs_light) - sigma2_IR, IR = self.numerics.sigma_s2(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma2_IR, IR = self.numerics.sigma_s2( + r, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) for obs_index, observation in enumerate(self._observation_list): for k in range(0, num_psf_sampling): x_, y_ = observation.displace_psf(x, y) @@ -69,4 +96,4 @@ def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, num_kin_s sigma_s2_average = sigma2_R_sum / count_draws # apply unit conversion from arc seconds and deflections to physical velocity dispersion in (km/s) self.numerics.delete_cache() - return np.sqrt(sigma_s2_average) / 1000. 
# in units of km/s + return np.sqrt(sigma_s2_average) / 1000.0 # in units of km/s diff --git a/lenstronomy/GalKin/galkin_shells.py b/lenstronomy/GalKin/galkin_shells.py index 64e3212a4..f56b084e4 100644 --- a/lenstronomy/GalKin/galkin_shells.py +++ b/lenstronomy/GalKin/galkin_shells.py @@ -6,12 +6,17 @@ class GalkinShells(Galkin): - """ - class to calculate velocity dispersion for radial shells in a fast way - """ + """Class to calculate velocity dispersion for radial shells in a fast way.""" - def __init__(self, kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwargs_numerics=None, - analytic_kinematics=False): + def __init__( + self, + kwargs_model, + kwargs_aperture, + kwargs_psf, + kwargs_cosmo, + kwargs_numerics=None, + analytic_kinematics=False, + ): """ :param kwargs_model: keyword arguments describing the model components @@ -22,13 +27,21 @@ def __init__(self, kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwar :param kwargs_numerics: numerics keyword arguments :param analytic_kinematics: bool, if True uses the analytic kinematic model """ - Galkin.__init__(self, kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics, - analytic_kinematics=analytic_kinematics) - if not self.aperture_type == 'IFU_shells': - raise ValueError('GalkinShells is not supported with aperture_type %s. Only support with "IFU_shells"' - % self.aperture_type) - self._r_bins = kwargs_aperture['r_bins'] + Galkin.__init__( + self, + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=analytic_kinematics, + ) + if not self.aperture_type == "IFU_shells": + raise ValueError( + 'GalkinShells is not supported with aperture_type %s. Only support with "IFU_shells"' + % self.aperture_type + ) + self._r_bins = kwargs_aperture["r_bins"] r_max = np.max(self._r_bins) self._num_pix = 100 # factor of 1.5 to allow outside flux to be convolved into the largest bin @@ -45,22 +58,29 @@ def dispersion_map(self, kwargs_mass, kwargs_light, kwargs_anisotropy, **kwargs) We refer to the Anisotropy() class for details on the parameters. 
:return: array of velocity dispersion for each IFU shell [km/s] """ - I_R_sigma2, IR = self.numerics._I_R_sigma2_interp(self._r_grid, kwargs_mass, kwargs_light, - kwargs_anisotropy) + I_R_sigma2, IR = self.numerics._I_R_sigma2_interp( + self._r_grid, kwargs_mass, kwargs_light, kwargs_anisotropy + ) ir_map = util.array2image(IR) ir_sigma2_map = util.array2image(I_R_sigma2) - kernel = self.convolution_kernel(delta_pix=self._delta_pix, num_pix=self._num_pix) - I_R_sigma2_conv = signal.fftconvolve(ir_sigma2_map, kernel, mode='same') + kernel = self.convolution_kernel( + delta_pix=self._delta_pix, num_pix=self._num_pix + ) + I_R_sigma2_conv = signal.fftconvolve(ir_sigma2_map, kernel, mode="same") I_R_sigma2_conv = util.image2array(I_R_sigma2_conv) - I_R_conv = signal.fftconvolve(ir_map, kernel, mode='same') + I_R_conv = signal.fftconvolve(ir_map, kernel, mode="same") I_R_conv = util.image2array(I_R_conv) # average over radial bins vel_disp_array = [] r_min = self._r_bins[0] for r_max in self._r_bins[1:]: - mask = mask_util.mask_shell(self._r_grid, 0, center_x=0, center_y=0, r_in=r_min, r_out=r_max) - vel_disp = np.sum(I_R_sigma2_conv * mask) / np.sum(I_R_conv * mask) # luminosity weighted average + mask = mask_util.mask_shell( + self._r_grid, 0, center_x=0, center_y=0, r_in=r_min, r_out=r_max + ) + vel_disp = np.sum(I_R_sigma2_conv * mask) / np.sum( + I_R_conv * mask + ) # luminosity weighted average vel_disp_array.append(vel_disp) r_min = r_max self.numerics.delete_cache() diff --git a/lenstronomy/GalKin/light_profile.py b/lenstronomy/GalKin/light_profile.py index 3aaea383c..3e6fc6bb4 100644 --- a/lenstronomy/GalKin/light_profile.py +++ b/lenstronomy/GalKin/light_profile.py @@ -3,22 +3,27 @@ from scipy.interpolate import interp1d from lenstronomy.LightModel.light_model import LightModel -__all__ = ['LightProfile'] +__all__ = ["LightProfile"] class LightProfile(object): - """ - class to deal with the light distribution for GalKin + """Class to deal with the light distribution for GalKin. In particular, this class allows for: - (faster) interpolated calculation for a given profile (for a range that the Jeans equation is computed) - drawing 3d and 2d distributions from a given (spherical) profile (within bounds where the Jeans equation is expected to be accurate) - 2d projected profiles within the 3d integration range (truncated) - """ - def __init__(self, profile_list, interpol_grid_num=2000, max_interpolate=1000, min_interpolate=0.001, - max_draw=None): + + def __init__( + self, + profile_list, + interpol_grid_num=2000, + max_interpolate=1000, + min_interpolate=0.001, + max_draw=None, + ): """ :param profile_list: list of light profiles for LightModel module (must support light_3d() functionalities) @@ -37,43 +42,51 @@ def __init__(self, profile_list, interpol_grid_num=2000, max_interpolate=1000, m self._max_draw = max_draw def light_3d(self, r, kwargs_list): - """ - three-dimensional light profile + """Three-dimensional light profile. 
:param r: 3d radius - :param kwargs_list: list of keyword arguments of light profiles (see LightModule) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) :return: flux per 3d volume at radius r """ light_3d = self.light_model.light_3d(r, kwargs_list) return light_3d def light_3d_interp(self, r, kwargs_list, new_compute=False): - """ - interpolated three-dimensional light profile within bounds [min_interpolate, max_interpolate] - in logarithmic units with interpol_grid_num numbers of interpolation steps + """Interpolated three-dimensional light profile within bounds [min_interpolate, + max_interpolate] in logarithmic units with interpol_grid_num numbers of + interpolation steps. :param r: 3d radius - :param kwargs_list: list of keyword arguments of light profiles (see LightModule) - :param new_compute: boolean, if True, re-computes the interpolation - (becomes valid with updated kwargs_list argument) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) + :param new_compute: boolean, if True, re-computes the interpolation (becomes + valid with updated kwargs_list argument) :return: flux per 3d volume at radius r """ - if not hasattr(self, '_f_light_3d') or new_compute is True: - r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_interpolate), - self._interp_grid_num) + if not hasattr(self, "_f_light_3d") or new_compute is True: + r_array = np.logspace( + np.log10(self._min_interpolate), + np.log10(self._max_interpolate), + self._interp_grid_num, + ) light_3d_array = self.light_model.light_3d(r_array, kwargs_list) light_3d_array[light_3d_array < 10 ** (-1000)] = 10 ** (-1000) - f = interp1d(np.log(r_array), np.log(light_3d_array), fill_value=(np.log(light_3d_array[0]), -1000), - bounds_error=False) # "extrapolate" + f = interp1d( + np.log(r_array), + np.log(light_3d_array), + fill_value=(np.log(light_3d_array[0]), -1000), + bounds_error=False, + ) # "extrapolate" self._f_light_3d = f return np.exp(self._f_light_3d(np.log(r))) def light_2d(self, R, kwargs_list): - """ - projected light profile (integrated to infinity in the projected axis) + """Projected light profile (integrated to infinity in the projected axis) :param R: projected 2d radius - :param kwargs_list: list of keyword arguments of light profiles (see LightModule) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) :return: projected surface brightness """ kwargs_light_circularized = self._circularize_kwargs(kwargs_list) @@ -86,31 +99,44 @@ def _circularize_kwargs(self, kwargs_list): :return: circularized arguments """ # TODO make sure averaging is done azimuthally - if not hasattr(self, '_kwargs_light_circularized'): + if not hasattr(self, "_kwargs_light_circularized"): kwargs_list_copy = copy.deepcopy(kwargs_list) kwargs_list_new = [] for kwargs in kwargs_list_copy: - if 'e1' in kwargs: - kwargs['e1'] = 0 - if 'e2' in kwargs: - kwargs['e2'] = 0 - kwargs_list_new.append({k: v for k, v in kwargs.items() if k not in ['center_x', 'center_y']}) + if "e1" in kwargs: + kwargs["e1"] = 0 + if "e2" in kwargs: + kwargs["e2"] = 0 + kwargs_list_new.append( + { + k: v + for k, v in kwargs.items() + if k not in ["center_x", "center_y"] + } + ) self._kwargs_light_circularized = kwargs_list_new return self._kwargs_light_circularized def _light_2d_finite_single(self, R, kwargs_list): - """ - projected light profile (integrated to FINITE 3d boundaries from the max_interpolate) - for a single float number of R + """Projected 
light profile (integrated to FINITE 3d boundaries from the + max_interpolate) for a single float number of R. :param R: projected 2d radius (between min_interpolate and max_interpolate) - :param kwargs_list: list of keyword arguments of light profiles (see LightModule) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) :return: projected surface brightness """ # here we perform a logarithmic integral - stop = np.log10(np.maximum(np.sqrt(self._max_interpolate**2 - R**2), self._min_interpolate + 0.00001)) - x = np.logspace(start=np.log10(self._min_interpolate), stop=stop, num=self._interp_grid_num) + stop = np.log10( + np.maximum( + np.sqrt(self._max_interpolate**2 - R**2), + self._min_interpolate + 0.00001, + ) + ) + x = np.logspace( + start=np.log10(self._min_interpolate), stop=stop, num=self._interp_grid_num + ) r_array = np.sqrt(x**2 + R**2) flux_r = self.light_3d(r_array, kwargs_list) dlog_r = (np.log10(x[2]) - np.log10(x[1])) * np.log(10) @@ -135,11 +161,12 @@ def _light_2d_finite_single(self, R, kwargs_list): return flux_R * 2 # integral in both directions def light_2d_finite(self, R, kwargs_list): - """ - projected light profile (integrated to FINITE 3d boundaries from the max_interpolate) + """Projected light profile (integrated to FINITE 3d boundaries from the + max_interpolate) :param R: projected 2d radius (between min_interpolate and max_interpolate - :param kwargs_list: list of keyword arguments of light profiles (see LightModule) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) :return: projected surface brightness """ @@ -154,18 +181,20 @@ def light_2d_finite(self, R, kwargs_list): return light_2d def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False): - """ - constructs the CDF and draws from it random realizations of projected radii R - The interpolation of the CDF is done in linear projected radius space + """Constructs the CDF and draws from it random realizations of projected radii R + The interpolation of the CDF is done in linear projected radius space. 
- :param kwargs_list: list of keyword arguments of light profiles (see LightModule) + :param kwargs_list: list of keyword arguments of light profiles (see + LightModule) :param n: int; number of draws - :param new_compute: boolean, if True, re-computes the interpolation - (becomes valid with updated kwargs_list argument) + :param new_compute: boolean, if True, re-computes the interpolation (becomes + valid with updated kwargs_list argument) :return: draw of projected radius for the given light profile distribution """ - if not hasattr(self, '_light_cdf') or new_compute is True: - r_array = np.linspace(self._min_interpolate, self._max_draw, self._interp_grid_num) + if not hasattr(self, "_light_cdf") or new_compute is True: + r_array = np.linspace( + self._min_interpolate, self._max_draw, self._interp_grid_num + ) cum_sum = np.zeros_like(r_array) sum_light = 0 for i, r in enumerate(r_array): @@ -174,25 +203,29 @@ def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False): else: sum_light += self.light_2d(r, kwargs_list) * r cum_sum[i] = copy.deepcopy(sum_light) - cum_sum_norm = cum_sum/cum_sum[-1] + cum_sum_norm = cum_sum / cum_sum[-1] f = interp1d(cum_sum_norm, r_array) self._light_cdf = f - cdf_draw = np.random.uniform(0., 1, n) + cdf_draw = np.random.uniform(0.0, 1, n) r_draw = self._light_cdf(cdf_draw) return r_draw def draw_light_2d(self, kwargs_list, n=1, new_compute=False): - """ - constructs the CDF and draws from it random realizations of projected radii R - CDF is constructed in logarithmic projected radius spacing + """Constructs the CDF and draws from it random realizations of projected radii R + CDF is constructed in logarithmic projected radius spacing. :param kwargs_list: light model keyword argument list :param n: int, number of draws per functino call :param new_compute: re-computes the interpolated CDF - :return: realization of projected radius following the distribution of the light model + :return: realization of projected radius following the distribution of the light + model """ - if not hasattr(self, '_light_cdf_log') or new_compute is True: - r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_draw), self._interp_grid_num) + if not hasattr(self, "_light_cdf_log") or new_compute is True: + r_array = np.logspace( + np.log10(self._min_interpolate), + np.log10(self._max_draw), + self._interp_grid_num, + ) cum_sum = np.zeros_like(r_array) sum_light = 0 for i, r in enumerate(r_array): @@ -201,53 +234,64 @@ def draw_light_2d(self, kwargs_list, n=1, new_compute=False): else: sum_light += self.light_2d(r, kwargs_list) * r * r cum_sum[i] = copy.deepcopy(sum_light) - cum_sum_norm = cum_sum/cum_sum[-1] + cum_sum_norm = cum_sum / cum_sum[-1] f = interp1d(cum_sum_norm, np.log(r_array)) self._light_cdf_log = f - cdf_draw = np.random.uniform(0., 1, n) + cdf_draw = np.random.uniform(0.0, 1, n) r_log_draw = self._light_cdf_log(cdf_draw) return np.exp(r_log_draw) def draw_light_3d(self, kwargs_list, n=1, new_compute=False): - """ - constructs the CDF and draws from it random realizations of 3D radii r + """Constructs the CDF and draws from it random realizations of 3D radii r. 
:param kwargs_list: light model keyword argument list :param n: int, number of draws per function call :param new_compute: re-computes the interpolated CDF - :return: realization of projected radius following the distribution of the light model + :return: realization of projected radius following the distribution of the light + model """ - if not hasattr(self, '_light_3d_cdf_log') or new_compute is True: - r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_draw), self._interp_grid_num) + if not hasattr(self, "_light_3d_cdf_log") or new_compute is True: + r_array = np.logspace( + np.log10(self._min_interpolate), + np.log10(self._max_draw), + self._interp_grid_num, + ) dlog_r = np.log10(r_array[1]) - np.log10(r_array[0]) - r_array_int = np.logspace(np.log10(self._min_interpolate) + dlog_r / 2, np.log10(self._max_draw) + dlog_r / - 2, self._interp_grid_num) + r_array_int = np.logspace( + np.log10(self._min_interpolate) + dlog_r / 2, + np.log10(self._max_draw) + dlog_r / 2, + self._interp_grid_num, + ) cum_sum = np.zeros_like(r_array) sum_light = 0 for i, r in enumerate(r_array_int[:-1]): # if i == 0: # cum_sum[i] = 0 # else: - sum_light += self.light_3d(r, kwargs_list) * r**2 * (r_array[i+1] - r_array[i]) # * r - cum_sum[i+1] = copy.deepcopy(sum_light) - cum_sum_norm = cum_sum/cum_sum[-1] + sum_light += ( + self.light_3d(r, kwargs_list) + * r**2 + * (r_array[i + 1] - r_array[i]) + ) # * r + cum_sum[i + 1] = copy.deepcopy(sum_light) + cum_sum_norm = cum_sum / cum_sum[-1] f = interp1d(cum_sum_norm, np.log(r_array)) self._light_3d_cdf_log = f - cdf_draw = np.random.uniform(0., 1, n) + cdf_draw = np.random.uniform(0.0, 1, n) r_log_draw = self._light_3d_cdf_log(cdf_draw) return np.exp(r_log_draw) def delete_cache(self): - """ - deletes cached interpolation function of the CDF for a specific light profile + """Deletes cached interpolation function of the CDF for a specific light + profile. :return: None """ - if hasattr(self, '_light_cdf_log'): + if hasattr(self, "_light_cdf_log"): del self._light_cdf_log - if hasattr(self, '_light_cdf'): + if hasattr(self, "_light_cdf"): del self._light_cdf - if hasattr(self, '_f_light_3d'): + if hasattr(self, "_f_light_3d"): del self._f_light_3d - if hasattr(self, '_kwargs_light_circularized'): + if hasattr(self, "_kwargs_light_circularized"): del self._kwargs_light_circularized diff --git a/lenstronomy/GalKin/numeric_kinematics.py b/lenstronomy/GalKin/numeric_kinematics.py index 2e4b114df..a67d2f806 100644 --- a/lenstronomy/GalKin/numeric_kinematics.py +++ b/lenstronomy/GalKin/numeric_kinematics.py @@ -8,13 +8,21 @@ from lenstronomy.LensModel.single_plane import SinglePlane import lenstronomy.GalKin.velocity_util as util -__all__ = ['NumericKinematics'] +__all__ = ["NumericKinematics"] class NumericKinematics(Anisotropy): - - def __init__(self, kwargs_model, kwargs_cosmo, interpol_grid_num=1000, log_integration=True, max_integrate=1000, - min_integrate=0.0001, max_light_draw=None, lum_weight_int_method=True): + def __init__( + self, + kwargs_model, + kwargs_cosmo, + interpol_grid_num=1000, + log_integration=True, + max_integrate=1000, + min_integrate=0.0001, + max_light_draw=None, + lum_weight_int_method=True, + ): """ What we need: - max projected R to have ACCURATE I_R_sigma values @@ -30,78 +38,105 @@ def __init__(self, kwargs_model, kwargs_cosmo, interpol_grid_num=1000, log_integ solution. 
ATTENTION: currently less accurate than 3d solution :param min_integrate: """ - mass_profile_list = kwargs_model.get('mass_profile_list') - light_profile_list = kwargs_model.get('light_profile_list') - anisotropy_model = kwargs_model.get('anisotropy_model') + mass_profile_list = kwargs_model.get("mass_profile_list") + light_profile_list = kwargs_model.get("light_profile_list") + anisotropy_model = kwargs_model.get("anisotropy_model") self._interp_grid_num = interpol_grid_num self._log_int = log_integration - self._max_integrate = max_integrate # maximal integration (and interpolation) in units of arcsecs - self._min_integrate = min_integrate # min integration (and interpolation) in units of arcsecs + self._max_integrate = ( + max_integrate # maximal integration (and interpolation) in units of arcsecs + ) + self._min_integrate = ( + min_integrate # min integration (and interpolation) in units of arcsecs + ) self._max_interpolate = max_integrate # we chose to set the interpolation range to the integration range self._min_interpolate = min_integrate # we chose to set the interpolation range to the integration range if max_light_draw is None: # make sure the actual solution for the kinematics is only computed way inside the integral max_light_draw = max_integrate - self.lightProfile = LightProfile(light_profile_list, interpol_grid_num=interpol_grid_num, - max_interpolate=max_integrate, min_interpolate=min_integrate, - max_draw=max_light_draw) + self.lightProfile = LightProfile( + light_profile_list, + interpol_grid_num=interpol_grid_num, + max_interpolate=max_integrate, + min_interpolate=min_integrate, + max_draw=max_light_draw, + ) Anisotropy.__init__(self, anisotropy_type=anisotropy_model) self.cosmo = Cosmo(**kwargs_cosmo) self._mass_profile = SinglePlane(mass_profile_list) self._lum_weight_int_method = lum_weight_int_method def sigma_s2(self, r, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - returns unweighted los velocity dispersion for a specified 3d and projected radius - (if lum_weight_int_method=True then the 3d radius is not required and the function directly performs the - luminosity weighted integral in projection at R) + """Returns unweighted los velocity dispersion for a specified 3d and projected + radius (if lum_weight_int_method=True then the 3d radius is not required and the + function directly performs the luminosity weighted integral in projection at R) :param r: 3d radius (not needed for this calculation) :param R: 2d projected radius (in angular units of arcsec) - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. - :return: weighted line-of-sight projected velocity dispersion at projected radius R with weights I + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. 
+ :return: weighted line-of-sight projected velocity dispersion at projected + radius R with weights I """ if self._lum_weight_int_method is True: - return self.sigma_s2_project(R, kwargs_mass, kwargs_light, kwargs_anisotropy) + return self.sigma_s2_project( + R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) else: - return self.sigma_s2_r(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy), 1 + return ( + self.sigma_s2_r(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy), + 1, + ) def sigma_s2_project(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - returns luminosity-weighted los velocity dispersion for a specified projected radius R and weight + """Returns luminosity-weighted los velocity dispersion for a specified projected + radius R and weight. :param R: 2d projected radius (in angular units of arcsec) - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. :return: line-of-sight projected velocity dispersion at projected radius R """ # nominator is numerically to a finite distance, so luminosity weighting might be off # this could lead to an under-prediction of the velocity dispersion # so we ask the function _I_R_sigma2() to also return the numerical l(r) # I_R_sigma2, I_R = self._I_R_sigma2_interp(R, kwargs_mass, kwargs_light, kwargs_anisotropy) - I_R_sigma2, I_R = self._I_R_sigma2_interp(R, kwargs_mass, kwargs_light, kwargs_anisotropy) + I_R_sigma2, I_R = self._I_R_sigma2_interp( + R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) # I_R = self.lightProfile.light_2d(R, kwargs_light) return I_R_sigma2 / I_R, 1 def sigma_s2_r(self, r, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - returns unweighted los velocity dispersion for a specified 3d radius r at projected radius R + """Returns unweighted los velocity dispersion for a specified 3d radius r at + projected radius R. :param r: 3d radius (not needed for this calculation) :param R: 2d projected radius (in angular units of arcsec) - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. - :return: line-of-sight projected velocity dispersion at projected radius R from 3d radius r + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. 
+ :return: line-of-sight projected velocity dispersion at projected radius R from + 3d radius r """ beta = self.beta_r(r, **kwargs_anisotropy) - return (1 - beta * R ** 2 / r ** 2) * self.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy) + return (1 - beta * R**2 / r**2) * self.sigma_r2( + r, kwargs_mass, kwargs_light, kwargs_anisotropy + ) def sigma_r2(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): """ @@ -126,25 +161,39 @@ def sigma_r2(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): # l_r = self.lightProfile.light_3d_interp(r, kwargs_light) l_r = self.lightProfile.light_3d(r, kwargs_light) f_r = self.anisotropy_solution(r, **kwargs_anisotropy) - return 1 / f_r / l_r * self._jeans_solution_integral(r, kwargs_mass, kwargs_light, kwargs_anisotropy) * \ - const.G / (const.arcsec * self.cosmo.dd * const.Mpc) + return ( + 1 + / f_r + / l_r + * self._jeans_solution_integral( + r, kwargs_mass, kwargs_light, kwargs_anisotropy + ) + * const.G + / (const.arcsec * self.cosmo.dd * const.Mpc) + ) def mass_3d(self, r, kwargs): - """ - mass enclosed a 3d radius + """Mass enclosed a 3d radius. :param r: in arc seconds :param kwargs: lens model parameters in arc seconds :return: mass enclosed physical radius in kg """ mass_dimless = self._mass_profile.mass_3d(r, kwargs) - mass_dim = mass_dimless * const.arcsec ** 2 * self.cosmo.dd * self.cosmo.ds / self.cosmo.dds * const.Mpc * \ - const.c ** 2 / (4 * np.pi * const.G) + mass_dim = ( + mass_dimless + * const.arcsec**2 + * self.cosmo.dd + * self.cosmo.ds + / self.cosmo.dds + * const.Mpc + * const.c**2 + / (4 * np.pi * const.G) + ) return mass_dim def grav_potential(self, r, kwargs_mass): - """ - Gravitational potential in SI units + """Gravitational potential in SI units. :param r: radius (arc seconds) :param kwargs_mass: @@ -165,29 +214,32 @@ def draw_light(self, kwargs_light): return r, R, x, y def delete_cache(self): - """ - delete interpolation function for a specific mass and light profile as well as for a specific anisotropy model + """Delete interpolation function for a specific mass and light profile as well + as for a specific anisotropy model. :return: """ - if hasattr(self, '_log_mass_3d'): + if hasattr(self, "_log_mass_3d"): del self._log_mass_3d - if hasattr(self, '_interp_jeans_integral'): + if hasattr(self, "_interp_jeans_integral"): del self._interp_jeans_integral - if hasattr(self, '_interp_I_R_sigma2'): + if hasattr(self, "_interp_I_R_sigma2"): del self._interp_I_R_sigma2 self.lightProfile.delete_cache() self.delete_anisotropy_cache() def _I_R_sigma2(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - equation A15 in Mamon & Lokas 2005 as a logarithmic numerical integral (if option is chosen) + """Equation A15 in Mamon & Lokas 2005 as a logarithmic numerical integral (if + option is chosen) :param R: 2d projected radius (in angular units) - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. 
We refer to the Anisotropy() class for details on + the parameters. :return: integral of A15 in Mamon&Lokas 2005 """ R = max(R, self._min_integrate) @@ -212,14 +264,22 @@ def _I_R_sigma2(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): min_log = np.log10(R) max_log = np.log10(max_integrate) dlogr = (max_log - min_log) / (self._interp_grid_num - 1) - r_array = np.logspace(min_log + dlogr / 2., max_log + dlogr / 2., self._interp_grid_num) + r_array = np.logspace( + min_log + dlogr / 2.0, max_log + dlogr / 2.0, self._interp_grid_num + ) dlog_r = (np.log10(r_array[2]) - np.log10(r_array[1])) * np.log(10) - IR_sigma2_ = self._integrand_A15(r_array, R, kwargs_mass, kwargs_light, kwargs_anisotropy) + IR_sigma2_ = self._integrand_A15( + r_array, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) IR_sigma2_dr = IR_sigma2_ * dlog_r * r_array else: - r_array = np.linspace(start=R, stop=self._max_interpolate, num=self._interp_grid_num) + r_array = np.linspace( + start=R, stop=self._max_interpolate, num=self._interp_grid_num + ) dr = r_array[2] - r_array[1] - IR_sigma2_ = self._integrand_A15(r_array + dr / 2., R, kwargs_mass, kwargs_light, kwargs_anisotropy) + IR_sigma2_ = self._integrand_A15( + r_array + dr / 2.0, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) IR_sigma2_dr = IR_sigma2_ * dr IR_sigma2 = np.sum(IR_sigma2_dr) # integral from angle to physical scales @@ -227,8 +287,7 @@ def _I_R_sigma2(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): return IR_sigma2 * 2 * const.G / (const.arcsec * self.cosmo.dd * const.Mpc), IR def _I_R_sigma2_interp(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - equation A15 in Mamon&Lokas 2005 as interpolation in log space + """Equation A15 in Mamon&Lokas 2005 as interpolation in log space. :param R: projected radius :param kwargs_mass: mass profile keyword arguments @@ -237,30 +296,40 @@ def _I_R_sigma2_interp(self, R, kwargs_mass, kwargs_light, kwargs_anisotropy): :return: """ R = np.maximum(R, self._min_integrate) - if not hasattr(self, '_interp_I_R_sigma2'): + if not hasattr(self, "_interp_I_R_sigma2"): min_log = np.log10(self._min_integrate) max_log = np.log10(self._max_integrate) - R_array = np.logspace(min_log, max_log, self._interp_grid_num) # self._interp_grid_num + R_array = np.logspace( + min_log, max_log, self._interp_grid_num + ) # self._interp_grid_num I_R_sigma2_array = [] I_R_array = [] for R_i in R_array: - I_R_sigma2_, IR_ = self._I_R_sigma2(R_i, kwargs_mass, kwargs_light, kwargs_anisotropy) + I_R_sigma2_, IR_ = self._I_R_sigma2( + R_i, kwargs_mass, kwargs_light, kwargs_anisotropy + ) I_R_sigma2_array.append(I_R_sigma2_) I_R_array.append(IR_) - self._interp_I_R_sigma2 = interp1d(np.log(R_array), np.array(I_R_sigma2_array), fill_value="extrapolate") - self._interp_I_R = interp1d(np.log(R_array), np.array(I_R_array), fill_value="extrapolate") + self._interp_I_R_sigma2 = interp1d( + np.log(R_array), np.array(I_R_sigma2_array), fill_value="extrapolate" + ) + self._interp_I_R = interp1d( + np.log(R_array), np.array(I_R_array), fill_value="extrapolate" + ) return self._interp_I_R_sigma2(np.log(R)), self._interp_I_R(np.log(R)) def _integrand_A15(self, r, R, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - integrand of A15 (in log space) in Mamon&Lokas 2005 + """Integrand of A15 (in log space) in Mamon&Lokas 2005. 
:param r: 3d radius in arc seconds :param R: 2d projected radius - :param kwargs_mass: mass model parameters (following lenstronomy lens model conventions) - :param kwargs_light: deflector light parameters (following lenstronomy light model conventions) - :param kwargs_anisotropy: anisotropy parameters, may vary according to anisotropy type chosen. - We refer to the Anisotropy() class for details on the parameters. + :param kwargs_mass: mass model parameters (following lenstronomy lens model + conventions) + :param kwargs_light: deflector light parameters (following lenstronomy light + model conventions) + :param kwargs_anisotropy: anisotropy parameters, may vary according to + anisotropy type chosen. We refer to the Anisotropy() class for details on + the parameters. :return: integrand """ k_r = self.K(r, R, **kwargs_anisotropy) @@ -272,8 +341,7 @@ def _integrand_A15(self, r, R, kwargs_mass, kwargs_light, kwargs_anisotropy): return out def _jeans_solution_integral(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - interpolated solution of the integral + """Interpolated solution of the integral. .. math:: \\int_r^{\\infty} f(s) l(s) G M(s) / s^2 ds @@ -285,28 +353,36 @@ def _jeans_solution_integral(self, r, kwargs_mass, kwargs_light, kwargs_anisotro :return: interpolated solution of the Jeans integral (copped values at large radius as they become numerically inaccurate) """ - if not hasattr(self, '_interp_jeans_integral'): + if not hasattr(self, "_interp_jeans_integral"): min_log = np.log10(self._min_integrate) # we extend the integral but ignore these outer solutions in the interpolation max_log = np.log10(self._max_integrate) r_array = np.logspace(min_log, max_log, self._interp_grid_num) dlog_r = (np.log10(r_array[2]) - np.log10(r_array[1])) * np.log(10) - integrand_jeans = self._integrand_jeans_solution(r_array, kwargs_mass, kwargs_light, kwargs_anisotropy) * \ - dlog_r * r_array + integrand_jeans = ( + self._integrand_jeans_solution( + r_array, kwargs_mass, kwargs_light, kwargs_anisotropy + ) + * dlog_r + * r_array + ) # flip array from inf to finite integral_jeans_r = np.cumsum(np.flip(integrand_jeans)) # flip array back integral_jeans_r = np.flip(integral_jeans_r) # call 1d interpolation function - self._interp_jeans_integral = interp1d(np.log(r_array[r_array <= self._max_integrate]), - integral_jeans_r[r_array <= self._max_integrate], - fill_value="extrapolate") + self._interp_jeans_integral = interp1d( + np.log(r_array[r_array <= self._max_integrate]), + integral_jeans_r[r_array <= self._max_integrate], + fill_value="extrapolate", + ) return self._interp_jeans_integral(np.log(r)) - def _integrand_jeans_solution(self, r, kwargs_mass, kwargs_light, kwargs_anisotropy): - """ - integrand of A1 (in log space) in Mamon&Lokas 2005 to calculate the Jeans equation numerically - f(s) l(s) M(s) / s^2 + def _integrand_jeans_solution( + self, r, kwargs_mass, kwargs_light, kwargs_anisotropy + ): + """Integrand of A1 (in log space) in Mamon&Lokas 2005 to calculate the Jeans + equation numerically f(s) l(s) M(s) / s^2. 
:param r: 3d radius :param kwargs_mass: mass model keyword arguments @@ -328,11 +404,20 @@ def _mass_3d_interp(self, r, kwargs, new_compute=False): :param new_compute: bool, if True, recomputes the interpolation :return: mass enclosed physical radius in kg """ - if not hasattr(self, '_log_mass_3d') or new_compute is True: - r_array = np.logspace(np.log10(self._min_interpolate), np.log10(self._max_interpolate), - self._interp_grid_num) + if not hasattr(self, "_log_mass_3d") or new_compute is True: + r_array = np.logspace( + np.log10(self._min_interpolate), + np.log10(self._max_interpolate), + self._interp_grid_num, + ) mass_3d_array = self.mass_3d(r_array, kwargs) - mass_3d_array[mass_3d_array < 10. ** (-100)] = 10. ** (-100) - self._log_mass_3d = interp1d(np.log(r_array), np.log(mass_3d_array/r_array), - fill_value=(np.log(mass_3d_array[0] / r_array[0]), -1000), bounds_error=False) - return np.exp(self._log_mass_3d(np.log(r))) * np.minimum(r, self._max_interpolate) + mass_3d_array[mass_3d_array < 10.0 ** (-100)] = 10.0 ** (-100) + self._log_mass_3d = interp1d( + np.log(r_array), + np.log(mass_3d_array / r_array), + fill_value=(np.log(mass_3d_array[0] / r_array[0]), -1000), + bounds_error=False, + ) + return np.exp(self._log_mass_3d(np.log(r))) * np.minimum( + r, self._max_interpolate + ) diff --git a/lenstronomy/GalKin/observation.py b/lenstronomy/GalKin/observation.py index d22e8fd08..f043dbd50 100644 --- a/lenstronomy/GalKin/observation.py +++ b/lenstronomy/GalKin/observation.py @@ -1,13 +1,13 @@ from lenstronomy.GalKin.aperture import Aperture from lenstronomy.GalKin.psf import PSF -__all__ = ['GalkinObservation'] +__all__ = ["GalkinObservation"] class GalkinObservation(PSF, Aperture): - """ - this class sets the base for the observational properties (aperture and seeing condition) - """ + """This class sets the base for the observational properties (aperture and seeing + condition)""" + def __init__(self, kwargs_aperture, kwargs_psf): Aperture.__init__(self, **kwargs_aperture) PSF.__init__(self, **kwargs_psf) diff --git a/lenstronomy/GalKin/psf.py b/lenstronomy/GalKin/psf.py index ea87bd08d..d6f437bf1 100644 --- a/lenstronomy/GalKin/psf.py +++ b/lenstronomy/GalKin/psf.py @@ -3,26 +3,27 @@ from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class PSF(object): - """ - general class to handle the PSF in the GalKin module for rendering the displacement of photons/spectro - """ + """General class to handle the PSF in the GalKin module for rendering the + displacement of photons/spectro.""" + def __init__(self, psf_type, **kwargs_psf): """ :param psf_type: string, point spread function type, current support for 'GAUSSIAN' and 'MOFFAT' :param kwargs_psf: keyword argument describing the relevant parameters of the PSF. """ - if psf_type == 'GAUSSIAN': + if psf_type == "GAUSSIAN": self._psf = PSFGaussian(**kwargs_psf) - elif psf_type == 'MOFFAT': + elif psf_type == "MOFFAT": self._psf = PSFMoffat(**kwargs_psf) else: - raise ValueError('psf_type %s not supported for convolution!' % psf_type) + raise ValueError("psf_type %s not supported for convolution!" % psf_type) def displace_psf(self, x, y): """ @@ -34,8 +35,7 @@ def displace_psf(self, x, y): return self._psf.displace_psf(x, y) def convolution_kernel(self, delta_pix, num_pix=21): - """ - normalized convolution kernel + """Normalized convolution kernel. 
:param delta_pix: pixel scale of kernel :param num_pix: number of pixels per axis of the kernel @@ -46,9 +46,8 @@ def convolution_kernel(self, delta_pix, num_pix=21): @export class PSFGaussian(object): - """ - Gaussian PSF - """ + """Gaussian PSF.""" + def __init__(self, fwhm): """ @@ -66,8 +65,7 @@ def displace_psf(self, x, y): return velocity_util.displace_PSF_gaussian(x, y, self._fwhm) def convolution_kernel(self, delta_pix, num_pix=21): - """ - normalized convolution kernel + """Normalized convolution kernel. :param delta_pix: pixel scale of kernel :param num_pix: number of pixels per axis of the kernel @@ -80,9 +78,7 @@ def convolution_kernel(self, delta_pix, num_pix=21): @export class PSFMoffat(object): - """ - Moffat PSF - """ + """Moffat PSF.""" def __init__(self, fwhm, moffat_beta): """ @@ -103,14 +99,17 @@ def displace_psf(self, x, y): return velocity_util.displace_PSF_moffat(x, y, self._fwhm, self._moffat_beta) def convolution_kernel(self, delta_pix, num_pix=21): - """ - normalized convolution kernel + """Normalized convolution kernel. :param delta_pix: pixel scale of kernel :param num_pix: number of pixels per axis of the kernel :return: 2d numpy array of kernel """ - kernel = kernel_util.kernel_moffat(num_pix=num_pix, delta_pix=delta_pix, fwhm=self._fwhm, - moffat_beta=self._moffat_beta) + kernel = kernel_util.kernel_moffat( + num_pix=num_pix, + delta_pix=delta_pix, + fwhm=self._fwhm, + moffat_beta=self._moffat_beta, + ) return kernel diff --git a/lenstronomy/GalKin/velocity_util.py b/lenstronomy/GalKin/velocity_util.py index c3567f5b4..9ec2428b8 100644 --- a/lenstronomy/GalKin/velocity_util.py +++ b/lenstronomy/GalKin/velocity_util.py @@ -1,8 +1,9 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -34,27 +35,25 @@ def displace_PSF_gaussian(x, y, FWHM): @export def moffat_r(r, alpha, beta): - """ - Moffat profile + """Moffat profile. :param r: radial coordinate :param alpha: Moffat parameter :param beta: exponent :return: Moffat profile """ - return 2. * (beta - 1) / alpha ** 2 * (1 + (r/alpha) ** 2) ** (-beta) + return 2.0 * (beta - 1) / alpha**2 * (1 + (r / alpha) ** 2) ** (-beta) @export def moffat_fwhm_alpha(FWHM, beta): - """ - computes alpha parameter from FWHM and beta for a Moffat profile + """Computes alpha parameter from FWHM and beta for a Moffat profile. :param FWHM: full width at half maximum :param beta: beta parameter of Moffat profile :return: alpha parameter of Moffat profile """ - return FWHM / (2 * np.sqrt(2 ** (1. / beta) - 1)) + return FWHM / (2 * np.sqrt(2 ** (1.0 / beta) - 1)) @export @@ -89,29 +88,25 @@ def displace_PSF_moffat(x, y, FWHM, beta): @export def draw_cdf_Y(beta): - """ - Draw c.d.f for Moffat function according to Berge et al. Ufig paper, equation B2 + """Draw c.d.f for Moffat function according to Berge et al. 
Ufig paper, equation B2 cdf(Y) = 1-Y**(1-beta) :return: """ x = np.random.uniform(0, 1) - return (1-x) ** (1./(1-beta)) + return (1 - x) ** (1.0 / (1 - beta)) @export def project2d_random(r): - """ - draws a random projection from radius r in 2d and 1d - :param r: 3d radius - :return: R, x, y - """ + """Draws a random projection from radius r in 2d and 1d :param r: 3d radius :return: + R, x, y.""" size = len(np.atleast_1d(r)) if size == 1: size = None u1 = np.random.uniform(0, 1, size=size) u2 = np.random.uniform(0, 1, size=size) - eta = np.arccos(2*u1 - 1) - np.pi / 2 + eta = np.arccos(2 * u1 - 1) - np.pi / 2 phi = 2 * np.pi * u2 x = r * np.cos(eta) * np.cos(phi) y = r * np.cos(eta) * np.sin(phi) @@ -141,5 +136,7 @@ def draw_hernquist(a): :return: realisation of radius of Hernquist luminosity weighting in 3d """ P = np.random.uniform() # draws uniform between [0,1) - r = a*np.sqrt(P)*(np.sqrt(P)+1)/(1-P) # solves analytically to r from P(r) + r = ( + a * np.sqrt(P) * (np.sqrt(P) + 1) / (1 - P) + ) # solves analytically to r from P(r) return r diff --git a/lenstronomy/ImSim/MultiBand/joint_linear.py b/lenstronomy/ImSim/MultiBand/joint_linear.py index 376bba45b..5192b196e 100644 --- a/lenstronomy/ImSim/MultiBand/joint_linear.py +++ b/lenstronomy/ImSim/MultiBand/joint_linear.py @@ -3,52 +3,93 @@ import numpy as np -__all__ = ['JointLinear'] +__all__ = ["JointLinear"] class JointLinear(MultiLinear): - """ - class to model multiple exposures in the same band and makes a constraint fit to all bands simultaneously - with joint constraints on the surface brightness of the model. This model setting require the same surface - brightness models to be called in all available images/bands + """Class to model multiple exposures in the same band and makes a constraint fit to + all bands simultaneously with joint constraints on the surface brightness of the + model. + This model setting require the same surface brightness models to be called in all + available images/bands """ - def __init__(self, multi_band_list, kwargs_model, compute_bool=None, likelihood_mask_list=None): + + def __init__( + self, + multi_band_list, + kwargs_model, + compute_bool=None, + likelihood_mask_list=None, + ): # TODO: make this raise statement valid # if kwargs_model.get('index_source_light_model_list', None) is not None or \ # kwargs_model.get('index_lens_light_model_list', None) is not None or \ # kwargs_model.get('index_point_source_model_list', None) is not None: # raise ValueError('You are not allowed to set partial surface brightness models to individual bands in the ' # 'joint-linear mode. Your settings are: ', kwargs_model) - super(JointLinear, self).__init__(multi_band_list, kwargs_model=kwargs_model, compute_bool=compute_bool, - likelihood_mask_list=likelihood_mask_list) - self.type = 'joint-linear' - - def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, inv_bool=False): + super(JointLinear, self).__init__( + multi_band_list, + kwargs_model=kwargs_model, + compute_bool=compute_bool, + likelihood_mask_list=likelihood_mask_list, + ) + self.type = "joint-linear" + + def image_linear_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + inv_bool=False, + ): + """Computes the image (lens and source surface brightness with a given lens + model). 
The linear parameters are computed with a weighted linear least square + optimization (i.e. flux normalization of the brightness profiles) + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the + purpose of the covariance matrix. + :return: 1d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data """ - computes the image (lens and source surface brightness with a given lens model). - The linear parameters are computed with a weighted linear least square optimization - (i.e. flux normalization of the brightness profiles) - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix. - :return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data - """ - A = self.linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, kwargs_special) + A = self.linear_response_matrix( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) C_D_response, model_error_list = self.error_response(kwargs_lens, kwargs_ps) d = self.data_response - param, cov_param, wls_model = de_lens.get_param_WLS(A.T, 1 / C_D_response, d, inv_bool=inv_bool) + param, cov_param, wls_model = de_lens.get_param_WLS( + A.T, 1 / C_D_response, d, inv_bool=inv_bool + ) wls_list = self._array2image_list(wls_model) return wls_list, model_error_list, cov_param, param - def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None): - """ - computes the linear response matrix (m x n), with n being the data size and m being the coefficients + def linear_response_matrix( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + ): + """Computes the linear response matrix (m x n), with n being the data size and m + being the coefficients. 
:param kwargs_lens: :param kwargs_source: @@ -59,8 +100,14 @@ def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_le A = [] for i in range(self._num_bands): if self._compute_bool[i] is True: - A_i = self._imageModel_list[i].linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, - kwargs_ps, kwargs_extinction, kwargs_special) + A_i = self._imageModel_list[i].linear_response_matrix( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) if len(A) == 0: A = A_i else: @@ -69,8 +116,8 @@ def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_le @property def data_response(self): - """ - returns the 1d array of the data element that is fitted for (including masking) + """Returns the 1d array of the data element that is fitted for (including + masking) :return: 1d numpy array """ @@ -85,8 +132,7 @@ def data_response(self): return d def _array2image_list(self, array): - """ - maps 1d vector of joint exposures in list of 2d images of single exposures + """Maps 1d vector of joint exposures in list of 2d images of single exposures. :param array: 1d numpy array :return: list of 2d numpy arrays of size of exposures @@ -96,22 +142,25 @@ def _array2image_list(self, array): for i in range(self._num_bands): if self._compute_bool[i] is True: num_data = self.num_response_list[i] - array_i = array[k:k + num_data] + array_i = array[k : k + num_data] image_i = self._imageModel_list[i].array_masked2image(array_i) image_list.append(image_i) k += num_data return image_list def error_response(self, kwargs_lens, kwargs_ps, kwargs_special=None): - """ - returns the 1d array of the error estimate corresponding to the data response + """Returns the 1d array of the error estimate corresponding to the data + response. - :return: 1d numpy array of response, 2d array of additonal errors (e.g. point source uncertainties) + :return: 1d numpy array of response, 2d array of additonal errors (e.g. point + source uncertainties) """ C_D_response, model_error = [], [] for i in range(self._num_bands): if self._compute_bool[i] is True: - C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens, kwargs_ps, kwargs_special=kwargs_special) + C_D_response_i, model_error_i = self._imageModel_list[i].error_response( + kwargs_lens, kwargs_ps, kwargs_special=kwargs_special + ) model_error.append(model_error_i) if len(C_D_response) == 0: C_D_response = C_D_response_i @@ -119,38 +168,59 @@ def error_response(self, kwargs_lens, kwargs_ps, kwargs_special=None): C_D_response = np.append(C_D_response, C_D_response_i) return C_D_response, model_error - def likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, source_marg=False, linear_prior=None, - check_positive_flux=False): - """ - computes the likelihood of the data given a model - This is specified with the non-linear parameters and a linear inversion and prior marginalisation. + def likelihood_data_given_model( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + ): + """Computes the likelihood of the data given a model This is specified with the + non-linear parameters and a linear inversion and prior marginalisation. 
:param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: - :param check_positive_flux: bool, if True, checks whether the linear inversion resulted in non-negative flux - components and applies a punishment in the likelihood if so. - :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images) + :param check_positive_flux: bool, if True, checks whether the linear inversion + resulted in non-negative flux components and applies a punishment in the + likelihood if so. + :return: log likelihood (natural logarithm) (sum of the log likelihoods of the + individual images) """ # generate image - im_sim_list, model_error_list, cov_matrix, param = self.image_linear_solve(kwargs_lens, kwargs_source, - kwargs_lens_light, kwargs_ps, - kwargs_extinction, kwargs_special, - inv_bool=source_marg) + im_sim_list, model_error_list, cov_matrix, param = self.image_linear_solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + inv_bool=source_marg, + ) # compute X^2 logL = 0 index = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: - logL += self._imageModel_list[i].Data.log_likelihood(im_sim_list[index], self._imageModel_list[i].likelihood_mask, model_error_list[index]) + logL += self._imageModel_list[i].Data.log_likelihood( + im_sim_list[index], + self._imageModel_list[i].likelihood_mask, + model_error_list[index], + ) index += 1 if cov_matrix is not None and source_marg: marg_const = de_lens.marginalization_new(cov_matrix, d_prior=linear_prior) logL += marg_const if check_positive_flux is True and self._num_bands > 0: - bool_ = self._imageModel_list[0].check_positive_flux(kwargs_source, kwargs_lens_light, kwargs_ps) + bool_ = self._imageModel_list[0].check_positive_flux( + kwargs_source, kwargs_lens_light, kwargs_ps + ) if bool_ is False: - logL -= 10 ** 5 + logL -= 10**5 return logL diff --git a/lenstronomy/ImSim/MultiBand/multi_data_base.py b/lenstronomy/ImSim/MultiBand/multi_data_base.py index 8cd98cee4..a66336938 100644 --- a/lenstronomy/ImSim/MultiBand/multi_data_base.py +++ b/lenstronomy/ImSim/MultiBand/multi_data_base.py @@ -1,10 +1,9 @@ -__all__ = ['MultiDataBase'] +__all__ = ["MultiDataBase"] class MultiDataBase(object): - """ - Base class with definitions that are shared among all variations of modelling multiple data sets - """ + """Base class with definitions that are shared among all variations of modelling + multiple data sets.""" def __init__(self, image_model_list, compute_bool=None): """ @@ -17,7 +16,9 @@ def __init__(self, image_model_list, compute_bool=None): compute_bool = [True] * self._num_bands else: if not len(compute_bool) == self._num_bands: - raise ValueError('compute_bool statement has not the same range as number of bands available!') + raise ValueError( + "compute_bool statement has not the same range as number of bands available!" + ) self._compute_bool = compute_bool self._imageModel_list = image_model_list self._num_response_list = [] @@ -30,16 +31,14 @@ def num_bands(self): @property def num_response_list(self): - """ - list of number of data elements that are used in the minimization + """List of number of data elements that are used in the minimization. :return: list of integers """ return self._num_response_list def reset_point_source_cache(self, cache=True): - """ - deletes all the cache in the point source class and saves it from then on + """Deletes all the cache in the point source class and saves it from then on. 
:return: """ @@ -54,7 +53,9 @@ def num_data_evaluate(self): num += self._imageModel_list[i].num_data_evaluate return num - def num_param_linear(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): + def num_param_linear( + self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): """ :return: number of linear coefficients to be solved for in the linear inversion @@ -62,8 +63,9 @@ def num_param_linear(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs num = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: - num += self._imageModel_list[i].num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, - kwargs_ps) + num += self._imageModel_list[i].num_param_linear( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) return num def reduced_residuals(self, model_list, error_map_list=None): @@ -79,7 +81,10 @@ def reduced_residuals(self, model_list, error_map_list=None): index = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: - residual_list.append(self._imageModel_list[i].reduced_residuals(model_list[index], - error_map=error_map_list[index])) + residual_list.append( + self._imageModel_list[i].reduced_residuals( + model_list[index], error_map=error_map_list[index] + ) + ) index += 1 return residual_list diff --git a/lenstronomy/ImSim/MultiBand/multi_linear.py b/lenstronomy/ImSim/MultiBand/multi_linear.py index bf63c4b26..266ff7696 100644 --- a/lenstronomy/ImSim/MultiBand/multi_linear.py +++ b/lenstronomy/ImSim/MultiBand/multi_linear.py @@ -1,14 +1,13 @@ from lenstronomy.ImSim.MultiBand.multi_data_base import MultiDataBase from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel -__all__ = ['MultiLinear'] +__all__ = ["MultiLinear"] class MultiLinear(MultiDataBase): - """ - class to simulate/reconstruct images in multi-band option. - This class calls functions of image_model.py with different bands with - joint non-linear parameters and decoupled linear parameters. + """Class to simulate/reconstruct images in multi-band option. This class calls + functions of image_model.py with different bands with joint non-linear parameters + and decoupled linear parameters. the class supports keyword arguments 'index_lens_model_list', 'index_source_light_model_list', 'index_lens_light_model_list', 'index_point_source_model_list', 'index_optical_depth_model_list' in kwargs_model @@ -20,11 +19,17 @@ class to simulate/reconstruct images in multi-band option. - set index_lens_light_model_list = [[0], [1]] - (optional) for now all the parameters between the two light profiles are independent in the model. You have the possibility to join a subset of model parameters (e.g. joint centroid). See the Param() class for documentation. - """ - def __init__(self, multi_band_list, kwargs_model, likelihood_mask_list=None, compute_bool=None, - kwargs_pixelbased=None, linear_solver=True): + def __init__( + self, + multi_band_list, + kwargs_model, + likelihood_mask_list=None, + compute_bool=None, + kwargs_pixelbased=None, + linear_solver=True, + ): """ :param multi_band_list: list of imaging band configurations [[kwargs_data, kwargs_psf, kwargs_numerics],[...], ...] @@ -34,41 +39,65 @@ def __init__(self, multi_band_list, kwargs_model, likelihood_mask_list=None, com :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such that they get overwritten by the linear solver solution. 
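
A minimal construction sketch for the two-band, two-lens-light-profile configuration described in the class docstring above; the image, PSF, and numerics keyword contents are illustrative placeholders, not values taken from this diff.

import numpy as np
from lenstronomy.ImSim.MultiBand.multi_linear import MultiLinear

# two bands with blank 20x20 images; real use fills these from observations
kwargs_data = {
    "image_data": np.zeros((20, 20)),
    "background_rms": 0.05,
    "exposure_time": 100.0,
}
kwargs_psf = {"psf_type": "NONE"}
kwargs_numerics = {"supersampling_factor": 1}
multi_band_list = [
    [dict(kwargs_data), kwargs_psf, kwargs_numerics],
    [dict(kwargs_data), kwargs_psf, kwargs_numerics],
]

# joint lens model, one independent lens-light profile per band, following
# the index_lens_light_model_list convention quoted in the docstring above
kwargs_model = {
    "lens_model_list": ["SIE"],
    "lens_light_model_list": ["SERSIC", "SERSIC"],
    "index_lens_light_model_list": [[0], [1]],
}

multi_band = MultiLinear(multi_band_list, kwargs_model, compute_bool=[True, True])
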
""" - self.type = 'multi-linear' + self.type = "multi-linear" imageModel_list = [] if linear_solver is False and len(multi_band_list) > 1: - raise ValueError('Multi-linear mode with more than one band does not support "linear_solver" = False.') + raise ValueError( + 'Multi-linear mode with more than one band does not support "linear_solver" = False.' + ) for band_index in range(len(multi_band_list)): - imageModel = SingleBandMultiModel(multi_band_list, kwargs_model, likelihood_mask_list=likelihood_mask_list, - band_index=band_index, kwargs_pixelbased=kwargs_pixelbased, - linear_solver=linear_solver) + imageModel = SingleBandMultiModel( + multi_band_list, + kwargs_model, + likelihood_mask_list=likelihood_mask_list, + band_index=band_index, + kwargs_pixelbased=kwargs_pixelbased, + linear_solver=linear_solver, + ) imageModel_list.append(imageModel) super(MultiLinear, self).__init__(imageModel_list, compute_bool=compute_bool) - def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, inv_bool=False): - """ - computes the image (lens and source surface brightness with a given lens model). - The linear parameters are computed with a weighted linear least square optimization - (i.e. flux normalization of the brightness profiles) + def image_linear_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + inv_bool=False, + ): + """Computes the image (lens and source surface brightness with a given lens + model). The linear parameters are computed with a weighted linear least square + optimization (i.e. flux normalization of the brightness profiles) - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix. - :return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the + purpose of the covariance matrix. 
+ :return: 1d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data """ wls_list, error_map_list, cov_param_list, param_list = [], [], [], [] for i in range(self._num_bands): if self._compute_bool[i] is True: - wls_model, error_map, cov_param, param = self._imageModel_list[i].image_linear_solve(kwargs_lens, - kwargs_source, - kwargs_lens_light, - kwargs_ps, - kwargs_extinction, - kwargs_special, - inv_bool=inv_bool) + wls_model, error_map, cov_param, param = self._imageModel_list[ + i + ].image_linear_solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + inv_bool=inv_bool, + ) else: wls_model, error_map, cov_param, param = None, None, None, None wls_list.append(wls_model) @@ -77,20 +106,30 @@ def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_l param_list.append(param) return wls_list, error_map_list, cov_param_list, param_list - def likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, source_marg=False, linear_prior=None, - check_positive_flux=False): - """ - computes the likelihood of the data given a model - This is specified with the non-linear parameters and a linear inversion and prior marginalisation. + def likelihood_data_given_model( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + ): + """Computes the likelihood of the data given a model This is specified with the + non-linear parameters and a linear inversion and prior marginalisation. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: - :param check_positive_flux: bool, if True, checks whether the linear inversion resulted in non-negative flux - components and applies a punishment in the likelihood if so. - :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images) + :param check_positive_flux: bool, if True, checks whether the linear inversion + resulted in non-negative flux components and applies a punishment in the + likelihood if so. 
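
Continuing the two-band sketch above (multi_band from the MultiLinear construction; the profile parameter values are arbitrary), the linear solve and the likelihood are called as follows:

kwargs_lens = [
    {"theta_E": 1.0, "e1": 0.05, "e2": 0.0, "center_x": 0.0, "center_y": 0.0}
]
kwargs_lens_light = [
    {"amp": 1.0, "R_sersic": 0.5, "n_sersic": 3.0, "center_x": 0.0, "center_y": 0.0},
    {"amp": 1.0, "R_sersic": 0.5, "n_sersic": 3.0, "center_x": 0.0, "center_y": 0.0},
]

# one entry per band; entries are None for bands switched off via compute_bool
wls_list, error_map_list, cov_param_list, param_list = multi_band.image_linear_solve(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, inv_bool=True
)

# sum of the per-band log-likelihoods; check_positive_flux=True would subtract
# the flat 10**5 penalty if the solved amplitudes turn out negative
logL = multi_band.likelihood_data_given_model(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light
)
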
+ :return: log likelihood (natural logarithm) (sum of the log likelihoods of the + individual images) """ # generate image logL = 0 @@ -98,10 +137,15 @@ def likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwar linear_prior = [None for i in range(self._num_bands)] for i in range(self._num_bands): if self._compute_bool[i] is True: - logL += self._imageModel_list[i].likelihood_data_given_model(kwargs_lens, kwargs_source, - kwargs_lens_light, kwargs_ps, - kwargs_extinction, kwargs_special, - source_marg=source_marg, - linear_prior=linear_prior[i], - check_positive_flux=check_positive_flux) + logL += self._imageModel_list[i].likelihood_data_given_model( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + source_marg=source_marg, + linear_prior=linear_prior[i], + check_positive_flux=check_positive_flux, + ) return logL diff --git a/lenstronomy/ImSim/MultiBand/single_band_multi_model.py b/lenstronomy/ImSim/MultiBand/single_band_multi_model.py index 40952c05b..dcb538b6e 100644 --- a/lenstronomy/ImSim/MultiBand/single_band_multi_model.py +++ b/lenstronomy/ImSim/MultiBand/single_band_multi_model.py @@ -3,14 +3,13 @@ from lenstronomy.Data.psf import PSF from lenstronomy.Util import class_creator -__all__ = ['SingleBandMultiModel'] +__all__ = ["SingleBandMultiModel"] class SingleBandMultiModel(ImageLinearFit): - """ - class to simulate/reconstruct images in multi-band option. - This class calls functions of image_model.py with different bands with - decoupled linear parameters and the option to pass/select different light models for the different bands + """Class to simulate/reconstruct images in multi-band option. This class calls + functions of image_model.py with different bands with decoupled linear parameters + and the option to pass/select different light models for the different bands. the class supports keyword arguments 'index_lens_model_list', 'index_source_light_model_list', 'index_lens_light_model_list', 'index_point_source_model_list', 'index_optical_depth_model_list' in kwargs_model @@ -22,11 +21,17 @@ class to simulate/reconstruct images in multi-band option. - set index_lens_light_model_list = [[0], [1]] - (optional) for now all the parameters between the two light profiles are independent in the model. You have the possibility to join a subset of model parameters (e.g. joint centroid). See the Param() class for documentation. - """ - def __init__(self, multi_band_list, kwargs_model, likelihood_mask_list=None, band_index=0, kwargs_pixelbased=None, - linear_solver=True): + def __init__( + self, + multi_band_list, + kwargs_model, + likelihood_mask_list=None, + band_index=0, + kwargs_pixelbased=None, + linear_solver=True, + ): """ :param multi_band_list: list of imaging band configurations [[kwargs_data, kwargs_psf, kwargs_numerics],[...], ...] @@ -38,75 +43,132 @@ def __init__(self, multi_band_list, kwargs_model, likelihood_mask_list=None, ban :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such that they get overwritten by the linear solver solution. 
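
A single band of the same configuration is addressed through SingleBandMultiModel via its band_index; multi_band_list and kwargs_model are reused from the sketch above.

from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel

# model only the second band; the index_* lists in kwargs_model decide which
# profiles of the joint model belong to this band
single_band = SingleBandMultiModel(
    multi_band_list, kwargs_model, likelihood_mask_list=None, band_index=1
)
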
""" - self.type = 'single-band-multi-model' + self.type = "single-band-multi-model" if likelihood_mask_list is None: likelihood_mask_list = [None for _ in range(len(multi_band_list))] - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances(band_index=band_index, **kwargs_model) + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = class_creator.create_class_instances(band_index=band_index, **kwargs_model) kwargs_data = multi_band_list[band_index][0] kwargs_psf = multi_band_list[band_index][1] kwargs_numerics = multi_band_list[band_index][2] data_i = ImageData(**kwargs_data) psf_i = PSF(**kwargs_psf) - index_lens_model_list = kwargs_model.get('index_lens_model_list', [None for _ in range(len(multi_band_list))]) + index_lens_model_list = kwargs_model.get( + "index_lens_model_list", [None for _ in range(len(multi_band_list))] + ) self._index_lens_model = index_lens_model_list[band_index] - index_source_list = kwargs_model.get('index_source_light_model_list', - [None for _ in range(len(multi_band_list))]) + index_source_list = kwargs_model.get( + "index_source_light_model_list", [None for _ in range(len(multi_band_list))] + ) self._index_source = index_source_list[band_index] - index_lens_light_list = kwargs_model.get('index_lens_light_model_list', - [None for _ in range(len(multi_band_list))]) + index_lens_light_list = kwargs_model.get( + "index_lens_light_model_list", [None for _ in range(len(multi_band_list))] + ) self._index_lens_light = index_lens_light_list[band_index] - index_point_source_list = kwargs_model.get('index_point_source_model_list', - [None for _ in range(len(multi_band_list))]) + index_point_source_list = kwargs_model.get( + "index_point_source_model_list", [None for _ in range(len(multi_band_list))] + ) self._index_point_source = index_point_source_list[band_index] - index_optical_depth = kwargs_model.get('index_optical_depth_model_list', - [None for _ in range(len(multi_band_list))]) + index_optical_depth = kwargs_model.get( + "index_optical_depth_model_list", + [None for _ in range(len(multi_band_list))], + ) self._index_optical_depth = index_optical_depth[band_index] - super(SingleBandMultiModel, self).__init__(data_i, psf_i, lens_model_class, source_model_class, - lens_light_model_class, point_source_class, extinction_class, - kwargs_numerics=kwargs_numerics, likelihood_mask=likelihood_mask_list[band_index], - kwargs_pixelbased=kwargs_pixelbased, linear_solver=linear_solver) - - def image(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, unconvolved=False, source_add=True, lens_light_add=True, - point_source_add=True): - """ - - make an image with a realisation of linear parameter values "param" - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) + super(SingleBandMultiModel, self).__init__( + data_i, + psf_i, + lens_model_class, 
+ source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + kwargs_numerics=kwargs_numerics, + likelihood_mask=likelihood_mask_list[band_index], + kwargs_pixelbased=kwargs_pixelbased, + linear_solver=linear_solver, + ) + + def image( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + source_add=True, + lens_light_add=True, + point_source_add=True, + ): + """Make an image with a realisation of linear parameter values "param". + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) :param source_add: if True, compute source, otherwise without :param lens_light_add: if True, compute lens light, otherwise without :param point_source_add: if True, add point sources, otherwise without :return: 2d array of surface brightness pixels of the simulation """ - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( - kwargs_lens, - kwargs_source, - kwargs_lens_light, - kwargs_ps, - kwargs_extinction) - return self._image(kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, - kwargs_extinction_i, kwargs_special=kwargs_special, unconvolved=unconvolved, - source_add=source_add, lens_light_add=lens_light_add, - point_source_add=point_source_add) - - def source_surface_brightness(self, kwargs_source, kwargs_lens=None, kwargs_extinction=None, kwargs_special=None, - unconvolved=False, de_lensed=False, k=None, update_pixelbased_mapping=True): - """ - - computes the source surface brightness distribution - - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction + ) + return self._image( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + source_add=source_add, + lens_light_add=lens_light_add, + point_source_add=point_source_add, + ) + + def source_surface_brightness( + self, + kwargs_source, + kwargs_lens=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + de_lensed=False, + k=None, + update_pixelbased_mapping=True, + ): + """Computes the source surface brightness distribution. 
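
Continuing that sketch, image() renders the model of the selected band, with or without PSF convolution (kwargs_lens and kwargs_lens_light as defined above):

# model image of the selected band, convolved and unconvolved
model_image = single_band.image(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, unconvolved=False
)
model_unconvolved = single_band.image(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, unconvolved=True
)
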
+ + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles :param kwargs_extinction: list of keyword arguments of extinction model - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) - :param de_lensed: if True: returns the un-lensed source surface brightness profile, otherwise the lensed. + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) + :param de_lensed: if True: returns the un-lensed source surface brightness + profile, otherwise the lensed. :param k: integer, if set, will only return the model of the specific index :return: 2d array of surface brightness pixels """ @@ -115,19 +177,26 @@ def source_surface_brightness(self, kwargs_source, kwargs_lens=None, kwargs_exti kwargs_source, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=kwargs_extinction) - return self._source_surface_brightness(kwargs_source_i, kwargs_lens_i, kwargs_extinction=kwargs_extinction_i, - kwargs_special=kwargs_special, unconvolved=unconvolved, - de_lensed=de_lensed, k=k, - update_pixelbased_mapping=update_pixelbased_mapping) + kwargs_extinction=kwargs_extinction, + ) + return self._source_surface_brightness( + kwargs_source_i, + kwargs_lens_i, + kwargs_extinction=kwargs_extinction_i, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + de_lensed=de_lensed, + k=k, + update_pixelbased_mapping=update_pixelbased_mapping, + ) def lens_surface_brightness(self, kwargs_lens_light, unconvolved=False, k=None): - """ + """Computes the lens surface brightness distribution. - computes the lens surface brightness distribution - - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param unconvolved: if True, returns unconvolved surface brightness (perfect seeing), otherwise convolved with PSF kernel + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param unconvolved: if True, returns unconvolved surface brightness (perfect + seeing), otherwise convolved with PSF kernel :return: 2d array of surface brightness pixels """ _, _, kwargs_lens_light_i, kwargs_ps_i, _ = self.select_kwargs( @@ -135,13 +204,21 @@ def lens_surface_brightness(self, kwargs_lens_light, unconvolved=False, k=None): kwargs_source=None, kwargs_lens_light=kwargs_lens_light, kwargs_ps=None, - kwargs_extinction=None) - return self._lens_surface_brightness(kwargs_lens_light_i, unconvolved=unconvolved, k=k) - - def point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconvolved=False, k=None): - """ - - computes the point source positions and paints PSF convolutions on them + kwargs_extinction=None, + ) + return self._lens_surface_brightness( + kwargs_lens_light_i, unconvolved=unconvolved, k=k + ) + + def point_source( + self, + kwargs_ps, + kwargs_lens=None, + kwargs_special=None, + unconvolved=False, + k=None, + ): + """Computes the point source positions and paints PSF convolutions on them. 
:param kwargs_ps: :param kwargs_lens: @@ -155,80 +232,153 @@ def point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconvo kwargs_source=None, kwargs_lens_light=None, kwargs_ps=kwargs_ps, - kwargs_extinction=None) - return self._point_source(kwargs_ps=kwargs_ps_i, kwargs_lens=kwargs_lens_i, kwargs_special=kwargs_special, - unconvolved=unconvolved, k=k) - - def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, inv_bool=False): + kwargs_extinction=None, + ) + return self._point_source( + kwargs_ps=kwargs_ps_i, + kwargs_lens=kwargs_lens_i, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + k=k, + ) + + def image_linear_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + inv_bool=False, + ): + """Computes the image (lens and source surface brightness with a given lens + model). + + The linear parameters are computed with a weighted linear least square + optimization (i.e. flux normalization of the brightness profiles) + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the + purpose of the covariance matrix. + :return: 1d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data """ - computes the image (lens and source surface brightness with a given lens model). - The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles) - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix. 
- :return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data - """ - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( - kwargs_lens, - kwargs_source, - kwargs_lens_light, - kwargs_ps, - kwargs_extinction) - wls_model, error_map, cov_param, param = self._image_linear_solve(kwargs_lens_i, kwargs_source_i, - kwargs_lens_light_i, kwargs_ps_i, - kwargs_extinction_i, kwargs_special, inv_bool=inv_bool) - # For the interfometric likelihood method, + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction + ) + wls_model, error_map, cov_param, param = self._image_linear_solve( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + kwargs_special, + inv_bool=inv_bool, + ) + # For the interfometric likelihood method, # return the array2 of [array1, array2] of the model output of _image_linear_solver. if self.Data.likelihood_method() == "interferometry_natwt": wls_model = wls_model[1] return wls_model, error_map, cov_param, param - def likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, source_marg=False, linear_prior=None, - check_positive_flux=False, linear_solver=True): - """ - computes the likelihood of the data given a model - This is specified with the non-linear parameters and a linear inversion and prior marginalisation. + def likelihood_data_given_model( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + linear_solver=True, + ): + """Computes the likelihood of the data given a model This is specified with the + non-linear parameters and a linear inversion and prior marginalisation. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: - :param check_positive_flux: bool, if True, checks whether the linear inversion resulted in non-negative flux - components and applies a punishment in the likelihood if so. - :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images) + :param check_positive_flux: bool, if True, checks whether the linear inversion + resulted in non-negative flux components and applies a punishment in the + likelihood if so. 
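
Still within the same sketch, the per-band linear parameter count, linear solve, and likelihood follow the signatures above:

# number of linear amplitudes the solver determines for this band
n_linear = single_band.num_param_linear(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light
)

# per-band weighted least-squares solve and log-likelihood
wls_model, error_map, cov_param, param = single_band.image_linear_solve(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light, inv_bool=True
)
logL_band = single_band.likelihood_data_given_model(
    kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light
)
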
+ :return: log likelihood (natural logarithm) (sum of the log likelihoods of the + individual images) """ # generate image - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( - kwargs_lens, - kwargs_source, - kwargs_lens_light, - kwargs_ps, - kwargs_extinction) - return self._likelihood_data_given_model(kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, - kwargs_ps_i, kwargs_extinction_i, kwargs_special, - source_marg=source_marg, linear_prior=linear_prior, - check_positive_flux=check_positive_flux, - linear_solver=self._linear_solver) - - def num_param_linear(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None): + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction + ) + return self._likelihood_data_given_model( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + kwargs_special, + source_marg=source_marg, + linear_prior=linear_prior, + check_positive_flux=check_positive_flux, + linear_solver=self._linear_solver, + ) + + def num_param_linear( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + ): """ :return: number of linear coefficients to be solved for in the linear inversion """ if self._linear_solver is False: return 0 - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) - num = self._num_param_linear(kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i) + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + num = self._num_param_linear( + kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i + ) return num - def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None): - """ - computes the linear response matrix (m x n), with n beeing the data size and m being the coefficients + def linear_response_matrix( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + ): + """Computes the linear response matrix (m x n), with n beeing the data size and + m being the coefficients. 
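
The "linear inversion" referred to throughout these docstrings is a weighted least-squares solve of the linear response matrix against the 1d data vector. A pure-numpy illustration with toy sizes (this shows the idea, not lenstronomy's internal routine):

import numpy as np

rng = np.random.default_rng(1)
m, n = 3, 100                        # m linear coefficients, n data points
A = rng.normal(size=(m, n))          # response matrix, one row per basis image
amp_true = np.array([2.0, -1.0, 0.5])
sigma2 = 0.1**2 * np.ones(n)         # per-pixel variance (the C_D_response role)
d = amp_true @ A + rng.normal(scale=0.1, size=n)

# weighted normal equations: (A C^-1 A^T) amp = A C^-1 d
M = (A / sigma2) @ A.T
b = (A / sigma2) @ d
amp_fit = np.linalg.solve(M, b)
model_1d = amp_fit @ A               # best-fit model in the masked 1d data space
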
:param kwargs_lens: :param kwargs_source: @@ -236,24 +386,39 @@ def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_le :param kwargs_ps: :return: """ - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( - kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction) - A = self._linear_response_matrix(kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, - kwargs_extinction_i, kwargs_special) + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction + ) + A = self._linear_response_matrix( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + kwargs_special, + ) return A - def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param, model_index_select=True): - """ - variance of the linear source reconstruction in the source plane coordinates, - computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors - of the basis set. + def error_map_source( + self, kwargs_source, x_grid, y_grid, cov_param, model_index_select=True + ): + """Variance of the linear source reconstruction in the source plane coordinates, + computed by the diagonal elements of the covariance matrix of the source + reconstruction as a sum of the errors of the basis set. :param kwargs_source: keyword arguments of source model :param x_grid: x-axis of positions to compute error map :param y_grid: y-axis of positions to compute error map :param cov_param: covariance matrix of liner inversion parameters - :param model_index_select: boolean, if True, selects the model components of this band (default). If False, - assumes input kwargs_source is already selected list. + :param model_index_select: boolean, if True, selects the model components of + this band (default). If False, assumes input kwargs_source is already + selected list. :return: diagonal covariance errors at the positions (x_grid, y_grid) """ if self._index_source is None or model_index_select is False: @@ -263,20 +428,32 @@ def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param, model_index return self._error_map_source(kwargs_source_i, x_grid, y_grid, cov_param) def error_response(self, kwargs_lens, kwargs_ps, kwargs_special): - """ - returns the 1d array of the error estimate corresponding to the data response + """Returns the 1d array of the error estimate corresponding to the data + response. - :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties) + :return: 1d numpy array of response, 2d array of additional errors (e.g. 
point + source uncertainties) """ - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( kwargs_lens, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=kwargs_ps, - kwargs_extinction=None) - return self._error_response(kwargs_lens_i, kwargs_ps_i, kwargs_special=kwargs_special) - - def update_linear_kwargs(self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): + kwargs_extinction=None, + ) + return self._error_response( + kwargs_lens_i, kwargs_ps_i, kwargs_special=kwargs_special + ) + + def update_linear_kwargs( + self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): """ links linear parameters to kwargs arguments @@ -286,24 +463,39 @@ def update_linear_kwargs(self, param, kwargs_lens, kwargs_source, kwargs_lens_li :param param: linear parameter vector corresponding to the response matrix :return: updated list of kwargs with linear parameter values """ - kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i = self.select_kwargs( - kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction=None) - return self._update_linear_kwargs(param, kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i) + ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) = self.select_kwargs( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction=None, + ) + return self._update_linear_kwargs( + param, kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i + ) def extinction_map(self, kwargs_extinction=None, kwargs_special=None): - """ - differential extinction per pixel + """Differential extinction per pixel. - :param kwargs_extinction: list of keyword arguments corresponding to the optical depth models tau, such that extinction is exp(-tau) + :param kwargs_extinction: list of keyword arguments corresponding to the optical + depth models tau, such that extinction is exp(-tau) :param kwargs_special: keyword arguments, additional parameter to the extinction :return: 2d array of size of the image """ - _, _, _, _, kwargs_extinction_i = self.select_kwargs(kwargs_extinction=kwargs_extinction) + _, _, _, _, kwargs_extinction_i = self.select_kwargs( + kwargs_extinction=kwargs_extinction + ) return self._extinction_map(kwargs_extinction_i, kwargs_special) def linear_param_from_kwargs(self, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - inverse function of update_linear() returning the linear amplitude list for the keyword argument list + """Inverse function of update_linear() returning the linear amplitude list for + the keyword argument list. 
:param kwargs_source: :param kwargs_lens_light: @@ -315,13 +507,22 @@ def linear_param_from_kwargs(self, kwargs_source, kwargs_lens_light, kwargs_ps): kwargs_source=kwargs_source, kwargs_lens_light=kwargs_lens_light, kwargs_ps=kwargs_ps, - kwargs_extinction=None) - return self._linear_param_from_kwargs(kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i) - - def select_kwargs(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None): - """ - select subset of kwargs lists referenced to this imaging band + kwargs_extinction=None, + ) + return self._linear_param_from_kwargs( + kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i + ) + + def select_kwargs( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + ): + """Select subset of kwargs lists referenced to this imaging band. :param kwargs_lens: :param kwargs_source: @@ -348,5 +549,13 @@ def select_kwargs(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light= if self._index_optical_depth is None or kwargs_extinction is None: kwargs_extinction_i = kwargs_extinction else: - kwargs_extinction_i = [kwargs_extinction[k] for k in self._index_optical_depth] - return kwargs_lens_i, kwargs_source_i, kwargs_lens_light_i, kwargs_ps_i, kwargs_extinction_i + kwargs_extinction_i = [ + kwargs_extinction[k] for k in self._index_optical_depth + ] + return ( + kwargs_lens_i, + kwargs_source_i, + kwargs_lens_light_i, + kwargs_ps_i, + kwargs_extinction_i, + ) diff --git a/lenstronomy/ImSim/Numerics/adaptive_numerics.py b/lenstronomy/ImSim/Numerics/adaptive_numerics.py index ce568ca21..0d4b87845 100644 --- a/lenstronomy/ImSim/Numerics/adaptive_numerics.py +++ b/lenstronomy/ImSim/Numerics/adaptive_numerics.py @@ -1,9 +1,12 @@ -from lenstronomy.ImSim.Numerics.numba_convolution import SubgridNumbaConvolution, NumbaConvolution +from lenstronomy.ImSim.Numerics.numba_convolution import ( + SubgridNumbaConvolution, + NumbaConvolution, +) from lenstronomy.ImSim.Numerics.convolution import PixelKernelConvolution from lenstronomy.Util import kernel_util from lenstronomy.Util import image_util -__all__ = ['AdaptiveConvolution'] +__all__ = ["AdaptiveConvolution"] class AdaptiveConvolution(object): @@ -19,8 +22,18 @@ class AdaptiveConvolution(object): adaptive solution is 1 + 2 - 3 """ - def __init__(self, kernel_super, supersampling_factor, conv_supersample_pixels, supersampling_kernel_size=None, - compute_pixels=None, nopython=True, cache=True, parallel=False): + + def __init__( + self, + kernel_super, + supersampling_factor, + conv_supersample_pixels, + supersampling_kernel_size=None, + compute_pixels=None, + nopython=True, + cache=True, + parallel=False, + ): """ :param kernel_super: convolution kernel in units of super sampled pixels provided, odd length per axis @@ -33,8 +46,10 @@ def __init__(self, kernel_super, supersampling_factor, conv_supersample_pixels, :param cache: bool, numba jit setting to use cache :param parallel: bool, numba jit setting to use parallel mode """ - kernel = kernel_util.degrade_kernel(kernel_super, degrading_factor=supersampling_factor) - self._low_res_conv = PixelKernelConvolution(kernel, convolution_type='fft') + kernel = kernel_util.degrade_kernel( + kernel_super, degrading_factor=supersampling_factor + ) + self._low_res_conv = PixelKernelConvolution(kernel, convolution_type="fft") if supersampling_kernel_size is None: supersampling_kernel_size = len(kernel) @@ -42,13 +57,28 
@@ def __init__(self, kernel_super, supersampling_factor, conv_supersample_pixels, if n_cut_super % 2 == 0: n_cut_super += 1 kernel_super_cut = image_util.cut_edges(kernel_super, n_cut_super) - kernel_cut = kernel_util.degrade_kernel(kernel_super_cut, degrading_factor=supersampling_factor) + kernel_cut = kernel_util.degrade_kernel( + kernel_super_cut, degrading_factor=supersampling_factor + ) - self._low_res_partial = NumbaConvolution(kernel_cut, conv_supersample_pixels, compute_pixels=compute_pixels, - nopython=nopython, cache=cache, parallel=parallel, memory_raise=True) - self._hig_res_partial = SubgridNumbaConvolution(kernel_super_cut, supersampling_factor, conv_supersample_pixels, - compute_pixels=compute_pixels, nopython=nopython, cache=cache, - parallel=parallel) # , kernel_size=len(kernel_cut)) + self._low_res_partial = NumbaConvolution( + kernel_cut, + conv_supersample_pixels, + compute_pixels=compute_pixels, + nopython=nopython, + cache=cache, + parallel=parallel, + memory_raise=True, + ) + self._hig_res_partial = SubgridNumbaConvolution( + kernel_super_cut, + supersampling_factor, + conv_supersample_pixels, + compute_pixels=compute_pixels, + nopython=nopython, + cache=cache, + parallel=parallel, + ) # , kernel_size=len(kernel_cut)) self._supersampling_factor = supersampling_factor def re_size_convolve(self, image_low_res, image_high_res): @@ -61,7 +91,11 @@ def re_size_convolve(self, image_low_res, image_high_res): image_low_res_conv = self._low_res_conv.convolution2d(image_low_res) image_low_res_partial_conv = self._low_res_partial.convolve2d(image_low_res) image_high_res_partial_conv = self._hig_res_partial.convolve2d(image_high_res) - return image_low_res_conv + image_high_res_partial_conv - image_low_res_partial_conv + return ( + image_low_res_conv + + image_high_res_partial_conv + - image_low_res_partial_conv + ) def convolve2d(self, image_high_res): """ @@ -69,5 +103,7 @@ def convolve2d(self, image_high_res): :param image_high_res: supersampled image/model to be convolved on a regular pixel grid :return: convolved and re-sized image """ - image_low_res = image_util.re_size(image_high_res, factor=self._supersampling_factor) + image_low_res = image_util.re_size( + image_high_res, factor=self._supersampling_factor + ) return self.re_size_convolve(image_low_res, image_high_res) diff --git a/lenstronomy/ImSim/Numerics/convolution.py b/lenstronomy/ImSim/Numerics/convolution.py index 143faa76f..52227dce0 100644 --- a/lenstronomy/ImSim/Numerics/convolution.py +++ b/lenstronomy/ImSim/Numerics/convolution.py @@ -6,6 +6,7 @@ import lenstronomy.Util.image_util as image_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() _rfft_mt_safe = True # (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e') @@ -24,26 +25,25 @@ def _centered(arr, newshape): @export class PixelKernelConvolution(object): - """ - class to compute convolutions for a given pixelized kernel (fft, grid) - """ - def __init__(self, kernel, convolution_type='fft_static'): + """Class to compute convolutions for a given pixelized kernel (fft, grid)""" + + def __init__(self, kernel, convolution_type="fft_static"): """ :param kernel: 2d array, convolution kernel :param convolution_type: string, 'fft', 'grid', 'fft_static' mode of 2d convolution """ self._kernel = kernel - if convolution_type not in ['fft', 'grid', 'fft_static']: - raise ValueError('convolution_type %s not supported!' 
% convolution_type) + if convolution_type not in ["fft", "grid", "fft_static"]: + raise ValueError("convolution_type %s not supported!" % convolution_type) self._type = convolution_type self._pre_computed = False def pixel_kernel(self, num_pix=None): - """ - access pixelated kernel + """Access pixelated kernel. - :param num_pix: size of returned kernel (odd number per axis). If None, return the original kernel. + :param num_pix: size of returned kernel (odd number per axis). If None, return + the original kernel. :return: pixel kernel centered """ if num_pix is not None: @@ -52,7 +52,7 @@ def pixel_kernel(self, num_pix=None): def copy_transpose(self): """ - + :return: copy of the class with kernel set to the transpose of original one """ return PixelKernelConvolution(self._kernel.T, convolution_type=self._type) @@ -63,19 +63,18 @@ def convolution2d(self, image): :param image: 2d array (image) to be convolved :return: fft convolution """ - if self._type == 'fft': - image_conv = signal.fftconvolve(image, self._kernel, mode='same') - elif self._type == 'fft_static': - image_conv = self._static_fft(image, mode='same') - elif self._type == 'grid': - image_conv = signal.convolve2d(image, self._kernel, mode='same') + if self._type == "fft": + image_conv = signal.fftconvolve(image, self._kernel, mode="same") + elif self._type == "fft_static": + image_conv = self._static_fft(image, mode="same") + elif self._type == "grid": + image_conv = signal.convolve2d(image, self._kernel, mode="same") else: - raise ValueError('convolution_type %s not supported!' % self._type) + raise ValueError("convolution_type %s not supported!" % self._type) return image_conv - def _static_fft(self, image, mode='same'): - """ - scipy fft convolution with saved static fft kernel + def _static_fft(self, image, mode="same"): + """Scipy fft convolution with saved static fft kernel. :param image: 2d numpy array to be convolved :return: @@ -83,9 +82,25 @@ def _static_fft(self, image, mode='same'): in1 = image in1 = np.asarray(in1) if self._pre_computed is False: - self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2 = self._static_pre_compute(image) + ( + self._s1, + self._s2, + self._complex_result, + self._shape, + self._fshape, + self._fslice, + self._sp2, + ) = self._static_pre_compute(image) self._pre_computed = True - s1, s2, complex_result, shape, fshape, fslice, sp2 = self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2 + s1, s2, complex_result, shape, fshape, fslice, sp2 = ( + self._s1, + self._s2, + self._complex_result, + self._shape, + self._fshape, + self._fslice, + self._sp2, + ) # if in1.ndim == in2.ndim == 0: # scalar inputs # return in1 * in2 # elif not in1.ndim == in2.ndim: @@ -95,8 +110,8 @@ def _static_fft(self, image, mode='same'): # Check that input sizes are compatible with 'valid' mode # if _inputs_swap_needed(mode, s1, s2): - # Convolution is commutative; order doesn't have any effect on output - # only applicable for 'valid' mode + # Convolution is commutative; order doesn't have any effect on output + # only applicable for 'valid' mode # in1, s1, in2, s2 = in2, s2, in1, s1 # Pre-1.9 NumPy FFT routines are not threadsafe. 
For older NumPys, make @@ -104,7 +119,7 @@ def _static_fft(self, image, mode='same'): if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)): try: sp1 = np.fft.rfftn(in1, fshape) - ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy()) + ret = np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy() finally: if not _rfft_mt_safe: _rfft_lock.release() @@ -125,12 +140,11 @@ def _static_fft(self, image, mode='same'): elif mode == "valid": return _centered(ret, s1 - s2 + 1) else: - raise ValueError("Acceptable mode flags are 'valid'," - " 'same', or 'full'.") + raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") def _static_pre_compute(self, image): - """ - pre-compute Fourier transformed kernel and shape quantities to speed up convolution + """Pre-compute Fourier transformed kernel and shape quantities to speed up + convolution. :param image: 2d numpy array :return: @@ -139,8 +153,9 @@ def _static_pre_compute(self, image): in2 = self._kernel s1 = np.array(in1.shape) s2 = np.array(in2.shape) - complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or - np.issubdtype(in2.dtype, np.complexfloating)) + complex_result = np.issubdtype(in1.dtype, np.complexfloating) or np.issubdtype( + in2.dtype, np.complexfloating + ) shape = s1 + s2 - 1 # Check that input sizes are compatible with 'valid' mode @@ -180,10 +195,16 @@ def re_size_convolve(self, image_low_res, image_high_res=None): @export class SubgridKernelConvolution(object): - """ - class to compute the convolution on a supersampled grid with partial convolution computed on the regular grid - """ - def __init__(self, kernel_supersampled, supersampling_factor, supersampling_kernel_size=None, convolution_type='fft_static'): + """Class to compute the convolution on a supersampled grid with partial convolution + computed on the regular grid.""" + + def __init__( + self, + kernel_supersampled, + supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft_static", + ): """ :param kernel_supersampled: kernel in supersampled pixels @@ -202,11 +223,18 @@ def __init__(self, kernel_supersampled, supersampling_factor, supersampling_kern kernel_low_res, kernel_high_res = np.zeros((3, 3)), kernel_supersampled self._low_res_convolution = False else: - kernel_low_res, kernel_high_res = kernel_util.split_kernel(kernel_supersampled, supersampling_kernel_size, - self._supersampling_factor) + kernel_low_res, kernel_high_res = kernel_util.split_kernel( + kernel_supersampled, + supersampling_kernel_size, + self._supersampling_factor, + ) self._low_res_convolution = True - self._low_res_conv = PixelKernelConvolution(kernel_low_res, convolution_type=convolution_type) - self._high_res_conv = PixelKernelConvolution(kernel_high_res, convolution_type=convolution_type) + self._low_res_conv = PixelKernelConvolution( + kernel_low_res, convolution_type=convolution_type + ) + self._high_res_conv = PixelKernelConvolution( + kernel_high_res, convolution_type=convolution_type + ) def convolution2d(self, image): """ @@ -216,7 +244,9 @@ def convolution2d(self, image): """ image_high_res_conv = self._high_res_conv.convolution2d(image) - image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor) + image_resized_conv = image_util.re_size( + image_high_res_conv, self._supersampling_factor + ) if self._low_res_convolution is True: image_resized = image_util.re_size(image, self._supersampling_factor) image_resized_conv += self._low_res_conv.convolution2d(image_resized) @@ -229,7 +259,9 @@ def 
re_size_convolve(self, image_low_res, image_high_res): :return: convolved and re-sized image """ image_high_res_conv = self._high_res_conv.convolution2d(image_high_res) - image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor) + image_resized_conv = image_util.re_size( + image_high_res_conv, self._supersampling_factor + ) if self._low_res_convolution is True: image_resized_conv += self._low_res_conv.convolution2d(image_low_res) return image_resized_conv @@ -237,14 +269,19 @@ def re_size_convolve(self, image_low_res, image_high_res): @export class MultiGaussianConvolution(object): - """ - class to perform a convolution consisting of multiple 2d Gaussians - This is aimed to lead to a speed-up without significant loss of accuracy do to the simplified convolution kernel - relative to a pixelized kernel. - """ + """Class to perform a convolution consisting of multiple 2d Gaussians This is aimed + to lead to a speed-up without significant loss of accuracy do to the simplified + convolution kernel relative to a pixelized kernel.""" - def __init__(self, sigma_list, fraction_list, pixel_scale, supersampling_factor=1, supersampling_convolution=False, - truncation=2): + def __init__( + self, + sigma_list, + fraction_list, + pixel_scale, + supersampling_factor=1, + supersampling_convolution=False, + truncation=2, + ): """ :param sigma_list: list of std value of Gaussian kernel @@ -265,8 +302,7 @@ def __init__(self, sigma_list, fraction_list, pixel_scale, supersampling_factor= self._supersampling_convolution = supersampling_convolution def convolution2d(self, image): - """ - 2d convolution + """2d convolution. :param image: 2d numpy array, image to be convolved :return: convolved image, 2d numpy array @@ -274,11 +310,25 @@ def convolution2d(self, image): image_conv = None for i in range(self._num_gaussians): if image_conv is None: - image_conv = ndimage.gaussian_filter(image, self._sigmas_scaled[i], mode='nearest', - truncate=self._truncation) * self._fraction_list[i] + image_conv = ( + ndimage.gaussian_filter( + image, + self._sigmas_scaled[i], + mode="nearest", + truncate=self._truncation, + ) + * self._fraction_list[i] + ) else: - image_conv += ndimage.gaussian_filter(image, self._sigmas_scaled[i], mode='nearest', - truncate=self._truncation) * self._fraction_list[i] + image_conv += ( + ndimage.gaussian_filter( + image, + self._sigmas_scaled[i], + mode="nearest", + truncate=self._truncation, + ) + * self._fraction_list[i] + ) return image_conv def re_size_convolve(self, image_low_res, image_high_res): @@ -289,19 +339,21 @@ def re_size_convolve(self, image_low_res, image_high_res): """ if self._supersampling_convolution is True: image_high_res_conv = self.convolution2d(image_high_res) - image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor) + image_resized_conv = image_util.re_size( + image_high_res_conv, self._supersampling_factor + ) else: image_resized_conv = self.convolution2d(image_low_res) return image_resized_conv def pixel_kernel(self, num_pix): - """ - computes a pixelized kernel from the MGE parameters + """Computes a pixelized kernel from the MGE parameters. 
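
A usage sketch for MultiGaussianConvolution: the PSF is approximated by a sum of Gaussians, which turns the convolution into a few calls to scipy's gaussian_filter. The widths, fractions, and pixel scale below are illustrative, not taken from this diff.

import numpy as np
from lenstronomy.ImSim.Numerics.convolution import MultiGaussianConvolution

# approximate the PSF by two concentric Gaussians (numbers are illustrative)
mg_conv = MultiGaussianConvolution(
    sigma_list=[0.1, 0.3],        # Gaussian widths in angular units (arcsec)
    fraction_list=[0.7, 0.3],     # flux fractions, summing to 1
    pixel_scale=0.05,             # arcsec per pixel
    truncation=4,
)

image = np.zeros((41, 41))
image[20, 20] = 1.0               # a delta function exposes the effective kernel
image_conv = mg_conv.convolution2d(image)
kernel = mg_conv.pixel_kernel(num_pix=21)   # pixelized version of the same kernel
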
:param num_pix: int, size of kernel (odd number per axis) :return: pixel kernel centered """ from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian + mg = MultiGaussian() x, y = util.make_grid(numPix=num_pix, deltapix=self._pixel_scale) kernel = mg.function(x, y, amp=self._fraction_list, sigma=self._sigmas_scaled) @@ -311,9 +363,9 @@ def pixel_kernel(self, num_pix): @export class FWHMGaussianConvolution(object): - """ - uses a two-dimensional Gaussian function with same FWHM of given kernel as approximation - """ + """Uses a two-dimensional Gaussian function with same FWHM of given kernel as + approximation.""" + def __init__(self, kernel, truncation=4): """ @@ -325,22 +377,22 @@ def __init__(self, kernel, truncation=4): self._truncation = truncation def convolution2d(self, image): - """ - 2d convolution + """2d convolution. :param image: 2d numpy array, image to be convolved :return: convolved image, 2d numpy array """ - image_conv = ndimage.filters.gaussian_filter(image, self._sigma, mode='nearest', truncate=self._truncation) + image_conv = ndimage.filters.gaussian_filter( + image, self._sigma, mode="nearest", truncate=self._truncation + ) return image_conv @export class MGEConvolution(object): - """ - approximates a 2d kernel with an azimuthal Multi-Gaussian expansion - """ + """Approximates a 2d kernel with an azimuthal Multi-Gaussian expansion.""" + def __init__(self, kernel, pixel_scale, order=1): """ @@ -349,8 +401,12 @@ def __init__(self, kernel, pixel_scale, order=1): """ amps, sigmas, norm = kernel_util.mge_kernel(kernel, order=order) # make instance o MultiGaussian convolution kernel - self._mge_conv = MultiGaussianConvolution(sigma_list=sigmas*pixel_scale, fraction_list=np.array(amps) / np.sum(amps), - pixel_scale=pixel_scale, truncation=4) + self._mge_conv = MultiGaussianConvolution( + sigma_list=sigmas * pixel_scale, + fraction_list=np.array(amps) / np.sum(amps), + pixel_scale=pixel_scale, + truncation=4, + ) self._kernel = kernel # store difference between MGE approximation and real kernel diff --git a/lenstronomy/ImSim/Numerics/grid.py b/lenstronomy/ImSim/Numerics/grid.py index 8f0ffa04a..e678668cc 100644 --- a/lenstronomy/ImSim/Numerics/grid.py +++ b/lenstronomy/ImSim/Numerics/grid.py @@ -4,16 +4,25 @@ from lenstronomy.Data.coord_transforms import Coordinates1D from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class AdaptiveGrid(Coordinates1D): - """ - manages a super-sampled grid on the partial image - """ - def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_indexes, - supersampling_factor, flux_evaluate_indexes=None): + """Manages a super-sampled grid on the partial image.""" + + def __init__( + self, + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampling_indexes, + supersampling_factor, + flux_evaluate_indexes=None, + ): """ :param nx: number of pixels in x-axis @@ -34,7 +43,9 @@ def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersa flux_evaluate_indexes = np.ones_like(self._x_grid, dtype=bool) supersampled_indexes1d = util.image2array(supersampling_indexes) self._high_res_indexes1d = (supersampled_indexes1d) & (flux_evaluate_indexes) - self._low_res_indexes1d = (np.invert(supersampled_indexes1d)) & (flux_evaluate_indexes) + self._low_res_indexes1d = (np.invert(supersampled_indexes1d)) & ( + flux_evaluate_indexes + ) self._supersampling_factor = supersampling_factor self._num_sub = supersampling_factor * supersampling_factor 
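
A small construction sketch for AdaptiveGrid: only a patch of the image is supersampled, and flux_array2image_low_high splits the concatenated flux vector (low-resolution pixels first, then all sub-pixels) back into the two images that get convolved. All sizes below are illustrative.

import numpy as np
from lenstronomy.ImSim.Numerics.grid import AdaptiveGrid

nx, ny = 10, 10
transform_pix2angle = np.array([[0.05, 0.0], [0.0, 0.05]])  # 0.05" pixels
supersampling_factor = 4

# supersample only a 4x4 patch in the middle of the frame
supersampling_indexes = np.zeros((nx, ny), dtype=bool)
supersampling_indexes[3:7, 3:7] = True

grid = AdaptiveGrid(
    nx, ny, transform_pix2angle, ra_at_xy_0=-0.25, dec_at_xy_0=-0.25,
    supersampling_indexes=supersampling_indexes,
    supersampling_factor=supersampling_factor,
)

# flux vector layout: low-resolution pixels first, then all sub-pixel values
n_low = supersampling_indexes.size - supersampling_indexes.sum()
n_high = supersampling_indexes.sum() * supersampling_factor**2
flux_array = np.ones(n_low + n_high)

# low-res image has the patch averaged down; high-res image is zero outside it
image_low_res, image_high_res = grid.flux_array2image_low_high(flux_array)
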
self._x_low_res = self._x_grid[self._low_res_indexes1d] @@ -61,8 +72,8 @@ def flux_array2image_low_high(self, flux_array, high_res_return=True): (needs more computation and is only needed when convolution is performed on the supersampling level) :return: 2d array, 2d array, corresponding to (partial) images in low and high resolution (to be convolved) """ - low_res_values = flux_array[0:self._num_low_res] - high_res_values = flux_array[self._num_low_res:] + low_res_values = flux_array[0 : self._num_low_res] + high_res_values = flux_array[self._num_low_res :] image_low_res = self._merge_low_high_res(low_res_values, high_res_values) if high_res_return is True: image_high_res_partial = self._high_res_image(high_res_values) @@ -76,13 +87,12 @@ def _high_res_coordinates(self): :return: 1d arrays of subpixel grid coordinates """ - if not hasattr(self, '_x_sub_grid'): + if not hasattr(self, "_x_sub_grid"): self._subpixel_coordinates() return self._x_high_res, self._y_high_res def _merge_low_high_res(self, low_res_values, supersampled_values): - """ - adds/overwrites the supersampled values on the image + """Adds/overwrites the supersampled values on the image. :param low_res_values: 1d array of image with low resolution :param supersampled_values: values of the supersampled sub-pixels @@ -101,13 +111,19 @@ def _high_res_image(self, supersampled_values): :param supersampled_values: 1d array of supersampled values corresponding to coordinates :return: 2d array of supersampled image (zeros outside supersampled frame) """ - high_res = np.zeros((self._nx * self._supersampling_factor, self._ny * self._supersampling_factor)) + high_res = np.zeros( + ( + self._nx * self._supersampling_factor, + self._ny * self._supersampling_factor, + ) + ) count = 0 for i in range(self._supersampling_factor): for j in range(self._supersampling_factor): - selected = supersampled_values[count::self._num_sub] - high_res[i::self._supersampling_factor, j::self._supersampling_factor] = self._array2image_subset( - selected) + selected = supersampled_values[count :: self._num_sub] + high_res[ + i :: self._supersampling_factor, j :: self._supersampling_factor + ] = self._array2image_subset(selected) count += 1 return high_res @@ -123,19 +139,30 @@ def _subpixel_coordinates(self): count = 0 for i in range(self._supersampling_factor): for j in range(self._supersampling_factor): - x_ij = 1. / self._supersampling_factor / 2. + j / float(self._supersampling_factor) - 1. / 2 - y_ij = 1. / self._supersampling_factor / 2. + i / float(self._supersampling_factor) - 1. / 2 + x_ij = ( + 1.0 / self._supersampling_factor / 2.0 + + j / float(self._supersampling_factor) + - 1.0 / 2 + ) + y_ij = ( + 1.0 / self._supersampling_factor / 2.0 + + i / float(self._supersampling_factor) + - 1.0 / 2 + ) delta_ra, delta_dec = self.map_pix2coord(x_ij, y_ij) delta_ra0, delta_dec_0 = self.map_pix2coord(0, 0) - x_sub_grid[count::self._num_sub] = x_grid_select + delta_ra - delta_ra0 - y_sub_grid[count::self._num_sub] = y_grid_select + delta_dec - delta_dec_0 + x_sub_grid[count :: self._num_sub] = ( + x_grid_select + delta_ra - delta_ra0 + ) + y_sub_grid[count :: self._num_sub] = ( + y_grid_select + delta_dec - delta_dec_0 + ) count += 1 self._x_high_res = x_sub_grid self._y_high_res = y_sub_grid def _average_subgrid(self, subgrid_values): - """ - averages the values over a pixel + """Averages the values over a pixel. :param subgrid_values: values (e.g. 
flux) of subgrid coordinates :return: 1d array of size of the supersampled pixels @@ -144,11 +171,8 @@ def _average_subgrid(self, subgrid_values): return np.mean(values_2d, axis=1) def _array2image_subset(self, array): - """ - maps a 1d array into a (nx, ny) 2d grid with array populating the idex_mask indices - :param array: 1d array - :return: 2d array - """ + """Maps a 1d array into a (nx, ny) 2d grid with array populating the idex_mask + indices :param array: 1d array :return: 2d array.""" grid1d = np.zeros(self._nx * self._ny) grid1d[self._high_res_indexes1d] = array grid2d = util.array2image(grid1d, self._nx, self._ny) @@ -157,11 +181,18 @@ def _array2image_subset(self, array): @export class RegularGrid(Coordinates1D): - """ - manages a super-sampled grid on the partial image - """ - def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_factor=1, - flux_evaluate_indexes=None): + """Manages a super-sampled grid on the partial image.""" + + def __init__( + self, + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampling_factor=1, + flux_evaluate_indexes=None, + ): """ :param nx: number of pixels in x-axis @@ -182,9 +213,13 @@ def __init__(self, nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersa flux_evaluate_indexes = np.ones_like(self._x_grid, dtype=bool) else: flux_evaluate_indexes = util.image2array(flux_evaluate_indexes) - self._compute_indexes = self._subgrid_index(flux_evaluate_indexes, self._supersampling_factor, self._nx, self._ny) + self._compute_indexes = self._subgrid_index( + flux_evaluate_indexes, self._supersampling_factor, self._nx, self._ny + ) - x_grid_sub, y_grid_sub = util.make_subgrid(self._x_grid, self._y_grid, self._supersampling_factor) + x_grid_sub, y_grid_sub = util.make_subgrid( + self._x_grid, self._y_grid, self._supersampling_factor + ) self._ra_subgrid = x_grid_sub[self._compute_indexes] self._dec_subgrid = y_grid_sub[self._compute_indexes] @@ -198,19 +233,18 @@ def coordinates_evaluate(self): @property def grid_points_spacing(self): - """ - effective spacing between coordinate points, after supersampling - :return: sqrt(pixel_area)/supersampling_factor - """ + """Effective spacing between coordinate points, after supersampling :return: + sqrt(pixel_area)/supersampling_factor.""" return self.pixel_width / self._supersampling_factor @property def num_grid_points_axes(self): - """ - effective number of points along each axes, after supersampling - :return: number of pixels per axis, nx*supersampling_factor ny*supersampling_factor - """ - return self._nx * self._supersampling_factor, self._ny * self._supersampling_factor + """Effective number of points along each axes, after supersampling :return: + number of pixels per axis, nx*supersampling_factor ny*supersampling_factor.""" + return ( + self._nx * self._supersampling_factor, + self._ny * self._supersampling_factor, + ) @property def supersampling_factor(self): @@ -243,19 +277,22 @@ def _subgrid_index(idex_mask, subgrid_res, nx, ny): :return: 1d array of equivalent mask in subgrid resolution """ idex_sub = np.repeat(idex_mask, subgrid_res, axis=0) - idex_sub = util.array2image(idex_sub, nx=nx, ny=ny*subgrid_res) + idex_sub = util.array2image(idex_sub, nx=nx, ny=ny * subgrid_res) idex_sub = np.repeat(idex_sub, subgrid_res, axis=0) idex_sub = util.image2array(idex_sub) return idex_sub def _array2image(self, array): - """ - maps a 1d array into a (nx, ny) 2d grid with array populating the idex_mask indices + """Maps a 1d array into a (nx, ny) 2d 
grid with array populating the idex_mask + indices. :param array: 1d array :return: """ - nx, ny = self._nx * self._supersampling_factor, self._ny * self._supersampling_factor + nx, ny = ( + self._nx * self._supersampling_factor, + self._ny * self._supersampling_factor, + ) grid1d = np.zeros((nx * ny)) grid1d[self._compute_indexes] = array grid2d = util.array2image(grid1d, nx, ny) diff --git a/lenstronomy/ImSim/Numerics/numba_convolution.py b/lenstronomy/ImSim/Numerics/numba_convolution.py index adaf12d83..4a75f41ff 100644 --- a/lenstronomy/ImSim/Numerics/numba_convolution.py +++ b/lenstronomy/ImSim/Numerics/numba_convolution.py @@ -4,17 +4,25 @@ from lenstronomy.ImSim.Numerics.partial_image import PartialImage from lenstronomy.Util import image_util -__all__ = ['NumbaConvolution'] +__all__ = ["NumbaConvolution"] class NumbaConvolution(object): - """ - class to convolve explicit pixels only + """Class to convolve explicit pixels only. the convolution is inspired by pyautolens: https://github.com/Jammy2211/PyAutoLens """ - def __init__(self, kernel, conv_pixels, compute_pixels=None, nopython=True, cache=True, parallel=False, - memory_raise=True): + + def __init__( + self, + kernel, + conv_pixels, + compute_pixels=None, + nopython=True, + cache=True, + parallel=False, + memory_raise=True, + ): """ :param kernel: convolution kernel in units of the image pixels provided, odd length per axis @@ -47,20 +55,39 @@ def __init__(self, kernel, conv_pixels, compute_pixels=None, nopython=True, cach self.kernel_max_size = kernel_shape[0] * kernel_shape[1] image_index = 0 - if self._partialInput.num_partial * self.kernel_max_size > 10 ** 9 and self._memory_raise is True: - raise ValueError("kernel length %s combined with data size %s requires %s memory elements, which might" - "exceed the memory limit and thus gives a raise. If you wish to ignore this raise, set" - " memory_raise=False" % (self.kernel_max_size, self._partialInput.num_partial, self._partialInput.num_partial * self.kernel_max_size)) - self._image_frame_indexes = np.zeros((self._partialInput.num_partial, self.kernel_max_size), dtype='int') - self._image_frame_psfs = np.zeros((self._partialInput.num_partial, self.kernel_max_size)) - self._image_frame_lengths = np.zeros((self._partialInput.num_partial), dtype='int') + if ( + self._partialInput.num_partial * self.kernel_max_size > 10**9 + and self._memory_raise is True + ): + raise ValueError( + "kernel length %s combined with data size %s requires %s memory elements, which might" + "exceed the memory limit and thus gives a raise. 
If you wish to ignore this raise, set" + " memory_raise=False" + % ( + self.kernel_max_size, + self._partialInput.num_partial, + self._partialInput.num_partial * self.kernel_max_size, + ) + ) + self._image_frame_indexes = np.zeros( + (self._partialInput.num_partial, self.kernel_max_size), dtype="int" + ) + self._image_frame_psfs = np.zeros( + (self._partialInput.num_partial, self.kernel_max_size) + ) + self._image_frame_lengths = np.zeros( + (self._partialInput.num_partial), dtype="int" + ) for x in range(index_array_input.shape[0]): for y in range(index_array_input.shape[1]): if conv_pixels[x][y]: - image_frame_psfs, image_frame_indexes, frame_length = self._pre_compute_frame_kernel((x, y), - self._kernel[:, :], - compute_pixels, - index_array_out) + ( + image_frame_psfs, + image_frame_indexes, + frame_length, + ) = self._pre_compute_frame_kernel( + (x, y), self._kernel[:, :], compute_pixels, index_array_out + ) self._image_frame_indexes[image_index, :] = image_frame_indexes self._image_frame_psfs[image_index, :] = image_frame_psfs @@ -68,17 +95,19 @@ def __init__(self, kernel, conv_pixels, compute_pixels=None, nopython=True, cach image_index += 1 def convolve2d(self, image): - """ - 2d convolution + """2d convolution. :param image: 2d numpy array, image to be convolved :return: convolved image, 2d numpy array """ image_array_partial = self._partialInput.partial_array(image) - conv_array = self._convolve_jit(image_array_partial, num_data=self._partialOutput.num_partial, - image_frame_kernels=self._image_frame_psfs, - image_frame_indexes=self._image_frame_indexes, - image_frame_lengths=self._image_frame_lengths) + conv_array = self._convolve_jit( + image_array_partial, + num_data=self._partialOutput.num_partial, + image_frame_kernels=self._image_frame_psfs, + image_frame_indexes=self._image_frame_indexes, + image_frame_lengths=self._image_frame_lengths, + ) conv_image = self._partialOutput.image_from_partial(conv_array) return conv_image @@ -103,8 +132,8 @@ def _pre_compute_frame_kernel(image_index, kernel, mask, index_array): kx2 = int((kx - 1) / 2) ky2 = int((ky - 1) / 2) frame_counter = 0 - frame_kernels = np.zeros(kx*ky) - frame_indexes = np.zeros(kx*ky) + frame_kernels = np.zeros(kx * ky) + frame_indexes = np.zeros(kx * ky) for i in range(kx): for j in range(ky): @@ -119,7 +148,13 @@ def _pre_compute_frame_kernel(image_index, kernel, mask, index_array): @staticmethod @numba_util.jit() - def _convolve_jit(image_array, num_data, image_frame_kernels, image_frame_indexes, image_frame_lengths): + def _convolve_jit( + image_array, + num_data, + image_frame_kernels, + image_frame_indexes, + image_frame_lengths, + ): """ :param image_array: selected subset of image in 1d array conventions @@ -129,25 +164,46 @@ def _convolve_jit(image_array, num_data, image_frame_kernels, image_frame_indexe :return: """ conv_array = np.zeros(num_data) - for image_index in range(len(image_array)): # loop through pixels that are to be blurred + for image_index in range( + len(image_array) + ): # loop through pixels that are to be blurred value = image_array[image_index] # value of pixel that gets blurred - frame_length = image_frame_lengths[image_index] # number of pixels that gets assigned a fraction of the convolution - frame_indexes = image_frame_indexes[image_index] # list of 1d indexes that get added flux from the blurred image - frame_kernels = image_frame_kernels[image_index] # values of kernel for each frame indexes - for kernel_index in range(frame_length): # loop through all pixels that are impacted by 
the kernel of the pixel being blurred - vector_index = frame_indexes[kernel_index] # 1d coordinate of pixel to be added value + frame_length = image_frame_lengths[ + image_index + ] # number of pixels that gets assigned a fraction of the convolution + frame_indexes = image_frame_indexes[ + image_index + ] # list of 1d indexes that get added flux from the blurred image + frame_kernels = image_frame_kernels[ + image_index + ] # values of kernel for each frame indexes + for kernel_index in range( + frame_length + ): # loop through all pixels that are impacted by the kernel of the pixel being blurred + vector_index = frame_indexes[ + kernel_index + ] # 1d coordinate of pixel to be added value kernel = frame_kernels[kernel_index] # kernel response of pixel conv_array[vector_index] += value * kernel # ad value to pixel return conv_array class SubgridNumbaConvolution(object): - """ - class that inputs a supersampled grid and convolution kernel and computes the response on the regular grid - This makes use of the regualr NumbaConvolution class as a loop through the different sub-pixel positions - """ + """Class that inputs a supersampled grid and convolution kernel and computes the + response on the regular grid This makes use of the regualr NumbaConvolution class as + a loop through the different sub-pixel positions.""" - def __init__(self, kernel_super, supersampling_factor, conv_pixels, compute_pixels=None, kernel_size=None, nopython=True, cache=True, parallel=False): + def __init__( + self, + kernel_super, + supersampling_factor, + conv_pixels, + compute_pixels=None, + kernel_size=None, + nopython=True, + cache=True, + parallel=False, + ): """ :param kernel_super: convolution kernel in units of super sampled pixels provided, odd length per axis @@ -172,7 +228,14 @@ def __init__(self, kernel_super, supersampling_factor, conv_pixels, compute_pixe kernel = self._partial_kernel(kernel_super, i, j) if kernel_size is not None: kernel = image_util.cut_edges(kernel, kernel_size) - numba_conv = NumbaConvolution(kernel, conv_pixels, compute_pixels=compute_pixels, nopython=nopython, cache=cache, parallel=parallel) + numba_conv = NumbaConvolution( + kernel, + conv_pixels, + compute_pixels=compute_pixels, + nopython=nopython, + cache=cache, + parallel=parallel, + ) self._numba_conv_list.append(numba_conv) def convolve2d(self, image_high_res): @@ -198,7 +261,9 @@ def _partial_image(self, image_high_res, i, j): :param j: index of super-sampled position in second axis :return: 2d array only selected the specific supersampled position within a regular pixel """ - return image_high_res[i::self._supersampling_factor, j::self._supersampling_factor] + return image_high_res[ + i :: self._supersampling_factor, j :: self._supersampling_factor + ] def _partial_kernel(self, kernel_super, i, j): """ @@ -217,7 +282,9 @@ def _partial_kernel(self, kernel_super, i, j): delta = int((n_match - n - self._supersampling_factor) / 2) + 1 i0 = delta # index where to start kernel for i=0 j0 = delta # index where to start kernel for j=0 (should be symmetric) - kernel_super_match[i0 + i:i0 + i + n, j0 + j:j0 + j + n] = kernel_super - #kernel_super_match = image_util.cut_edges(kernel_super_match, numPix=n) - kernel = image_util.re_size(kernel_super_match, factor=self._supersampling_factor) + kernel_super_match[i0 + i : i0 + i + n, j0 + j : j0 + j + n] = kernel_super + # kernel_super_match = image_util.cut_edges(kernel_super_match, numPix=n) + kernel = image_util.re_size( + kernel_super_match, factor=self._supersampling_factor + ) 
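# Illustrative sketch (not lenstronomy's actual implementation): the
# SubgridNumbaConvolution class above treats one sub-pixel offset at a time;
# image_high_res[i::f, j::f] picks every f-th pixel starting at offset (i, j),
# so the f*f slices tile the supersampled image exactly once. Going back to the
# data resolution is assumed here to be a block average (image_util.re_size).
import numpy as np

f = 2                                        # supersampling factor
high_res = np.arange(16.0).reshape(4, 4)     # toy supersampled image (f * nx = 4)

# decomposition into f*f sub-images, one per sub-pixel offset
sub_images = [high_res[i::f, j::f] for i in range(f) for j in range(f)]
assert sum(s.sum() for s in sub_images) == high_res.sum()

# block average down to the (nx, ny) data grid
nx, ny = high_res.shape[0] // f, high_res.shape[1] // f
low_res = high_res.reshape(nx, f, ny, f).mean(axis=(3, 1))
assert low_res.shape == (nx, ny)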
return kernel diff --git a/lenstronomy/ImSim/Numerics/numerics.py b/lenstronomy/ImSim/Numerics/numerics.py index 09a12fdbe..6afa07e28 100644 --- a/lenstronomy/ImSim/Numerics/numerics.py +++ b/lenstronomy/ImSim/Numerics/numerics.py @@ -1,22 +1,39 @@ from lenstronomy.ImSim.Numerics.grid import RegularGrid, AdaptiveGrid -from lenstronomy.ImSim.Numerics.convolution import SubgridKernelConvolution, PixelKernelConvolution, MultiGaussianConvolution +from lenstronomy.ImSim.Numerics.convolution import ( + SubgridKernelConvolution, + PixelKernelConvolution, + MultiGaussianConvolution, +) from lenstronomy.ImSim.Numerics.point_source_rendering import PointSourceRendering from lenstronomy.Util import util from lenstronomy.Util import kernel_util import numpy as np -__all__ = ['Numerics'] +__all__ = ["Numerics"] class Numerics(PointSourceRendering): - """ - this classes manages the numerical options and computations of an image. + """This classes manages the numerical options and computations of an image. + The class has two main functions, re_size_convolve() and coordinates_evaluate() """ - def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regular', supersampling_convolution=False, - supersampling_kernel_size=5, flux_evaluate_indexes=None, supersampled_indexes=None, - compute_indexes=None, point_source_supersampling_factor=1, convolution_kernel_size=None, - convolution_type='fft_static', truncation=4): + + def __init__( + self, + pixel_grid, + psf, + supersampling_factor=1, + compute_mode="regular", + supersampling_convolution=False, + supersampling_kernel_size=5, + flux_evaluate_indexes=None, + supersampled_indexes=None, + compute_indexes=None, + point_source_supersampling_factor=1, + convolution_kernel_size=None, + convolution_type="fft_static", + truncation=4, + ): """ :param pixel_grid: PixelGrid() class instance @@ -41,12 +58,17 @@ def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regula :param convolution_kernel_size: int, odd number, size of convolution kernel. If None, takes size of point_source_kernel :param convolution_type: string, 'fft', 'grid', 'fft_static' mode of 2d convolution """ - if compute_mode not in ['regular', 'adaptive']: - raise ValueError('compute_mode specified as %s not valid. Options are "adaptive", "regular"') + if compute_mode not in ["regular", "adaptive"]: + raise ValueError( + 'compute_mode specified as %s not valid. Options are "adaptive", "regular"' + ) # if no super sampling, turn the supersampling convolution off self._psf_type = psf.psf_type if not isinstance(supersampling_factor, int): - raise TypeError('supersampling_factor needs to be an integer! Current type is %s' % type(supersampling_factor)) + raise TypeError( + "supersampling_factor needs to be an integer! 
Current type is %s" + % type(supersampling_factor) + ) if supersampling_factor == 1: supersampling_convolution = False self._pixel_width = pixel_grid.pixel_width @@ -55,51 +77,104 @@ def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regula ra_at_xy_0, dec_at_xy_0 = pixel_grid.radec_at_xy_0 if supersampled_indexes is None: supersampled_indexes = np.zeros((nx, ny), dtype=bool) - if compute_mode == 'adaptive': # or (compute_mode == 'regular' and supersampling_convolution is False and supersampling_factor > 1): - self._grid = AdaptiveGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampled_indexes, - supersampling_factor, flux_evaluate_indexes) + if ( + compute_mode == "adaptive" + ): # or (compute_mode == 'regular' and supersampling_convolution is False and supersampling_factor > 1): + self._grid = AdaptiveGrid( + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampled_indexes, + supersampling_factor, + flux_evaluate_indexes, + ) else: - self._grid = RegularGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_factor, - flux_evaluate_indexes) - if self._psf_type == 'PIXEL': - if compute_mode == 'adaptive' and supersampling_convolution is True: - from lenstronomy.ImSim.Numerics.adaptive_numerics import AdaptiveConvolution - kernel_super = psf.kernel_point_source_supersampled(supersampling_factor) - kernel_super = self._supersampling_cut_kernel(kernel_super, convolution_kernel_size, supersampling_factor) - self._conv = AdaptiveConvolution(kernel_super, supersampling_factor, - conv_supersample_pixels=supersampled_indexes, - supersampling_kernel_size=supersampling_kernel_size, - compute_pixels=compute_indexes, nopython=True, cache=True, parallel=False) - - elif compute_mode == 'regular' and supersampling_convolution is True: - kernel_super = psf.kernel_point_source_supersampled(supersampling_factor) + self._grid = RegularGrid( + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampling_factor, + flux_evaluate_indexes, + ) + if self._psf_type == "PIXEL": + if compute_mode == "adaptive" and supersampling_convolution is True: + from lenstronomy.ImSim.Numerics.adaptive_numerics import ( + AdaptiveConvolution, + ) + + kernel_super = psf.kernel_point_source_supersampled( + supersampling_factor + ) + kernel_super = self._supersampling_cut_kernel( + kernel_super, convolution_kernel_size, supersampling_factor + ) + self._conv = AdaptiveConvolution( + kernel_super, + supersampling_factor, + conv_supersample_pixels=supersampled_indexes, + supersampling_kernel_size=supersampling_kernel_size, + compute_pixels=compute_indexes, + nopython=True, + cache=True, + parallel=False, + ) + + elif compute_mode == "regular" and supersampling_convolution is True: + kernel_super = psf.kernel_point_source_supersampled( + supersampling_factor + ) if convolution_kernel_size is not None: - kernel_super = psf.kernel_point_source_supersampled(supersampling_factor) - kernel_super = self._supersampling_cut_kernel(kernel_super, convolution_kernel_size, - supersampling_factor) - self._conv = SubgridKernelConvolution(kernel_super, supersampling_factor, - supersampling_kernel_size=supersampling_kernel_size, - convolution_type=convolution_type) + kernel_super = psf.kernel_point_source_supersampled( + supersampling_factor + ) + kernel_super = self._supersampling_cut_kernel( + kernel_super, convolution_kernel_size, supersampling_factor + ) + self._conv = SubgridKernelConvolution( + kernel_super, + supersampling_factor, + 
supersampling_kernel_size=supersampling_kernel_size, + convolution_type=convolution_type, + ) else: kernel = psf.kernel_point_source - kernel = self._supersampling_cut_kernel(kernel, convolution_kernel_size, - supersampling_factor=1) - self._conv = PixelKernelConvolution(kernel, convolution_type=convolution_type) - - elif self._psf_type == 'GAUSSIAN': + kernel = self._supersampling_cut_kernel( + kernel, convolution_kernel_size, supersampling_factor=1 + ) + self._conv = PixelKernelConvolution( + kernel, convolution_type=convolution_type + ) + + elif self._psf_type == "GAUSSIAN": pixel_scale = pixel_grid.pixel_width fwhm = psf.fwhm # FWHM in units of angle sigma = util.fwhm2sigma(fwhm) sigma_list = [sigma] fraction_list = [1] - self._conv = MultiGaussianConvolution(sigma_list, fraction_list, pixel_scale, supersampling_factor, - supersampling_convolution, truncation=truncation) - elif self._psf_type == 'NONE': + self._conv = MultiGaussianConvolution( + sigma_list, + fraction_list, + pixel_scale, + supersampling_factor, + supersampling_convolution, + truncation=truncation, + ) + elif self._psf_type == "NONE": self._conv = None else: - raise ValueError('psf_type %s not valid! Chose either NONE, GAUSSIAN or PIXEL.' % self._psf_type) - super(Numerics, self).__init__(pixel_grid=pixel_grid, supersampling_factor=point_source_supersampling_factor, - psf=psf) + raise ValueError( + "psf_type %s not valid! Chose either NONE, GAUSSIAN or PIXEL." + % self._psf_type + ) + super(Numerics, self).__init__( + pixel_grid=pixel_grid, + supersampling_factor=point_source_supersampling_factor, + psf=psf, + ) if supersampling_convolution is True: self._high_res_return = True else: @@ -113,14 +188,17 @@ def re_size_convolve(self, flux_array, unconvolved=False): :return: convolved image on regular pixel grid, 2d array """ # add supersampled region to lower resolution on - image_low_res, image_high_res_partial = self._grid.flux_array2image_low_high(flux_array, - high_res_return=self._high_res_return) - if unconvolved is True or self._psf_type == 'NONE': + image_low_res, image_high_res_partial = self._grid.flux_array2image_low_high( + flux_array, high_res_return=self._high_res_return + ) + if unconvolved is True or self._psf_type == "NONE": image_conv = image_low_res else: # convolve low res grid and high res grid - image_conv = self._conv.re_size_convolve(image_low_res, image_high_res_partial) - return image_conv * self._pixel_width ** 2 + image_conv = self._conv.re_size_convolve( + image_low_res, image_high_res_partial + ) + return image_conv * self._pixel_width**2 @property def grid_supersampling_factor(self): @@ -139,7 +217,9 @@ def coordinates_evaluate(self): return self._grid.coordinates_evaluate @staticmethod - def _supersampling_cut_kernel(kernel_super, convolution_kernel_size, supersampling_factor): + def _supersampling_cut_kernel( + kernel_super, convolution_kernel_size, supersampling_factor + ): """ :param kernel_super: super-sampled kernel diff --git a/lenstronomy/ImSim/Numerics/numerics_subframe.py b/lenstronomy/ImSim/Numerics/numerics_subframe.py index 57d9a9bb9..c41421080 100644 --- a/lenstronomy/ImSim/Numerics/numerics_subframe.py +++ b/lenstronomy/ImSim/Numerics/numerics_subframe.py @@ -3,19 +3,30 @@ from lenstronomy.ImSim.Numerics.point_source_rendering import PointSourceRendering from lenstronomy.Data.pixel_grid import PixelGrid -__all__ = ['NumericsSubFrame'] +__all__ = ["NumericsSubFrame"] class NumericsSubFrame(PointSourceRendering): - """ - This class finds the optimal rectangular sub-frame of a 
data to be modelled that contains all the - flux_evaluate_indexes and performs the numerical calculations only in this frame and then patches zeros around it - to match the full data size. - """ - def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regular', supersampling_convolution=False, - supersampling_kernel_size=5, flux_evaluate_indexes=None, supersampled_indexes=None, - compute_indexes=None, point_source_supersampling_factor=1, convolution_kernel_size=None, - convolution_type='fft_static', truncation=4): + """This class finds the optimal rectangular sub-frame of a data to be modelled that + contains all the flux_evaluate_indexes and performs the numerical calculations only + in this frame and then patches zeros around it to match the full data size.""" + + def __init__( + self, + pixel_grid, + psf, + supersampling_factor=1, + compute_mode="regular", + supersampling_convolution=False, + supersampling_kernel_size=5, + flux_evaluate_indexes=None, + supersampled_indexes=None, + compute_indexes=None, + point_source_supersampling_factor=1, + convolution_kernel_size=None, + convolution_type="fft_static", + truncation=4, + ): """ :param pixel_grid: PixelGrid() class instance @@ -45,18 +56,26 @@ def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regula self._nx, self._ny = pixel_grid.num_pixel_axes self._init_sub_frame(flux_evaluate_indexes) pixel_grid_sub = self._sub_pixel_grid(pixel_grid) - self._numerics_subframe = Numerics(pixel_grid=pixel_grid_sub, psf=psf, - supersampling_factor=supersampling_factor, compute_mode=compute_mode, - supersampling_convolution=supersampling_convolution, - supersampling_kernel_size=supersampling_kernel_size, - flux_evaluate_indexes=self._cut_frame(flux_evaluate_indexes), - supersampled_indexes=self._cut_frame(supersampled_indexes), - compute_indexes=self._cut_frame(compute_indexes), - point_source_supersampling_factor=point_source_supersampling_factor, - convolution_kernel_size=convolution_kernel_size, - convolution_type=convolution_type, truncation=truncation) - super(NumericsSubFrame, self).__init__(pixel_grid=pixel_grid, supersampling_factor=point_source_supersampling_factor, - psf=psf) + self._numerics_subframe = Numerics( + pixel_grid=pixel_grid_sub, + psf=psf, + supersampling_factor=supersampling_factor, + compute_mode=compute_mode, + supersampling_convolution=supersampling_convolution, + supersampling_kernel_size=supersampling_kernel_size, + flux_evaluate_indexes=self._cut_frame(flux_evaluate_indexes), + supersampled_indexes=self._cut_frame(supersampled_indexes), + compute_indexes=self._cut_frame(compute_indexes), + point_source_supersampling_factor=point_source_supersampling_factor, + convolution_kernel_size=convolution_kernel_size, + convolution_type=convolution_type, + truncation=truncation, + ) + super(NumericsSubFrame, self).__init__( + pixel_grid=pixel_grid, + supersampling_factor=point_source_supersampling_factor, + psf=psf, + ) def re_size_convolve(self, flux_array, unconvolved=False): """ @@ -65,7 +84,9 @@ def re_size_convolve(self, flux_array, unconvolved=False): :return: convolved image on regular pixel grid, 2d array """ # add supersampled region to lower resolution on - image_sub_frame = self._numerics_subframe.re_size_convolve(flux_array, unconvolved=unconvolved) + image_sub_frame = self._numerics_subframe.re_size_convolve( + flux_array, unconvolved=unconvolved + ) return self._complete_frame(image_sub_frame) @property @@ -107,16 +128,16 @@ def _complete_frame(self, image_sub_frame): """ if 
self._subframe_calc is True: image = np.zeros((self._nx, self._ny)) - image[self._x_min_sub:self._x_max_sub + 1, self._y_min_sub:self._y_max_sub + 1] = image_sub_frame + image[ + self._x_min_sub : self._x_max_sub + 1, + self._y_min_sub : self._y_max_sub + 1, + ] = image_sub_frame else: image = image_sub_frame return image def _init_sub_frame(self, flux_evaluate_indexes): - """ - smaller frame that encloses all the idex_mask - :return: - """ + """Smaller frame that encloses all the idex_mask :return:""" if flux_evaluate_indexes is None: self._subframe_calc = False self._x_min_sub, self._y_min_sub = 0, 0 @@ -135,13 +156,15 @@ def _cut_frame(self, image): :return: 2d array of the sub-frame """ if self._subframe_calc is True and image is not None: - return image[self._x_min_sub:self._x_max_sub + 1, self._y_min_sub:self._y_max_sub + 1] + return image[ + self._x_min_sub : self._x_max_sub + 1, + self._y_min_sub : self._y_max_sub + 1, + ] else: return image def _sub_pixel_grid(self, pixel_grid): - """ - creates a PixelGrid instance covering the sub-frame area only + """Creates a PixelGrid instance covering the sub-frame area only. :param pixel_grid: PixelGrid instance of the full image :return: PixelGrid instance @@ -150,9 +173,16 @@ def _sub_pixel_grid(self, pixel_grid): transform_pix2angle = pixel_grid.transform_pix2angle nx_sub = self._x_max_sub - self._x_min_sub + 1 ny_sub = self._y_max_sub - self._y_min_sub + 1 - ra_at_xy_0_sub, dec_at_xy_0_sub = pixel_grid.map_pix2coord(self._x_min_sub, self._y_min_sub) - pixel_grid_sub = PixelGrid(nx=nx_sub, ny=ny_sub, transform_pix2angle=transform_pix2angle, - ra_at_xy_0=ra_at_xy_0_sub, dec_at_xy_0=dec_at_xy_0_sub) + ra_at_xy_0_sub, dec_at_xy_0_sub = pixel_grid.map_pix2coord( + self._x_min_sub, self._y_min_sub + ) + pixel_grid_sub = PixelGrid( + nx=nx_sub, + ny=ny_sub, + transform_pix2angle=transform_pix2angle, + ra_at_xy_0=ra_at_xy_0_sub, + dec_at_xy_0=dec_at_xy_0_sub, + ) else: pixel_grid_sub = pixel_grid return pixel_grid_sub diff --git a/lenstronomy/ImSim/Numerics/partial_image.py b/lenstronomy/ImSim/Numerics/partial_image.py index 1b732e875..e56572156 100644 --- a/lenstronomy/ImSim/Numerics/partial_image.py +++ b/lenstronomy/ImSim/Numerics/partial_image.py @@ -1,14 +1,13 @@ import numpy as np import lenstronomy.Util.util as util -__all__ = ['PartialImage'] +__all__ = ["PartialImage"] class PartialImage(object): - """ - class to deal with the use of partial slicing of a 2d data array, to be used for various computations where only - a subset of pixels need to be know. 
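# Illustrative sketch (not lenstronomy's actual implementation): the sub-frame
# logic of NumericsSubFrame above amounts to finding the smallest rectangle
# enclosing all pixels flagged in flux_evaluate_indexes, computing only there,
# and pasting the result back into a zero image of the full size. A toy,
# numpy-only version of that pattern:
import numpy as np

mask = np.zeros((8, 8), dtype=bool)
mask[2:5, 3:7] = True                                  # pixels that need evaluating

rows, cols = np.where(mask)
x_min, x_max = rows.min(), rows.max()
y_min, y_max = cols.min(), cols.max()

sub = np.ones((x_max - x_min + 1, y_max - y_min + 1))  # stand-in for the sub-frame result

full = np.zeros(mask.shape)
full[x_min:x_max + 1, y_min:y_max + 1] = sub           # analogue of _complete_frame
assert full.sum() == sub.size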
- """ + """Class to deal with the use of partial slicing of a 2d data array, to be used for + various computations where only a subset of pixels need to be know.""" + def __init__(self, partial_read_bools): """ @@ -34,9 +33,11 @@ def index_array(self): :return: 2d array with indexes (integers) corresponding to the 1d array, -1 when masked """ - if not hasattr(self, '_index_array'): + if not hasattr(self, "_index_array"): full_array = -1 * np.ones(len(self._partial_read_bools_array)) - num_array = np.linspace(start=0, stop=self.num_partial-1, num=self.num_partial) + num_array = np.linspace( + start=0, stop=self.num_partial - 1, num=self.num_partial + ) full_array[self._partial_read_bools_array] = num_array self._index_array = util.array2image(full_array, nx=self._nx, ny=self._ny) return self._index_array diff --git a/lenstronomy/ImSim/Numerics/point_source_rendering.py b/lenstronomy/ImSim/Numerics/point_source_rendering.py index b77f42e8d..a12e6ce3b 100644 --- a/lenstronomy/ImSim/Numerics/point_source_rendering.py +++ b/lenstronomy/ImSim/Numerics/point_source_rendering.py @@ -2,13 +2,12 @@ from lenstronomy.Util import kernel_util import numpy as np -__all__ = ['PointSourceRendering'] +__all__ = ["PointSourceRendering"] class PointSourceRendering(object): - """ - numerics to compute the point source response on an image - """ + """Numerics to compute the point source response on an image.""" + def __init__(self, pixel_grid, supersampling_factor, psf): """ @@ -32,24 +31,36 @@ def point_source_rendering(self, ra_pos, dec_pos, amp): subgrid = self._supersampling_factor x_pos, y_pos = self._pixel_grid.map_coord2pix(ra_pos, dec_pos) # translate coordinates to higher resolution grid - x_pos_subgird = x_pos * subgrid + (subgrid - 1) / 2. - y_pos_subgrid = y_pos * subgrid + (subgrid - 1) / 2. + x_pos_subgird = x_pos * subgrid + (subgrid - 1) / 2.0 + y_pos_subgrid = y_pos * subgrid + (subgrid - 1) / 2.0 kernel_point_source_subgrid = self._kernel_supersampled # initialize grid with higher resolution - subgrid2d = np.zeros((self._nx*subgrid, self._ny*subgrid)) + subgrid2d = np.zeros((self._nx * subgrid, self._ny * subgrid)) # add_layer2image if len(x_pos) > len(amp): - raise ValueError('there are %s images appearing but only %s amplitudes provided!' % (len(x_pos), len(amp))) + raise ValueError( + "there are %s images appearing but only %s amplitudes provided!" 
+ % (len(x_pos), len(amp)) + ) for i in range(len(x_pos)): - subgrid2d = image_util.add_layer2image(subgrid2d, x_pos_subgird[i], y_pos_subgrid[i], amp[i] * kernel_point_source_subgrid) + subgrid2d = image_util.add_layer2image( + subgrid2d, + x_pos_subgird[i], + y_pos_subgrid[i], + amp[i] * kernel_point_source_subgrid, + ) # re-size grid to data resolution grid2d = image_util.re_size(subgrid2d, factor=subgrid) - return grid2d*subgrid**2 + return grid2d * subgrid**2 @property def _kernel_supersampled(self): - if not hasattr(self, '_kernel_supersampled_instance'): - self._kernel_supersampled_instance = self._psf.kernel_point_source_supersampled(self._supersampling_factor, updata_cache=False) + if not hasattr(self, "_kernel_supersampled_instance"): + self._kernel_supersampled_instance = ( + self._psf.kernel_point_source_supersampled( + self._supersampling_factor, updata_cache=False + ) + ) return self._kernel_supersampled_instance def psf_error_map(self, ra_pos, dec_pos, amp, data, fix_psf_error_map=False): @@ -71,6 +82,10 @@ def psf_error_map(self, ra_pos, dec_pos, amp, data, fix_psf_error_map=False): if fix_psf_error_map is True: amp_estimated = amp else: - amp_estimated = kernel_util.estimate_amp(data, x_pos[i], y_pos[i], psf_kernel) - error_map = image_util.add_layer2image(error_map, x_pos[i], y_pos[i], psf_error_map * amp_estimated ** 2) + amp_estimated = kernel_util.estimate_amp( + data, x_pos[i], y_pos[i], psf_kernel + ) + error_map = image_util.add_layer2image( + error_map, x_pos[i], y_pos[i], psf_error_map * amp_estimated**2 + ) return error_map diff --git a/lenstronomy/ImSim/__init__.py b/lenstronomy/ImSim/__init__.py index c3412e3c2..d34951c17 100644 --- a/lenstronomy/ImSim/__init__.py +++ b/lenstronomy/ImSim/__init__.py @@ -1,4 +1,4 @@ -__author__ = 'Simon Birrer' -__email__ = 'sibirrer@gmail.com' -__version__ = '0.1.0' -__credits__ = 'ETH Zurich, UCLA' \ No newline at end of file +__author__ = "Simon Birrer" +__email__ = "sibirrer@gmail.com" +__version__ = "0.1.0" +__credits__ = "ETH Zurich, UCLA" diff --git a/lenstronomy/ImSim/de_lens.py b/lenstronomy/ImSim/de_lens.py index 4844d9a99..82e4be62d 100644 --- a/lenstronomy/ImSim/de_lens.py +++ b/lenstronomy/ImSim/de_lens.py @@ -1,33 +1,34 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import sys from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def get_param_WLS(A, C_D_inv, d, inv_bool=True): - """ - returns the parameter values given + """Returns the parameter values given. 
:param A: response matrix Nd x Ns (Nd = # data points, Ns = # parameters) :param C_D_inv: inverse covariance matrix of the data, Nd x Nd, diagonal form :param d: data array, 1-d Nd - :param inv_bool: boolean, whether returning also the inverse matrix or just solve the linear system + :param inv_bool: boolean, whether returning also the inverse matrix or just solve + the linear system :return: 1-d array of parameter values """ M = A.T.dot(np.multiply(C_D_inv, A.T).T) if inv_bool: - if np.linalg.cond(M) < 5/sys.float_info.epsilon: + if np.linalg.cond(M) < 5 / sys.float_info.epsilon: M_inv = _stable_inv(M) else: M_inv = np.zeros_like(M) R = A.T.dot(np.multiply(C_D_inv, d)) B = M_inv.dot(R) else: - if np.linalg.cond(M) < 5/sys.float_info.epsilon: + if np.linalg.cond(M) < 5 / sys.float_info.epsilon: R = A.T.dot(np.multiply(C_D_inv, d)) B = _solve_stable(M, R) # try: @@ -43,8 +44,7 @@ def get_param_WLS(A, C_D_inv, d, inv_bool=True): @export def marginalisation_const(M_inv): - """ - get marginalisation constant 1/2 log(M_beta) for flat priors + """Get marginalisation constant 1/2 log(M_beta) for flat priors. :param M_inv: 2D covariance matrix :return: float @@ -52,8 +52,8 @@ def marginalisation_const(M_inv): sign, log_det = np.linalg.slogdet(M_inv) if sign == 0: - return -10**15 - return sign * log_det/2 + return -(10**15) + return sign * log_det / 2 @export @@ -73,14 +73,13 @@ def marginalization_new(M_inv, d_prior=None): v_abs[v_abs > d_prior**2] = d_prior**2 log_det = np.sum(np.log(v_abs)) * np.prod(sign_v) if np.isnan(log_det): - return -10**15 + return -(10**15) m = len(v) - return log_det / 2 + m/2. * np.log(np.pi/2.) - m * np.log(d_prior) + return log_det / 2 + m / 2.0 * np.log(np.pi / 2.0) - m * np.log(d_prior) def _stable_inv(m): - """ - stable linear inversion + """Stable linear inversion. :param m: square matrix to be inverted :return: inverse of M (or zeros) diff --git a/lenstronomy/ImSim/differential_extinction.py b/lenstronomy/ImSim/differential_extinction.py index 4111f2a98..df46f73db 100644 --- a/lenstronomy/ImSim/differential_extinction.py +++ b/lenstronomy/ImSim/differential_extinction.py @@ -1,13 +1,14 @@ from lenstronomy.LightModel.light_model import LightModel import numpy as np -__all__ = ['DifferentialExtinction'] +__all__ = ["DifferentialExtinction"] class DifferentialExtinction(object): - """ - class to compute an extinction (for a specific band/wavelength). This class uses the functionality available in - the LightModel module to describe an optical depth tau_ext to compute the extinction on the sky/image. + """Class to compute an extinction (for a specific band/wavelength). + + This class uses the functionality available in the LightModel module to describe an + optical depth tau_ext to compute the extinction on the sky/image. 
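# Illustrative sketch (not lenstronomy's actual implementation): get_param_WLS
# above solves a weighted linear least-squares problem with a diagonal data
# covariance, M = A^T C_D^-1 A, R = A^T C_D^-1 d, amplitudes B = M^-1 R.
# A minimal numpy version with a made-up response matrix of two basis components:
import numpy as np

Nd, Ns = 100, 2
rng = np.random.default_rng(0)
A = rng.normal(size=(Nd, Ns))            # response matrix, Nd x Ns
C_D_inv = np.full(Nd, 1.0 / 0.05**2)     # inverse variance of each data point (diagonal)
truth = np.array([2.0, -1.0])            # true linear amplitudes
d = A @ truth + rng.normal(scale=0.05, size=Nd)

M = A.T @ (C_D_inv[:, None] * A)         # Ns x Ns normal matrix
R = A.T @ (C_D_inv * d)
B = np.linalg.solve(M, R)                # best-fit amplitudes ('param' above)
wls_model = A @ B                        # model prediction for the data vector
assert np.allclose(B, truth, atol=0.05)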
""" def __init__(self, optical_depth_model=None, tau0_index=0): @@ -28,7 +29,7 @@ def __init__(self, optical_depth_model=None, tau0_index=0): @property def compute_bool(self): """ - :return: True when a differential extinction is set, False otherwise + :return: True when a differential extinction is set, False otherwise """ return self._compute_bool @@ -44,7 +45,7 @@ def extinction(self, x, y, kwargs_extinction=None, kwargs_special=None): if self._compute_bool is False or kwargs_extinction is None: return 1 tau = self._profile.surface_brightness(x, y, kwargs_list=kwargs_extinction) - tau0_list = kwargs_special.get('tau0_list', None) + tau0_list = kwargs_special.get("tau0_list", None) if tau0_list is not None: tau0 = tau0_list[self._tau0_index] else: diff --git a/lenstronomy/ImSim/image2source_mapping.py b/lenstronomy/ImSim/image2source_mapping.py index d93582b93..6753ebb93 100644 --- a/lenstronomy/ImSim/image2source_mapping.py +++ b/lenstronomy/ImSim/image2source_mapping.py @@ -1,14 +1,14 @@ import numpy as np from lenstronomy.Cosmo.background import Background -__all__ = ['Image2SourceMapping'] +__all__ = ["Image2SourceMapping"] class Image2SourceMapping(object): - """ - this class handles multiple source planes and performs the computation of predicted surface brightness at given - image positions. - The class is enable to deal with an arbitrary number of different source planes. There are two different settings: + """This class handles multiple source planes and performs the computation of + predicted surface brightness at given image positions. The class is enable to deal + with an arbitrary number of different source planes. There are two different + settings: Single lens plane modelling: In case of a single deflector, lenstronomy models the reduced deflection angles @@ -42,18 +42,26 @@ def __init__(self, lensModel, sourceModel): if self._multi_lens_plane is True: if self._deflection_scaling_list is not None: - raise ValueError('deflection scaling for different source planes not possible in combination of ' - 'multi-lens plane modeling. You have to specify the redshifts of the sources instead.') + raise ValueError( + "deflection scaling for different source planes not possible in combination of " + "multi-lens plane modeling. You have to specify the redshifts of the sources instead." + ) self._bkg_cosmo = Background(lensModel.cosmo) if self._source_redshift_list is None: self._multi_source_plane = False elif len(self._source_redshift_list) != len(light_model_list): - raise ValueError("length of redshift_list must correspond to length of light_model_list") + raise ValueError( + "length of redshift_list must correspond to length of light_model_list" + ) elif np.max(self._source_redshift_list) > self._lensModel.z_source: - raise ValueError("redshift of source_redshift_list have to be smaler or equal to the one specified in " - "the lens model.") + raise ValueError( + "redshift of source_redshift_list have to be smaler or equal to the one specified in " + "the lens model." 
+ ) else: - self._sorted_source_redshift_index = self._index_ordering(self._source_redshift_list) + self._sorted_source_redshift_index = self._index_ordering( + self._source_redshift_list + ) self._T0z_list = [] for z_stop in self._source_redshift_list: self._T0z_list.append(self._bkg_cosmo.T_xy(0, z_stop)) @@ -62,7 +70,12 @@ def __init__(self, lensModel, sourceModel): self._T_ij_end_list = [] for i, index_source in enumerate(self._sorted_source_redshift_index): z_stop = self._source_redshift_list[index_source] - T_ij_start, T_ij_end = self._lensModel.lens_model.transverse_distance_start_stop(z_start, z_stop, include_z_start=False) + ( + T_ij_start, + T_ij_end, + ) = self._lensModel.lens_model.transverse_distance_start_stop( + z_start, z_stop, include_z_start=False + ) self._T_ij_start_list.append(T_ij_start) self._T_ij_end_list.append(T_ij_end) z_start = z_stop @@ -70,7 +83,9 @@ def __init__(self, lensModel, sourceModel): if self._deflection_scaling_list is None: self._multi_source_plane = False elif len(self._deflection_scaling_list) != len(light_model_list): - raise ValueError('length of scale_factor_list must correspond to length of light_model_list!') + raise ValueError( + "length of scale_factor_list must correspond to length of light_model_list!" + ) def image2source(self, x, y, kwargs_lens, index_source): """ @@ -96,10 +111,14 @@ def image2source(self, x, y, kwargs_lens, index_source): z_stop = self._source_redshift_list[index_source] x_ = np.zeros_like(x) y_ = np.zeros_like(y) - x_comov, y_comov, alpha_x, alpha_y = self._lensModel.lens_model.ray_shooting_partial(x_, y_, x, y, - 0, z_stop, - kwargs_lens, - include_z_start=False) + ( + x_comov, + y_comov, + alpha_x, + alpha_y, + ) = self._lensModel.lens_model.ray_shooting_partial( + x_, y_, x, y, 0, z_stop, kwargs_lens, include_z_start=False + ) T_z = self._T0z_list[index_source] x_source = x_comov / T_z @@ -118,7 +137,9 @@ def image_flux_joint(self, x, y, kwargs_lens, kwargs_source, k=None): """ if self._multi_source_plane is False: x_source, y_source = self._lensModel.ray_shooting(x, y, kwargs_lens) - return self._lightModel.surface_brightness(x_source, y_source, kwargs_source, k=k) + return self._lightModel.surface_brightness( + x_source, y_source, kwargs_source, k=k + ) else: flux = np.zeros_like(x) if self._multi_lens_plane is False: @@ -127,8 +148,10 @@ def image_flux_joint(self, x, y, kwargs_lens, kwargs_source, k=None): scale_factor = self._deflection_scaling_list[i] x_source = x - x_alpha * scale_factor y_source = y - y_alpha * scale_factor - if k is None or k ==i: - flux += self._lightModel.surface_brightness(x_source, y_source, kwargs_source, k=i) + if k is None or k == i: + flux += self._lightModel.surface_brightness( + x_source, y_source, kwargs_source, k=i + ) else: x_comov = np.zeros_like(x) y_comov = np.zeros_like(y) @@ -140,15 +163,31 @@ def image_flux_joint(self, x, y, kwargs_lens, kwargs_source, k=None): if z_stop > z_start: T_ij_start = self._T_ij_start_list[i] T_ij_end = self._T_ij_end_list[i] - x_comov, y_comov, alpha_x, alpha_y = self._lensModel.lens_model.ray_shooting_partial(x_comov, y_comov, alpha_x, alpha_y, z_start, z_stop, - kwargs_lens, include_z_start=False, - T_ij_start=T_ij_start, T_ij_end=T_ij_end) + ( + x_comov, + y_comov, + alpha_x, + alpha_y, + ) = self._lensModel.lens_model.ray_shooting_partial( + x_comov, + y_comov, + alpha_x, + alpha_y, + z_start, + z_stop, + kwargs_lens, + include_z_start=False, + T_ij_start=T_ij_start, + T_ij_end=T_ij_end, + ) T_z = self._T0z_list[index_source] x_source = 
x_comov / T_z y_source = y_comov / T_z if k is None or k == i: - flux += self._lightModel.surface_brightness(x_source, y_source, kwargs_source, k=index_source) + flux += self._lightModel.surface_brightness( + x_source, y_source, kwargs_source, k=index_source + ) z_start = z_stop return flux @@ -173,7 +212,9 @@ def image_flux_split(self, x, y, kwargs_lens, kwargs_source): scale_factor = self._deflection_scaling_list[i] x_source = x - x_alpha * scale_factor y_source = y - y_alpha * scale_factor - response_i, n_i = self._lightModel.functions_split(x_source, y_source, kwargs_source, k=i) + response_i, n_i = self._lightModel.functions_split( + x_source, y_source, kwargs_source, k=i + ) response += response_i n += n_i else: @@ -188,14 +229,29 @@ def image_flux_split(self, x, y, kwargs_lens, kwargs_source): if z_stop > z_start: T_ij_start = self._T_ij_start_list[i] T_ij_end = self._T_ij_end_list[i] - x_comov, y_comov, alpha_x, alpha_y = self._lensModel.lens_model.ray_shooting_partial(x_comov, - y_comov, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, - include_z_start=False, T_ij_start=T_ij_start, - T_ij_end=T_ij_end) + ( + x_comov, + y_comov, + alpha_x, + alpha_y, + ) = self._lensModel.lens_model.ray_shooting_partial( + x_comov, + y_comov, + alpha_x, + alpha_y, + z_start, + z_stop, + kwargs_lens, + include_z_start=False, + T_ij_start=T_ij_start, + T_ij_end=T_ij_end, + ) T_z = self._T0z_list[index_source] x_source = x_comov / T_z y_source = y_comov / T_z - response_i, n_i = self._lightModel.functions_split(x_source, y_source, kwargs_source, k=index_source) + response_i, n_i = self._lightModel.functions_split( + x_source, y_source, kwargs_source, k=index_source + ) n_i_list.append(n_i) response += response_i n += n_i @@ -235,6 +291,8 @@ def _re_order_split(self, response, n_list): for i, index in enumerate(self._sorted_source_redshift_index): n_i = n_list[index] n_sum = n_sum_list_regular[index] - reshuffled[n_sum:n_sum + n_i] = response[n_sum_sorted:n_sum_sorted + n_i] + reshuffled[n_sum : n_sum + n_i] = response[ + n_sum_sorted : n_sum_sorted + n_i + ] n_sum_sorted += n_i return reshuffled diff --git a/lenstronomy/ImSim/image_linear_solve.py b/lenstronomy/ImSim/image_linear_solve.py index 2b788bc70..5c3290a42 100644 --- a/lenstronomy/ImSim/image_linear_solve.py +++ b/lenstronomy/ImSim/image_linear_solve.py @@ -4,21 +4,32 @@ from lenstronomy.ImSim.Numerics.convolution import PixelKernelConvolution import numpy as np -__all__ = ['ImageLinearFit'] +__all__ = ["ImageLinearFit"] class ImageLinearFit(ImageModel): + """Linear version class, inherits ImageModel. + + When light models use pixel-based profile types, such as 'SLIT_STARLETS', the WLS + linear inversion is replaced by the regularized inversion performed by an external + solver. The current pixel-based solver is provided by the SLITronomy plug-in. """ - linear version class, inherits ImageModel. - - When light models use pixel-based profile types, such as 'SLIT_STARLETS', - the WLS linear inversion is replaced by the regularized inversion performed by an external solver. - The current pixel-based solver is provided by the SLITronomy plug-in. 
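# Illustrative sketch (not lenstronomy's actual implementation): in the single
# lens plane branch of Image2SourceMapping.image_flux_joint above, each source
# profile i is evaluated at beta = theta - scale_i * alpha(theta), where
# scale_i rescales the reduced deflection for sources at different redshifts.
# A toy version with a made-up point-mass deflector and a Gaussian source:
import numpy as np


def alpha_point_mass(x, y, theta_e=1.0):
    # reduced deflection of a point mass with Einstein radius theta_e
    r2 = x**2 + y**2
    return theta_e**2 * x / r2, theta_e**2 * y / r2


def gaussian_source(x, y, sigma=0.1):
    return np.exp(-(x**2 + y**2) / (2 * sigma**2))


x, y = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
alpha_x, alpha_y = alpha_point_mass(x, y)

flux = np.zeros_like(x)
for scale in (0.8, 1.0):                 # analogue of deflection_scaling_list
    x_src = x - scale * alpha_x          # image-plane -> source-plane mapping
    y_src = y - scale * alpha_y
    flux += gaussian_source(x_src, y_src)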
- """ - def __init__(self, data_class, psf_class=None, lens_model_class=None, source_model_class=None, - lens_light_model_class=None, point_source_class=None, extinction_class=None, - kwargs_numerics=None, likelihood_mask=None, - psf_error_map_bool_list=None, kwargs_pixelbased=None, linear_solver=True): + + def __init__( + self, + data_class, + psf_class=None, + lens_model_class=None, + source_model_class=None, + lens_light_model_class=None, + point_source_class=None, + extinction_class=None, + kwargs_numerics=None, + likelihood_mask=None, + psf_error_map_bool_list=None, + kwargs_pixelbased=None, + linear_solver=True, + ): """ :param data_class: ImageData() instance @@ -37,14 +48,22 @@ def __init__(self, data_class, psf_class=None, lens_model_class=None, source_mod :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such that they get overwritten by the linear solver solution. """ - super(ImageLinearFit, self).__init__(data_class, psf_class=psf_class, lens_model_class=lens_model_class, - source_model_class=source_model_class, - lens_light_model_class=lens_light_model_class, - point_source_class=point_source_class, extinction_class=extinction_class, - kwargs_numerics=kwargs_numerics, kwargs_pixelbased=kwargs_pixelbased) + super(ImageLinearFit, self).__init__( + data_class, + psf_class=psf_class, + lens_model_class=lens_model_class, + source_model_class=source_model_class, + lens_light_model_class=lens_light_model_class, + point_source_class=point_source_class, + extinction_class=extinction_class, + kwargs_numerics=kwargs_numerics, + kwargs_pixelbased=kwargs_pixelbased, + ) self._linear_solver = linear_solver if psf_error_map_bool_list is None: - psf_error_map_bool_list = [True] * len(self.PointSource.point_source_type_list) + psf_error_map_bool_list = [True] * len( + self.PointSource.point_source_type_list + ) self._psf_error_map_bool_list = psf_error_map_bool_list if likelihood_mask is None: likelihood_mask = np.ones_like(data_class.data) @@ -54,94 +73,186 @@ def __init__(self, data_class, psf_class=None, lens_model_class=None, source_mod # update the pixel-based solver with the likelihood mask self.PixelSolver.set_likelihood_mask(self.likelihood_mask) - # prepare to use fft convolution for the natwt linear solver - if self.Data.likelihood_method() == 'interferometry_natwt': - self._convolution = PixelKernelConvolution(kernel=self.PSF.kernel_point_source) - - def image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, inv_bool=False): - """ - - computes the image (lens and source surface brightness with a given lens model). - The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles) - However in case of pixel-based modelling, pixel values are constrained by an external solver (e.g. SLITronomy). 
- - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix. - :return: 2d array of surface brightness pixels of the optimal solution of the linear parameters to match the data - """ - return self._image_linear_solve(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, - kwargs_special, inv_bool=inv_bool) - - def _image_linear_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, inv_bool=False): - """ - - computes the image (lens and source surface brightness with a given lens model). - By default, the linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles) - However in case of pixel-based modelling, pixel values are constrained by an external solver (e.g. SLITronomy). - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix. - This has no impact in case of pixel-based modelling. - :return: 2d array of surface brightness pixels of the optimal solution of the linear parameters to match the data + # prepare to use fft convolution for the natwt linear solver + if self.Data.likelihood_method() == "interferometry_natwt": + self._convolution = PixelKernelConvolution( + kernel=self.PSF.kernel_point_source + ) + + def image_linear_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + inv_bool=False, + ): + """Computes the image (lens and source surface brightness with a given lens + model). The linear parameters are computed with a weighted linear least square + optimization (i.e. flux normalization of the brightness profiles) However in + case of pixel-based modelling, pixel values are constrained by an external + solver (e.g. SLITronomy). + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the + purpose of the covariance matrix. 
+ :return: 2d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data + """ + return self._image_linear_solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + inv_bool=inv_bool, + ) + + def _image_linear_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + inv_bool=False, + ): + """Computes the image (lens and source surface brightness with a given lens + model). By default, the linear parameters are computed with a weighted linear + least square optimization (i.e. flux normalization of the brightness profiles) + However in case of pixel-based modelling, pixel values are constrained by an + external solver (e.g. SLITronomy). + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param inv_bool: if True, invert the full linear solver Matrix Ax = y for the + purpose of the covariance matrix. This has no impact in case of pixel-based + modelling. + :return: 2d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data """ if self._pixelbased_bool is True: - model, model_error, cov_param, param = self.image_pixelbased_solve(kwargs_lens, kwargs_source, - kwargs_lens_light, kwargs_ps, - kwargs_extinction, kwargs_special) - elif self.Data.likelihood_method() == 'diagonal': - A = self._linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, kwargs_special) - C_D_response, model_error = self._error_response(kwargs_lens, kwargs_ps, kwargs_special=kwargs_special) + model, model_error, cov_param, param = self.image_pixelbased_solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) + elif self.Data.likelihood_method() == "diagonal": + A = self._linear_response_matrix( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) + C_D_response, model_error = self._error_response( + kwargs_lens, kwargs_ps, kwargs_special=kwargs_special + ) d = self.data_response - param, cov_param, wls_model = de_lens.get_param_WLS(A.T, 1 / C_D_response, d, inv_bool=inv_bool) + param, cov_param, wls_model = de_lens.get_param_WLS( + A.T, 1 / C_D_response, d, inv_bool=inv_bool + ) model = self.array_masked2image(wls_model) - _, _, _, _ = self._update_linear_kwargs(param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) - elif self.Data.likelihood_method() == 'interferometry_natwt': - model, model_error, cov_param, param = self._image_linear_solve_interferometry_natwt(kwargs_lens, - kwargs_source, - kwargs_lens_light, - kwargs_ps, - kwargs_extinction, - kwargs_special) + _, _, _, _ = self._update_linear_kwargs( + param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) + elif self.Data.likelihood_method() == "interferometry_natwt": + ( + model, + model_error, + cov_param, + param, + ) = self._image_linear_solve_interferometry_natwt( + kwargs_lens, + kwargs_source, + 
kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) else: - raise ValueError("likelihood_method %s not supported!" % self.Data.likelihood_method()) + raise ValueError( + "likelihood_method %s not supported!" % self.Data.likelihood_method() + ) return model, model_error, cov_param, param - def image_pixelbased_solve(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, - kwargs_ps=None, kwargs_extinction=None, kwargs_special=None, - init_lens_light_model=None): - """ - computes the image (lens and source surface brightness with a given lens model) using the pixel-based solver. - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles + def image_pixelbased_solve( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + init_lens_light_model=None, + ): + """Computes the image (lens and source surface brightness with a given lens + model) using the pixel-based solver. + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles :param kwargs_ps: keyword arguments corresponding to point sources :param kwargs_extinction: keyword arguments corresponding to dust extinction :param kwargs_special: keyword arguments corresponding to "special" parameters - :param init_lens_light_model: optional initial guess for the lens surface brightness - :return: 2d array of surface brightness pixels of the optimal solution of the linear parameters to match the data - """ - _, model_error = self._error_response(kwargs_lens, kwargs_ps, kwargs_special=kwargs_special) - model, param, _ = self.PixelSolver.solve(kwargs_lens, kwargs_source, kwargs_lens_light=kwargs_lens_light, - kwargs_ps=kwargs_ps, kwargs_special=kwargs_special, - init_lens_light_model=init_lens_light_model) + :param init_lens_light_model: optional initial guess for the lens surface + brightness + :return: 2d array of surface brightness pixels of the optimal solution of the + linear parameters to match the data + """ + _, model_error = self._error_response( + kwargs_lens, kwargs_ps, kwargs_special=kwargs_special + ) + model, param, _ = self.PixelSolver.solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_ps=kwargs_ps, + kwargs_special=kwargs_special, + init_lens_light_model=init_lens_light_model, + ) cov_param = None _, _ = self.update_pixel_kwargs(kwargs_source, kwargs_lens_light) - _, _, _, _ = self._update_linear_kwargs(param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + _, _, _, _ = self._update_linear_kwargs( + param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) return model, model_error, cov_param, param - def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None): - """ - computes the linear response matrix (m x n), with n being the data size and m being the coefficients + def 
linear_response_matrix( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + ): + """Computes the linear response matrix (m x n), with n being the data size and m + being the coefficients. :param kwargs_lens: lens model keyword argument list :param kwargs_source: extended source model keyword argument list @@ -151,14 +262,20 @@ def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_le :param kwargs_special: special keyword argument list :return: linear response matrix """ - A = self._linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, - kwargs_special) + A = self._linear_response_matrix( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) return A @property def data_response(self): - """ - returns the 1d array of the data element that is fitted for (including masking) + """Returns the 1d array of the data element that is fitted for (including + masking) :return: 1d numpy array """ @@ -166,103 +283,168 @@ def data_response(self): return d def error_response(self, kwargs_lens, kwargs_ps, kwargs_special): - """ - returns the 1d array of the error estimate corresponding to the data response + """Returns the 1d array of the error estimate corresponding to the data + response. - :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties) + :return: 1d numpy array of response, 2d array of additional errors (e.g. point + source uncertainties) """ - return self._error_response(kwargs_lens, kwargs_ps, kwargs_special=kwargs_special) + return self._error_response( + kwargs_lens, kwargs_ps, kwargs_special=kwargs_special + ) def _error_response(self, kwargs_lens, kwargs_ps, kwargs_special): - """ - returns the 1d array of the error estimate corresponding to the data response + """Returns the 1d array of the error estimate corresponding to the data + response. - :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties) + :return: 1d numpy array of response, 2d array of additional errors (e.g. point + source uncertainties) """ - model_error = self._error_map_model(kwargs_lens, kwargs_ps, kwargs_special=kwargs_special) + model_error = self._error_map_model( + kwargs_lens, kwargs_ps, kwargs_special=kwargs_special + ) # adding the uncertainties estimated from the data with the ones from the model C_D_response = self.image2array_masked(self.Data.C_D + model_error) return C_D_response, model_error - def likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, source_marg=False, linear_prior=None, - check_positive_flux=False, linear_solver=True): - """ - - computes the likelihood of the data given a model - This is specified with the non-linear parameters and a linear inversion and prior marginalisation. 
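Aside (not part of the patch): `_image_linear_solve` above hands the response matrix, the inverse pixel variances and the masked data vector to `de_lens.get_param_WLS`. A minimal numpy sketch of that weighted least-squares step, with placeholder names of my own, is:

import numpy as np

def wls_amplitudes(A, C_D, d):
    """Sketch of the chi^2-minimising amplitude solve.

    :param A: (m, n) response matrix, one row per linear component (cf. _linear_response_matrix)
    :param C_D: (n,) per-pixel variances, data noise plus model error (cf. _error_response)
    :param d: (n,) masked 1d data vector (cf. data_response)
    """
    W = 1.0 / C_D                # inverse-variance weights
    M = (A * W) @ A.T            # normal matrix M_ij = sum_k A_ik A_jk / C_D_k
    b = (A * W) @ d              # right-hand side b_i = sum_k A_ik d_k / C_D_k
    cov = np.linalg.inv(M)       # amplitude covariance (the inv_bool=True case)
    amp = cov @ b                # optimal linear amplitudes
    wls_model = A.T @ amp        # best-fit masked model, cf. array_masked2image(wls_model)
    return amp, cov, wls_model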
- - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions + def likelihood_data_given_model( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + linear_solver=True, + ): + """Computes the likelihood of the data given a model This is specified with the + non-linear parameters and a linear inversion and prior marginalisation. + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions :param kwargs_extinction: :param kwargs_special: :param source_marg: bool, performs a marginalization over the linear parameters :param linear_prior: linear prior width in eigenvalues - :param check_positive_flux: bool, if True, checks whether the linear inversion resulted in non-negative flux - components and applies a punishment in the likelihood if so. - :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such - that they get overwritten by the linear solver solution. + :param check_positive_flux: bool, if True, checks whether the linear inversion + resulted in non-negative flux components and applies a punishment in the + likelihood if so. + :param linear_solver: bool, if True (default) fixes the linear amplitude + parameters 'amp' (avoid sampling) such that they get overwritten by the + linear solver solution. :return: log likelihood (natural logarithm) """ - return self._likelihood_data_given_model(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, - kwargs_extinction, kwargs_special, source_marg, - linear_prior=linear_prior, check_positive_flux=check_positive_flux, - linear_solver=linear_solver) - - def _likelihood_data_given_model(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, source_marg=False, linear_prior=None, - check_positive_flux=False, linear_solver=True): - """ - - computes the likelihood of the data given a model - This is specified with the non-linear parameters and a linear inversion and prior marginalisation. 
- - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions + return self._likelihood_data_given_model( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + source_marg, + linear_prior=linear_prior, + check_positive_flux=check_positive_flux, + linear_solver=linear_solver, + ) + + def _likelihood_data_given_model( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + linear_solver=True, + ): + """Computes the likelihood of the data given a model This is specified with the + non-linear parameters and a linear inversion and prior marginalisation. + + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions :param source_marg: bool, performs a marginalization over the linear parameters :param linear_prior: linear prior width in eigenvalues - :param check_positive_flux: bool, if True, checks whether the linear inversion resulted in non-negative flux - components and applies a punishment in the likelihood if so. - :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such - that they get overwritten by the linear solver solution. + :param check_positive_flux: bool, if True, checks whether the linear inversion + resulted in non-negative flux components and applies a punishment in the + likelihood if so. + :param linear_solver: bool, if True (default) fixes the linear amplitude + parameters 'amp' (avoid sampling) such that they get overwritten by the + linear solver solution. 
:return: log likelihood (natural logarithm) """ # generate image if linear_solver is False: - im_sim = self.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, - kwargs_special) + im_sim = self.image( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + ) cov_matrix = None - model_error = self._error_map_model(kwargs_lens, kwargs_ps=kwargs_ps, kwargs_special=kwargs_special) + model_error = self._error_map_model( + kwargs_lens, kwargs_ps=kwargs_ps, kwargs_special=kwargs_special + ) else: - im_sim, model_error, cov_matrix, param = self._image_linear_solve(kwargs_lens, kwargs_source, - kwargs_lens_light, kwargs_ps, - kwargs_extinction, kwargs_special, - inv_bool=source_marg) + im_sim, model_error, cov_matrix, param = self._image_linear_solve( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + inv_bool=source_marg, + ) # compute X^2 logL = self.Data.log_likelihood(im_sim, self.likelihood_mask, model_error) if self._pixelbased_bool is False: if cov_matrix is not None and source_marg: - marg_const = de_lens.marginalization_new(cov_matrix, d_prior=linear_prior) + marg_const = de_lens.marginalization_new( + cov_matrix, d_prior=linear_prior + ) logL += marg_const if check_positive_flux is True: - bool_ = self.check_positive_flux(kwargs_source, kwargs_lens_light, kwargs_ps) + bool_ = self.check_positive_flux( + kwargs_source, kwargs_lens_light, kwargs_ps + ) if bool_ is False: logL -= 10**8 return logL - def num_param_linear(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): + def num_param_linear( + self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): """ :return: number of linear coefficients to be solved for in the linear inversion """ - return self._num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + return self._num_param_linear( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) - def _num_param_linear(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): + def _num_param_linear( + self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): """ :return: number of linear coefficients to be solved for in the linear inversion @@ -274,11 +456,19 @@ def _num_param_linear(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwarg num += self.PointSource.num_basis(kwargs_ps, kwargs_lens) return num - def _linear_response_matrix(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, - kwargs_extinction=None, kwargs_special=None, unconvolved=False): - """ + def _linear_response_matrix( + self, + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + ): + """Computes the linear response matrix (m x n), with n being the data size and m + being the coefficients. 
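Aside (not part of the patch): schematically, the log-likelihood assembled by `_likelihood_data_given_model` above is a masked Gaussian chi^2 term, an optional marginalisation constant over the linear amplitudes, and a hard penalty when the positivity check fails. A sketch under those assumptions (the real `Data.log_likelihood` may add further normalisation terms):

import numpy as np

def log_likelihood(data, model, variance, mask, marg_const=0.0, source_marg=False,
                   check_positive_flux=False, positive_flux=True):
    norm_res = (data - model) * mask / np.sqrt(variance)
    logL = -0.5 * np.sum(norm_res**2)        # chi^2 over unmasked pixels
    if source_marg:
        logL += marg_const                   # stands in for de_lens.marginalization_new(...)
    if check_positive_flux and not positive_flux:
        logL -= 10**8                        # same penalty as the check_positive_flux branch above
    return logL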
- computes the linear response matrix (m x n), with n being the data size and m being the coefficients The calculation is done by - first (optional) computing differential extinctions - adding linear components of the lensed source(s) @@ -293,13 +483,22 @@ def _linear_response_matrix(self, kwargs_lens, kwargs_source, kwargs_lens_light, :return: response matrix (m x n) """ x_grid, y_grid = self.ImageNumerics.coordinates_evaluate - source_light_response, n_source = self.source_mapping.image_flux_split(x_grid, y_grid, kwargs_lens, - kwargs_source) - extinction = self._extinction.extinction(x_grid, y_grid, kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special) - lens_light_response, n_lens_light = self.LensLightModel.functions_split(x_grid, y_grid, kwargs_lens_light) - - ra_pos, dec_pos, amp, n_points = self.point_source_linear_response_set(kwargs_ps, kwargs_lens, kwargs_special, with_amp=False) + source_light_response, n_source = self.source_mapping.image_flux_split( + x_grid, y_grid, kwargs_lens, kwargs_source + ) + extinction = self._extinction.extinction( + x_grid, + y_grid, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + ) + lens_light_response, n_lens_light = self.LensLightModel.functions_split( + x_grid, y_grid, kwargs_lens_light + ) + + ra_pos, dec_pos, amp, n_points = self.point_source_linear_response_set( + kwargs_ps, kwargs_lens, kwargs_special, with_amp=False + ) num_param = n_points + n_lens_light + n_source num_response = self.num_data_evaluate @@ -308,11 +507,11 @@ def _linear_response_matrix(self, kwargs_lens, kwargs_source, kwargs_lens_light, # response of lensed source profile for i in range(0, n_source): image = source_light_response[i] - + # multiply with primary beam before convolution if self._pb is not None: image *= self._pb_1d - + image *= extinction image = self.ImageNumerics.re_size_convolve(image, unconvolved=unconvolved) A[n, :] = np.nan_to_num(self.image2array_masked(image), copy=False) @@ -320,64 +519,73 @@ def _linear_response_matrix(self, kwargs_lens, kwargs_source, kwargs_lens_light, # response of deflector light profile (or any other un-lensed extended components) for i in range(0, n_lens_light): image = lens_light_response[i] - + # multiply with primary beam before convolution if self._pb is not None: image *= self._pb_1d - + image = self.ImageNumerics.re_size_convolve(image, unconvolved=unconvolved) A[n, :] = np.nan_to_num(self.image2array_masked(image), copy=False) n += 1 # response of point sources for i in range(0, n_points): - # raise warnings when primary beam is attempted to be applied for point sources if self._pb is not None: raise Warning("Antenna primary beam does not apply to point sources!") - - image = self.ImageNumerics.point_source_rendering(ra_pos[i], dec_pos[i], amp[i]) + + image = self.ImageNumerics.point_source_rendering( + ra_pos[i], dec_pos[i], amp[i] + ) A[n, :] = np.nan_to_num(self.image2array_masked(image), copy=False) n += 1 return A * self._flux_scaling - def update_linear_kwargs(self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - - links linear parameters to kwargs arguments + def update_linear_kwargs( + self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): + """Links linear parameters to kwargs arguments. 
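Aside (not part of the patch): `_linear_response_matrix` above fills one row of A per linear component. A condensed sketch, where `convolve` stands in for `ImageNumerics.re_size_convolve` and the three input lists for the `*_response` arrays computed in the method (primary-beam weighting and the `image2array_masked` step are omitted):

import numpy as np

def build_response_matrix(source_responses, lens_light_responses, point_source_images,
                          extinction, convolve):
    rows = []
    for img in source_responses:           # lensed source components: extinction, then PSF
        rows.append(convolve(img * extinction))
    for img in lens_light_responses:        # un-lensed extended components: PSF only
        rows.append(convolve(img))
    for img in point_source_images:         # point sources arrive already rendered with the PSF
        rows.append(img)
    return np.nan_to_num(np.vstack(rows))   # shape (num linear components, num data pixels)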
:param param: linear parameter vector corresponding to the response matrix :return: updated list of kwargs with linear parameter values """ - return self._update_linear_kwargs(param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + return self._update_linear_kwargs( + param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) - def _update_linear_kwargs(self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - - links linear parameters to kwargs arguments + def _update_linear_kwargs( + self, param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ): + """Links linear parameters to kwargs arguments. :param param: linear parameter vector corresponding to the response matrix :return: updated list of kwargs with linear parameter values """ i = 0 - kwargs_source, i = self.SourceModel.update_linear(param, i, kwargs_list=kwargs_source) - kwargs_lens_light, i = self.LensLightModel.update_linear(param, i, kwargs_list=kwargs_lens_light) + kwargs_source, i = self.SourceModel.update_linear( + param, i, kwargs_list=kwargs_source + ) + kwargs_lens_light, i = self.LensLightModel.update_linear( + param, i, kwargs_list=kwargs_lens_light + ) kwargs_ps, i = self.PointSource.update_linear(param, i, kwargs_ps, kwargs_lens) return kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps def linear_param_from_kwargs(self, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - inverse function of update_linear() returning the linear amplitude list for the keyword argument list + """Inverse function of update_linear() returning the linear amplitude list for + the keyword argument list. :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return: list of linear coefficients """ - return self._linear_param_from_kwargs(kwargs_source, kwargs_lens_light, kwargs_ps) + return self._linear_param_from_kwargs( + kwargs_source, kwargs_lens_light, kwargs_ps + ) def _linear_param_from_kwargs(self, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - inverse function of update_linear() returning the linear amplitude list for the keyword argument list + """Inverse function of update_linear() returning the linear amplitude list for + the keyword argument list. :param kwargs_source: :param kwargs_lens_light: @@ -391,28 +599,32 @@ def _linear_param_from_kwargs(self, kwargs_source, kwargs_lens_light, kwargs_ps) return param def update_pixel_kwargs(self, kwargs_source, kwargs_lens_light): - """ - - Update kwargs arguments for pixel-based profiles with fixed properties - such as their number of pixels, scale, and center coordinates (fixed to the origin). + """Update kwargs arguments for pixel-based profiles with fixed properties such + as their number of pixels, scale, and center coordinates (fixed to the origin). 
- :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to the superposition of different lens light profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to the + superposition of different lens light profiles :return: updated kwargs_source and kwargs_lens_light """ # in case the source plane grid size has changed, update the kwargs accordingly ss_factor_source = self.SourceNumerics.grid_supersampling_factor - kwargs_source[0]['n_pixels'] = int(self.Data.num_pixel * ss_factor_source**2) #  effective number of pixels in source plane - kwargs_source[0]['scale'] = self.Data.pixel_width / ss_factor_source # effective pixel size of source plane grid + kwargs_source[0]["n_pixels"] = int( + self.Data.num_pixel * ss_factor_source**2 + ) #  effective number of pixels in source plane + kwargs_source[0]["scale"] = ( + self.Data.pixel_width / ss_factor_source + ) # effective pixel size of source plane grid # pixelated reconstructions have no well-defined center, we put it arbitrarily at (0, 0), center of the image - kwargs_source[0]['center_x'] = 0 - kwargs_source[0]['center_y'] = 0 + kwargs_source[0]["center_x"] = 0 + kwargs_source[0]["center_y"] = 0 # do the same if the lens light has been reconstructed if kwargs_lens_light is not None and len(kwargs_lens_light) > 0: - kwargs_lens_light[0]['n_pixels'] = self.Data.num_pixel - kwargs_lens_light[0]['scale'] = self.Data.pixel_width - kwargs_lens_light[0]['center_x'] = 0 - kwargs_lens_light[0]['center_y'] = 0 + kwargs_lens_light[0]["n_pixels"] = self.Data.num_pixel + kwargs_lens_light[0]["scale"] = self.Data.pixel_width + kwargs_lens_light[0]["center_x"] = 0 + kwargs_lens_light[0]["center_y"] = 0 return kwargs_source, kwargs_lens_light def reduced_residuals(self, model, error_map=0): @@ -424,26 +636,20 @@ def reduced_residuals(self, model, error_map=0): """ mask = self.likelihood_mask C_D = self.Data.C_D_model(model) - residual = (model - self.Data.data)/np.sqrt(C_D+np.abs(error_map))*mask + residual = (model - self.Data.data) / np.sqrt(C_D + np.abs(error_map)) * mask return residual def reduced_chi2(self, model, error_map=0): - """ - returns reduced chi2 - :param model: 2d numpy array of a model predicted image - :param error_map: same format as model, additional error component (such as PSF errors) - :return: reduced chi2 - """ + """Returns reduced chi2 :param model: 2d numpy array of a model predicted image + :param error_map: same format as model, additional error component (such as PSF + errors) :return: reduced chi2.""" norm_res = self.reduced_residuals(model, error_map) return np.sum(norm_res**2) / self.num_data_evaluate @property def num_data_evaluate(self): - """ - number of data points to be used in the linear solver - :return: number of evaluated data points - :rtype: int - """ + """Number of data points to be used in the linear solver :return: number of + evaluated data points :rtype: int.""" return int(np.sum(self.likelihood_mask)) def update_data(self, data_class): @@ -456,11 +662,9 @@ def update_data(self, data_class): self.ImageNumerics._PixelGrid = data_class def image2array_masked(self, image): - """ - returns 1d array of values in image that are not masked out for the likelihood computation/linear minimization - :param image: 2d numpy array of full image - 
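Aside (not part of the patch): a compact restatement of what `reduced_residuals` and `reduced_chi2` above compute, with `np.sum(mask)` playing the role of `num_data_evaluate`:

import numpy as np

def reduced_chi2(model, data, variance, error_map, mask):
    norm_res = (model - data) / np.sqrt(variance + np.abs(error_map)) * mask
    return np.sum(norm_res**2) / np.sum(mask)   # chi^2 per unmasked pixel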
:return: 1d array - """ + """Returns 1d array of values in image that are not masked out for the + likelihood computation/linear minimization :param image: 2d numpy array of full + image :return: 1d array.""" array = util.image2array(image) return array[self._mask1d] @@ -477,9 +681,9 @@ def array_masked2image(self, array): return grid2d def _error_map_model(self, kwargs_lens, kwargs_ps, kwargs_special=None): - """ - noise estimate (variances as diagonal of the pixel covariance matrix) resulted from inherent model uncertainties - This term is currently the psf error map + """Noise estimate (variances as diagonal of the pixel covariance matrix) + resulted from inherent model uncertainties This term is currently the psf error + map. :param kwargs_lens: lens model keyword arguments :param kwargs_ps: point source keyword arguments @@ -489,8 +693,8 @@ def _error_map_model(self, kwargs_lens, kwargs_ps, kwargs_special=None): return self._error_map_psf(kwargs_lens, kwargs_ps, kwargs_special) def _error_map_psf(self, kwargs_lens, kwargs_ps, kwargs_special=None): - """ - map of image with error terms (sigma**2) expected from inaccuracies in the PSF modeling + """Map of image with error terms (sigma**2) expected from inaccuracies in the + PSF modeling. :param kwargs_lens: lens model keyword arguments :param kwargs_ps: point source keyword arguments @@ -501,19 +705,26 @@ def _error_map_psf(self, kwargs_lens, kwargs_ps, kwargs_special=None): if self._psf_error_map is True: for k, bool_ in enumerate(self._psf_error_map_bool_list): if bool_ is True: - ra_pos, dec_pos, _ = self.PointSource.point_source_list(kwargs_ps, kwargs_lens=kwargs_lens, k=k, - with_amp=False) + ra_pos, dec_pos, _ = self.PointSource.point_source_list( + kwargs_ps, kwargs_lens=kwargs_lens, k=k, with_amp=False + ) if len(ra_pos) > 0: - ra_pos, dec_pos = self._displace_astrometry(ra_pos, dec_pos, kwargs_special=kwargs_special) - error_map += self.ImageNumerics.psf_error_map(ra_pos, dec_pos, None, self.Data.data, - fix_psf_error_map=False) + ra_pos, dec_pos = self._displace_astrometry( + ra_pos, dec_pos, kwargs_special=kwargs_special + ) + error_map += self.ImageNumerics.psf_error_map( + ra_pos, + dec_pos, + None, + self.Data.data, + fix_psf_error_map=False, + ) return error_map def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param): - """ - variance of the linear source reconstruction in the source plane coordinates, - computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors - of the basis set. + """Variance of the linear source reconstruction in the source plane coordinates, + computed by the diagonal elements of the covariance matrix of the source + reconstruction as a sum of the errors of the basis set. :param kwargs_source: keyword arguments of source model :param x_grid: x-axis of positions to compute error map @@ -524,10 +735,9 @@ def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param): return self._error_map_source(kwargs_source, x_grid, y_grid, cov_param) def _error_map_source(self, kwargs_source, x_grid, y_grid, cov_param): - """ - variance of the linear source reconstruction in the source plane coordinates, - computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors - of the basis set. 
+ """Variance of the linear source reconstruction in the source plane coordinates, + computed by the diagonal elements of the covariance matrix of the source + reconstruction as a sum of the errors of the basis set. :param kwargs_source: keyword arguments of source model :param x_grid: x-axis of positions to compute error map @@ -537,15 +747,23 @@ def _error_map_source(self, kwargs_source, x_grid, y_grid, cov_param): """ error_map = np.zeros_like(x_grid) - basis_functions, n_source = self.SourceModel.functions_split(x_grid, y_grid, kwargs_source) + basis_functions, n_source = self.SourceModel.functions_split( + x_grid, y_grid, kwargs_source + ) basis_functions = np.array(basis_functions) if cov_param is not None: for i in range(len(error_map)): - error_map[i] = basis_functions[:, i].T.dot(cov_param[:n_source, :n_source]).dot(basis_functions[:, i]) + error_map[i] = ( + basis_functions[:, i] + .T.dot(cov_param[:n_source, :n_source]) + .dot(basis_functions[:, i]) + ) return error_map - def point_source_linear_response_set(self, kwargs_ps, kwargs_lens, kwargs_special, with_amp=True): + def point_source_linear_response_set( + self, kwargs_ps, kwargs_lens, kwargs_special, with_amp=True + ): """ :param kwargs_ps: point source keyword argument list @@ -555,11 +773,16 @@ def point_source_linear_response_set(self, kwargs_ps, kwargs_lens, kwargs_specia :return: list of positions and amplitudes split in different basis components with applied astrometric corrections """ - ra_pos, dec_pos, amp, n_points = self.PointSource.linear_response_set(kwargs_ps, kwargs_lens, with_amp=with_amp) + ra_pos, dec_pos, amp, n_points = self.PointSource.linear_response_set( + kwargs_ps, kwargs_lens, with_amp=with_amp + ) if kwargs_special is not None: - if 'delta_x_image' in kwargs_special: - delta_x, delta_y = kwargs_special['delta_x_image'], kwargs_special['delta_y_image'] + if "delta_x_image" in kwargs_special: + delta_x, delta_y = ( + kwargs_special["delta_x_image"], + kwargs_special["delta_y_image"], + ) k = 0 n = len(delta_x) for i in range(n_points): @@ -572,8 +795,8 @@ def point_source_linear_response_set(self, kwargs_ps, kwargs_lens, kwargs_specia return ra_pos, dec_pos, amp, n_points def check_positive_flux(self, kwargs_source, kwargs_lens_light, kwargs_ps): - """ - checks whether the surface brightness profiles contain positive fluxes and returns bool if True + """Checks whether the surface brightness profiles contain positive fluxes and + returns bool if True. 
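Aside (not part of the patch): `_error_map_source` above propagates the amplitude covariance onto the source-plane grid as sigma^2(x) = f(x)^T Cov f(x), summed over the source basis components. An equivalent vectorised sketch:

import numpy as np

def error_map_source(basis_functions, cov_param, n_source):
    # basis_functions: (n_source, n_grid) basis images from SourceModel.functions_split
    # cov_param: amplitude covariance matrix; only the source block enters
    cov_source = cov_param[:n_source, :n_source]
    return np.einsum("ik,ij,jk->k", basis_functions, cov_source, basis_functions)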
:param kwargs_source: source surface brightness keyword argument list :param kwargs_lens_light: lens surface brightness keyword argument list @@ -582,109 +805,132 @@ def check_positive_flux(self, kwargs_source, kwargs_lens_light, kwargs_ps): """ pos_bool_ps = self.PointSource.check_positive_flux(kwargs_ps) if self._pixelbased_bool is True: - # this constraint must be handled by the pixel-based solver + # this constraint must be handled by the pixel-based solver pos_bool_source = True pos_bool_lens_light = True else: - pos_bool_source = self.SourceModel.check_positive_flux_profile(kwargs_source) - pos_bool_lens_light = self.LensLightModel.check_positive_flux_profile(kwargs_lens_light) - if pos_bool_ps is True and pos_bool_source is True and pos_bool_lens_light is True: + pos_bool_source = self.SourceModel.check_positive_flux_profile( + kwargs_source + ) + pos_bool_lens_light = self.LensLightModel.check_positive_flux_profile( + kwargs_lens_light + ) + if ( + pos_bool_ps is True + and pos_bool_source is True + and pos_bool_lens_light is True + ): return True else: return False - + # linear solver for interferometric natwt method - def _image_linear_solve_interferometry_natwt(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, - kwargs_ps=None, kwargs_extinction=None, kwargs_special=None): - """ - 'interferometry_natwt' method does NOT support model_error, cov_param. - The interferometry linear solver just does the linear solving to get the optimal linear amplitudes - and apply the marginalized amplitudes to make the model images. - + def _image_linear_solve_interferometry_natwt( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + ): + """'interferometry_natwt' method does NOT support model_error, cov_param. The + interferometry linear solver just does the linear solving to get the optimal + linear amplitudes and apply the marginalized amplitudes to make the model + images. + :return: model, model_error, cov_param, param model and param are the same returns of self._image_linear_solve_interferometry_natwt_solving(A, d) function model_error =0 and cov_param = None for the interferometric method. - """ - A = self._linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, - kwargs_special, unconvolved=True) + A = self._linear_response_matrix( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + unconvolved=True, + ) d = self.data_response model, param = self._image_linear_solve_interferometry_natwt_solving(A, d) model_error = 0 # just a place holder cov_param = None # just a place holder - _, _, _, _ = self._update_linear_kwargs(param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + _, _, _, _ = self._update_linear_kwargs( + param, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) return model, model_error, cov_param, param def _image_linear_solve_interferometry_natwt_solving(self, A, d): - """ - Linearly solve the amplitude of each light profile response to the natural weighting interferometry images, - based on (placeholder for Nan Zhang's paper). - + """Linearly solve the amplitude of each light profile response to the natural + weighting interferometry images, based on (placeholder for Nan Zhang's paper). 
+ Theories: Suppose there are a set of light responses :math:`\\{x_i\\}`, we want to solve the set of amplitudes :math:`\\{\\alpha_i\\}`, - such that minimizes the chi^2 given by + such that minimizes the chi^2 given by .. math:: \\chi^2 = (d - A_{PSF}\\sum_i \\alpha_i x_i)^TC^{-1}(d - A_{PSF}\\sum_i \\alpha_i x_i), - where :math:`A_{PSF}` is the PSF convolution operation matrix (not to be confused with the input A of this function) + where :math:`A_{PSF}` is the PSF convolution operation matrix (not to be confused with the input A of this function) and :math:`C` is the noise covariance matrix. :math:`d` is the data image. - For natural weighting interferometric images, we have :math:`C = \\sigma^2 A_{PSF}`, + For natural weighting interferometric images, we have :math:`C = \\sigma^2 A_{PSF}`, (see Section 3.2 of https://doi.org/10.1093/mnras/staa2740 for the relation of natural weighting covariance matrix and PSF convolution) - therefore the chi^2 function simplifies to + therefore the chi^2 function simplifies to .. math:: \\chi^2 = \\frac{1}{\\sigma^2}(d^TA_{PSF}^{-1}d + \\sum_{i,j}\\alpha_i\\alpha_j x_i^TA_{PSF}x_j - 2\\sum_{i}x_i^Td), from which the optimal amplitudes :math:`\\{\\alpha_i\\}` can be solved linearly by solving .. math:: \\sum_{j} M_{ij}\\alpha_{j} = b_i, where :math:`M_{ij} = \\frac{1}{\\sigma^2}x_i^TA_{PSF}x_j` and :math:`b_{i} = \\frac{1}{\\sigma^2}x_i^Td`. - + The steps of this function are: (1.) Making the entries :math:`M_{ij}` and :math:`b_i` defined above. (2.) Solve the linear function to get the optimal amplitudes. (3.) Apply these optimal amplitudes to make unconvolved and convolved model images. - The output model images are in the form [array1, array2]. - (Note that this is different from the non-interferometric linear solver of Lenstronomy, + The output model images are in the form [array1, array2]. + (Note that this is different from the non-interferometric linear solver of Lenstronomy, this output form saves time for likelihood computations in imaging_data for interferometric method.) array1 is the unconvolved model image :math:`array1 = \\sum_i \\alpha_i x_i`, where :math:`\\alpha_i` is the solved optimal amplitudes. array2 is the convolved model image :math:`array2 = A_{PSF}\\sum_i \\alpha_i x_i`, where :math:`\\alpha_i`. - + :param A: response of unconvolved light profiles, [x_1, x_2, ...] :param d: data image, d - :return: [array1, array2], [amp_array] + :return: [array1, array2], [amp_array] where the [array1, array2] are unconvolved and convolved model images with solved amplitudes and [amp_array] are the solved optimal amplitudes. 
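Aside (not part of the patch): the linear system described above, as solved by `_image_linear_solve_interferometry_natwt_solving`. In this sketch `convolve_psf` stands in for the dirty-beam convolution of each unconvolved response; the common 1/sigma^2 factor cancels between M and b and is dropped:

import numpy as np

def natwt_amplitudes(X, d, convolve_psf):
    """X: (m, n) unconvolved light responses x_i (flattened); d: (n,) dirty image."""
    X_conv = np.array([convolve_psf(x) for x in X])   # A_psf x_j for each component
    M = X @ X_conv.T                                  # M_ij = x_i^T A_psf x_j
    b = X @ d                                         # b_i  = x_i^T d
    alpha = np.linalg.lstsq(M, b, rcond=None)[0]      # optimal amplitudes
    clean = alpha @ X                                 # unconvolved model, sum_i alpha_i x_i
    dirty = alpha @ X_conv                            # convolved model, A_psf sum_i alpha_i x_i
    return alpha, clean, dirty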
- """ num_of_light, num_of_image_pixel = np.shape(A) - + A_convolved = np.zeros(np.shape(A)) - + # convolve each response separately for i in range(num_of_light): - A_convolved[i] = util.image2array(self._convolution._static_fft(util.array2image(A[i]), mode='same')) - - M = np.zeros((num_of_light,num_of_light)) + A_convolved[i] = util.image2array( + self._convolution._static_fft(util.array2image(A[i]), mode="same") + ) + + M = np.zeros((num_of_light, num_of_light)) for i in range(num_of_light): for j in range(num_of_light): if j < i: - M[i,j] = M[j,i] + M[i, j] = M[j, i] else: - M[i,j] = np.sum(A_convolved[j] * A[i]) - + M[i, j] = np.sum(A_convolved[j] * A[i]) + b = np.zeros((num_of_light)) for i in range(num_of_light): b[i] = np.sum(A[i] * (d)) - + param_amps = np.linalg.lstsq(M, b)[0] - + clean_temp = np.zeros((num_of_image_pixel)) dirty_temp = np.zeros((num_of_image_pixel)) for i in range(num_of_light): clean_temp += param_amps[i] * A[i] dirty_temp += param_amps[i] * A_convolved[i] - + clean_model = util.array2image(clean_temp) dirty_model = util.array2image(dirty_temp) - + model = [clean_model, dirty_model] - - return model, param_amps \ No newline at end of file + + return model, param_amps diff --git a/lenstronomy/ImSim/image_model.py b/lenstronomy/ImSim/image_model.py index 2d95c9d78..c18751b4b 100644 --- a/lenstronomy/ImSim/image_model.py +++ b/lenstronomy/ImSim/image_model.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.ImSim.Numerics.numerics_subframe import NumericsSubFrame from lenstronomy.ImSim.image2source_mapping import Image2SourceMapping @@ -10,16 +10,25 @@ import numpy as np -__all__ = ['ImageModel'] +__all__ = ["ImageModel"] class ImageModel(object): - """ - this class uses functions of lens_model and source_model to make a lensed image - """ - def __init__(self, data_class, psf_class, lens_model_class=None, source_model_class=None, - lens_light_model_class=None, point_source_class=None, extinction_class=None, kwargs_numerics=None, - kwargs_pixelbased=None): + """This class uses functions of lens_model and source_model to make a lensed + image.""" + + def __init__( + self, + data_class, + psf_class, + lens_model_class=None, + source_model_class=None, + lens_light_model_class=None, + point_source_class=None, + extinction_class=None, + kwargs_numerics=None, + kwargs_pixelbased=None, + ): """ :param data_class: instance of ImageData() or PixelGrid() class :param psf_class: instance of PSF() class @@ -32,18 +41,20 @@ def __init__(self, data_class, psf_class, lens_model_class=None, source_model_cl (see SLITronomy documentation) """ - self.type = 'single-band' + self.type = "single-band" self.num_bands = 1 self.PSF = psf_class self.Data = data_class - if hasattr(self.Data, 'flux_scaling'): + if hasattr(self.Data, "flux_scaling"): self._flux_scaling = self.Data.flux_scaling else: self._flux_scaling = 1 self.PSF.set_pixel_size(self.Data.pixel_width) if kwargs_numerics is None: kwargs_numerics = {} - self.ImageNumerics = NumericsSubFrame(pixel_grid=self.Data, psf=self.PSF, **kwargs_numerics) + self.ImageNumerics = NumericsSubFrame( + pixel_grid=self.Data, psf=self.PSF, **kwargs_numerics + ) if lens_model_class is None: lens_model_class = LensModel(lens_model_list=[]) self.LensModel = lens_model_class @@ -53,9 +64,13 @@ def __init__(self, data_class, psf_class, lens_model_class=None, source_model_cl if self.PointSource._lensModel is None: self.PointSource.update_lens_model(lens_model_class=lens_model_class) x_center, y_center = 
self.Data.center - self.PointSource.update_search_window(search_window=np.max(self.Data.width), x_center=x_center, - y_center=y_center, min_distance=self.Data.pixel_width, - only_from_unspecified=True) + self.PointSource.update_search_window( + search_window=np.max(self.Data.width), + x_center=x_center, + y_center=y_center, + min_distance=self.Data.pixel_width, + only_from_unspecified=True, + ) self._psf_error_map = self.PSF.psf_error_map_bool if source_model_class is None: @@ -74,15 +89,30 @@ def __init__(self, data_class, psf_class, lens_model_class=None, source_model_cl kwargs_pixelbased = kwargs_pixelbased.copy() self._pixelbased_bool = self._detect_pixelbased_models() if self._pixelbased_bool is True: - from slitronomy.Util.class_util import create_solver_class # requirement on SLITronomy is exclusively here - self.SourceNumerics = self._setup_pixelbased_source_numerics(kwargs_numerics, kwargs_pixelbased) - self.PixelSolver = create_solver_class(self.Data, self.PSF, self.ImageNumerics, self.SourceNumerics, - self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, - self._extinction, kwargs_pixelbased) + from slitronomy.Util.class_util import ( + create_solver_class, + ) # requirement on SLITronomy is exclusively here + + self.SourceNumerics = self._setup_pixelbased_source_numerics( + kwargs_numerics, kwargs_pixelbased + ) + self.PixelSolver = create_solver_class( + self.Data, + self.PSF, + self.ImageNumerics, + self.SourceNumerics, + self.LensModel, + self.SourceModel, + self.LensLightModel, + self.PointSource, + self._extinction, + kwargs_pixelbased, + ) self.source_mapping = None # handled with pixelated operator else: - - self.source_mapping = Image2SourceMapping(lensModel=lens_model_class, sourceModel=source_model_class) + self.source_mapping = Image2SourceMapping( + lensModel=lens_model_class, sourceModel=source_model_class + ) self._pb = data_class.primary_beam if self._pb is not None: @@ -91,174 +121,270 @@ def __init__(self, data_class, psf_class, lens_model_class=None, source_model_cl self._pb_1d = None def reset_point_source_cache(self, cache=True): - """ - deletes all the cache in the point source class and saves it from then on + """Deletes all the cache in the point source class and saves it from then on. - :param cache: boolean, if True, saves the next occuring point source positions in the cache + :param cache: boolean, if True, saves the next occuring point source positions + in the cache :return: None """ self.PointSource.delete_lens_model_cache() self.PointSource.set_save_cache(cache) def update_psf(self, psf_class): - """ - - update the instance of the class with a new instance of PSF() with a potentially different point spread function + """Update the instance of the class with a new instance of PSF() with a + potentially different point spread function. :param psf_class: :return: no return. Class is updated. 
""" self.PSF = psf_class self.PSF.set_pixel_size(self.Data.pixel_width) - self.ImageNumerics = NumericsSubFrame(pixel_grid=self.Data, psf=self.PSF, **self._kwargs_numerics) - - def source_surface_brightness(self, kwargs_source, kwargs_lens=None, kwargs_extinction=None, kwargs_special=None, - unconvolved=False, de_lensed=False, k=None, update_pixelbased_mapping=True): - """ - - computes the source surface brightness distribution - - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles + self.ImageNumerics = NumericsSubFrame( + pixel_grid=self.Data, psf=self.PSF, **self._kwargs_numerics + ) + + def source_surface_brightness( + self, + kwargs_source, + kwargs_lens=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + de_lensed=False, + k=None, + update_pixelbased_mapping=True, + ): + """Computes the source surface brightness distribution. + + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles :param kwargs_extinction: list of keyword arguments of extinction model - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) - :param de_lensed: if True: returns the un-lensed source surface brightness profile, otherwise the lensed. + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) + :param de_lensed: if True: returns the un-lensed source surface brightness + profile, otherwise the lensed. :param k: integer, if set, will only return the model of the specific index :return: 2d array of surface brightness pixels """ - return self._source_surface_brightness(kwargs_source, kwargs_lens, kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special, unconvolved=unconvolved, - de_lensed=de_lensed, k=k, - update_pixelbased_mapping=update_pixelbased_mapping) - - def _source_surface_brightness(self, kwargs_source, kwargs_lens=None, kwargs_extinction=None, kwargs_special=None, - unconvolved=False, de_lensed=False, k=None, update_pixelbased_mapping=True): - """ - - computes the source surface brightness distribution - - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles + return self._source_surface_brightness( + kwargs_source, + kwargs_lens, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + de_lensed=de_lensed, + k=k, + update_pixelbased_mapping=update_pixelbased_mapping, + ) + + def _source_surface_brightness( + self, + kwargs_source, + kwargs_lens=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + de_lensed=False, + k=None, + update_pixelbased_mapping=True, + ): + """Computes the source surface brightness distribution. 
+ + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles :param kwargs_extinction: list of keyword arguments of extinction model - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) - :param de_lensed: if True: returns the un-lensed source surface brightness profile, otherwise the lensed. + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) + :param de_lensed: if True: returns the un-lensed source surface brightness + profile, otherwise the lensed. :param k: integer, if set, will only return the model of the specific index :return: 2d array of surface brightness pixels """ if len(self.SourceModel.profile_type_list) == 0: return np.zeros(self.Data.num_pixel_axes) if self._pixelbased_bool is True: - return self._source_surface_brightness_pixelbased(kwargs_source, kwargs_lens=kwargs_lens, - kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special, - unconvolved=unconvolved, de_lensed=de_lensed, k=k, - update_mapping=update_pixelbased_mapping) + return self._source_surface_brightness_pixelbased( + kwargs_source, + kwargs_lens=kwargs_lens, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + de_lensed=de_lensed, + k=k, + update_mapping=update_pixelbased_mapping, + ) else: - return self._source_surface_brightness_analytical(kwargs_source, kwargs_lens=kwargs_lens, - kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special, - unconvolved=unconvolved, de_lensed=de_lensed, k=k) - - def _source_surface_brightness_analytical(self, kwargs_source, kwargs_lens=None, kwargs_extinction=None, kwargs_special=None, - unconvolved=False, de_lensed=False, k=None): - """ - - computes the source surface brightness distribution - - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles + return self._source_surface_brightness_analytical( + kwargs_source, + kwargs_lens=kwargs_lens, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + de_lensed=de_lensed, + k=k, + ) + + def _source_surface_brightness_analytical( + self, + kwargs_source, + kwargs_lens=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + de_lensed=False, + k=None, + ): + """Computes the source surface brightness distribution. + + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles :param kwargs_extinction: list of keyword arguments of extinction model - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) - :param de_lensed: if True: returns the un-lensed source surface brightness profile, otherwise the lensed. + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) + :param de_lensed: if True: returns the un-lensed source surface brightness + profile, otherwise the lensed. 
:param k: integer, if set, will only return the model of the specific index :return: 2d array of surface brightness pixels """ ra_grid, dec_grid = self.ImageNumerics.coordinates_evaluate if de_lensed is True: - source_light = self.SourceModel.surface_brightness(ra_grid, dec_grid, kwargs_source, k=k) + source_light = self.SourceModel.surface_brightness( + ra_grid, dec_grid, kwargs_source, k=k + ) else: - source_light = self.source_mapping.image_flux_joint(ra_grid, dec_grid, kwargs_lens, kwargs_source, k=k) - source_light *= self._extinction.extinction(ra_grid, dec_grid, kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special) - + source_light = self.source_mapping.image_flux_joint( + ra_grid, dec_grid, kwargs_lens, kwargs_source, k=k + ) + source_light *= self._extinction.extinction( + ra_grid, + dec_grid, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + ) + # multiply with primary beam before convolution if self._pb is not None: source_light *= self._pb_1d - - source_light_final = self.ImageNumerics.re_size_convolve(source_light, unconvolved=unconvolved) - return source_light_final * self._flux_scaling - def _source_surface_brightness_pixelbased(self, kwargs_source, kwargs_lens=None, kwargs_extinction=None, kwargs_special=None, - unconvolved=False, de_lensed=False, k=None, update_mapping=True): - """ - computes the source surface brightness distribution, using pixel-based solver for light profiles (from SLITronomy) + source_light_final = self.ImageNumerics.re_size_convolve( + source_light, unconvolved=unconvolved + ) + return source_light_final * self._flux_scaling - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles + def _source_surface_brightness_pixelbased( + self, + kwargs_source, + kwargs_lens=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + de_lensed=False, + k=None, + update_mapping=True, + ): + """Computes the source surface brightness distribution, using pixel-based solver + for light profiles (from SLITronomy) + + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles :param kwargs_extinction: list of keyword arguments of extinction model - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) - :param de_lensed: if True: returns the un-lensed source surface brightness profile, otherwise the lensed. + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) + :param de_lensed: if True: returns the un-lensed source surface brightness + profile, otherwise the lensed. :param k: integer, if set, will only return the model of the specific index - :param update_mapping: if False, prevent the pixelated lensing mapping to be updated (saves computation time if previously computed). + :param update_mapping: if False, prevent the pixelated lensing mapping to be + updated (saves computation time if previously computed). 
:return: 2d array of surface brightness pixels """ ra_grid, dec_grid = self.SourceNumerics.coordinates_evaluate - source_light = self.SourceModel.surface_brightness(ra_grid, dec_grid, kwargs_source, k=k) + source_light = self.SourceModel.surface_brightness( + ra_grid, dec_grid, kwargs_source, k=k + ) if de_lensed is True: - source_light = self.SourceNumerics.re_size_convolve(source_light, unconvolved=unconvolved) + source_light = self.SourceNumerics.re_size_convolve( + source_light, unconvolved=unconvolved + ) else: source_mapping = self.PixelSolver.lensingOperator - source_light = source_mapping.source2image(source_light, kwargs_lens=kwargs_lens, kwargs_special=kwargs_special, - update_mapping=update_mapping, original_source_grid=True) - source_light = self.ImageNumerics.re_size_convolve(source_light, unconvolved=unconvolved) + source_light = source_mapping.source2image( + source_light, + kwargs_lens=kwargs_lens, + kwargs_special=kwargs_special, + update_mapping=update_mapping, + original_source_grid=True, + ) + source_light = self.ImageNumerics.re_size_convolve( + source_light, unconvolved=unconvolved + ) # undo flux normalization performed by re_size_convolve (already handled in SLITronomy) source_light_final = source_light / self.Data.pixel_width**2 return source_light_final * self._flux_scaling def lens_surface_brightness(self, kwargs_lens_light, unconvolved=False, k=None): - """ + """Computes the lens surface brightness distribution. - computes the lens surface brightness distribution - - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param unconvolved: if True, returns unconvolved surface brightness (perfect seeing), otherwise convolved with PSF kernel + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param unconvolved: if True, returns unconvolved surface brightness (perfect + seeing), otherwise convolved with PSF kernel :return: 2d array of surface brightness pixels """ - return self._lens_surface_brightness(kwargs_lens_light, unconvolved=unconvolved, k=k) + return self._lens_surface_brightness( + kwargs_lens_light, unconvolved=unconvolved, k=k + ) def _lens_surface_brightness(self, kwargs_lens_light, unconvolved=False, k=None): - """ + """Computes the lens surface brightness distribution. 
- computes the lens surface brightness distribution - - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param unconvolved: if True, returns unconvolved surface brightness (perfect seeing), otherwise convolved with PSF kernel + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param unconvolved: if True, returns unconvolved surface brightness (perfect + seeing), otherwise convolved with PSF kernel :return: 2d array of surface brightness pixels """ if self._pixelbased_bool is True: if unconvolved is True: - raise ValueError("Lens light pixel-based modelling does not perform deconvolution") + raise ValueError( + "Lens light pixel-based modelling does not perform deconvolution" + ) return self._lens_surface_brightness_pixelbased(kwargs_lens_light, k=k) else: - return self._lens_surface_brightness_analytical(kwargs_lens_light, unconvolved=unconvolved, k=k) - - def _lens_surface_brightness_analytical(self, kwargs_lens_light, unconvolved=False, k=None): - """ - - computes the lens surface brightness distribution - - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param unconvolved: if True, returns unconvolved surface brightness (perfect seeing), otherwise convolved with PSF kernel + return self._lens_surface_brightness_analytical( + kwargs_lens_light, unconvolved=unconvolved, k=k + ) + + def _lens_surface_brightness_analytical( + self, kwargs_lens_light, unconvolved=False, k=None + ): + """Computes the lens surface brightness distribution. + + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param unconvolved: if True, returns unconvolved surface brightness (perfect + seeing), otherwise convolved with PSF kernel :return: 2d array of surface brightness pixels """ ra_grid, dec_grid = self.ImageNumerics.coordinates_evaluate - lens_light = self.LensLightModel.surface_brightness(ra_grid, dec_grid, kwargs_lens_light, k=k) - + lens_light = self.LensLightModel.surface_brightness( + ra_grid, dec_grid, kwargs_lens_light, k=k + ) + # multiply with primary beam before convolution if self._pb is not None: lens_light *= self._pb_1d - - lens_light_final = self.ImageNumerics.re_size_convolve(lens_light, unconvolved=unconvolved) + + lens_light_final = self.ImageNumerics.re_size_convolve( + lens_light, unconvolved=unconvolved + ) return lens_light_final * self._flux_scaling def _lens_surface_brightness_pixelbased(self, kwargs_lens_light, k=None): @@ -271,14 +397,21 @@ def _lens_surface_brightness_pixelbased(self, kwargs_lens_light, k=None): :return: 2d array of surface brightness pixels """ ra_grid, dec_grid = self.ImageNumerics.coordinates_evaluate - lens_light = self.LensLightModel.surface_brightness(ra_grid, dec_grid, kwargs_lens_light, k=k) + lens_light = self.LensLightModel.surface_brightness( + ra_grid, dec_grid, kwargs_lens_light, k=k + ) lens_light_final = util.array2image(lens_light) return lens_light_final * self._flux_scaling - def point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconvolved=False, k=None): - """ - - computes the point source positions and paints PSF convolutions on them + def point_source( + self, + kwargs_ps, + kwargs_lens=None, + kwargs_special=None, + unconvolved=False, + k=None, + ): + """Computes the point source positions and paints PSF convolutions on them. 
:param kwargs_ps: :param kwargs_lens: @@ -287,13 +420,23 @@ def point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconvo :param k: :return: """ - return self._point_source(kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, kwargs_special=kwargs_special, - unconvolved=unconvolved, k=k) - - def _point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconvolved=False, k=None): - """ - - computes the point source positions and paints PSF convolutions on them + return self._point_source( + kwargs_ps=kwargs_ps, + kwargs_lens=kwargs_lens, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + k=k, + ) + + def _point_source( + self, + kwargs_ps, + kwargs_lens=None, + kwargs_special=None, + unconvolved=False, + k=None, + ): + """Computes the point source positions and paints PSF convolutions on them. :param kwargs_ps: :param kwargs_lens: @@ -305,46 +448,90 @@ def _point_source(self, kwargs_ps, kwargs_lens=None, kwargs_special=None, unconv point_source_image = np.zeros((self.Data.num_pixel_axes)) if unconvolved or self.PointSource is None: return point_source_image - ra_pos, dec_pos, amp = self.PointSource.point_source_list(kwargs_ps, kwargs_lens=kwargs_lens, k=k) + ra_pos, dec_pos, amp = self.PointSource.point_source_list( + kwargs_ps, kwargs_lens=kwargs_lens, k=k + ) # raise warnings when primary beam is attempted to be applied to point sources. if len(ra_pos) != 0 and self._pb is not None: - raise Warning("Antenna primary beam does not apply to point sources in ImageModel!") - ra_pos, dec_pos = self._displace_astrometry(ra_pos, dec_pos, kwargs_special=kwargs_special) - point_source_image += self.ImageNumerics.point_source_rendering(ra_pos, dec_pos, amp) + raise Warning( + "Antenna primary beam does not apply to point sources in ImageModel!" + ) + ra_pos, dec_pos = self._displace_astrometry( + ra_pos, dec_pos, kwargs_special=kwargs_special + ) + point_source_image += self.ImageNumerics.point_source_rendering( + ra_pos, dec_pos, amp + ) return point_source_image * self._flux_scaling - def image(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, unconvolved=False, source_add=True, lens_light_add=True, - point_source_add=True): - """ - - make an image with a realisation of linear parameter values "param" - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) + def image( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + source_add=True, + lens_light_add=True, + point_source_add=True, + ): + """Make an image with a realisation of linear parameter values "param". 
+ + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) :param source_add: if True, compute source, otherwise without :param lens_light_add: if True, compute lens light, otherwise without :param point_source_add: if True, add point sources, otherwise without :return: 2d array of surface brightness pixels of the simulation """ - return self._image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_extinction, kwargs_special, - unconvolved, source_add, lens_light_add, point_source_add) - - def _image(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_extinction=None, kwargs_special=None, unconvolved=False, source_add=True, lens_light_add=True, - point_source_add=True): - """ - - make an image with a realisation of linear parameter values "param" - - :param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles - :param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles - :param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles - :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions - :param unconvolved: if True: returns the unconvolved light distribution (prefect seeing) + return self._image( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction, + kwargs_special, + unconvolved, + source_add, + lens_light_add, + point_source_add, + ) + + def _image( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_extinction=None, + kwargs_special=None, + unconvolved=False, + source_add=True, + lens_light_add=True, + point_source_add=True, + ): + """Make an image with a realisation of linear parameter values "param". 
+ + :param kwargs_lens: list of keyword arguments corresponding to the superposition + of different lens profiles + :param kwargs_source: list of keyword arguments corresponding to the + superposition of different source light profiles + :param kwargs_lens_light: list of keyword arguments corresponding to different + lens light surface brightness profiles + :param kwargs_ps: keyword arguments corresponding to "other" parameters, such as + external shear and point source image positions + :param unconvolved: if True: returns the unconvolved light distribution (prefect + seeing) :param source_add: if True, compute source, otherwise without :param lens_light_add: if True, compute lens light, otherwise without :param point_source_add: if True, add point sources, otherwise without @@ -352,100 +539,141 @@ def _image(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, k """ model = np.zeros(self.Data.num_pixel_axes) if source_add is True: - model += self._source_surface_brightness(kwargs_source, kwargs_lens, kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special, unconvolved=unconvolved) + model += self._source_surface_brightness( + kwargs_source, + kwargs_lens, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + ) if lens_light_add is True: - model += self._lens_surface_brightness(kwargs_lens_light, unconvolved=unconvolved) + model += self._lens_surface_brightness( + kwargs_lens_light, unconvolved=unconvolved + ) if point_source_add is True: - model += self._point_source(kwargs_ps, kwargs_lens, kwargs_special=kwargs_special, unconvolved=unconvolved) + model += self._point_source( + kwargs_ps, + kwargs_lens, + kwargs_special=kwargs_special, + unconvolved=unconvolved, + ) return model def extinction_map(self, kwargs_extinction=None, kwargs_special=None): - """ - differential extinction per pixel + """Differential extinction per pixel. - :param kwargs_extinction: list of keyword arguments corresponding to the optical depth models tau, such that extinction is exp(-tau) + :param kwargs_extinction: list of keyword arguments corresponding to the optical + depth models tau, such that extinction is exp(-tau) :param kwargs_special: keyword arguments, additional parameter to the extinction :return: 2d array of size of the image """ return self._extinction_map(kwargs_extinction, kwargs_special) def _extinction_map(self, kwargs_extinction=None, kwargs_special=None): - """ - differential extinction per pixel + """Differential extinction per pixel. 
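As an illustrative aside: the reformatted image() above simply sums the (optionally convolved) source, lens-light and point-source layers. A minimal sketch of calling that API; class names, profile names and keyword arguments follow the public lenstronomy interface as I understand it and may differ between versions, and all numbers are made up.

    import lenstronomy.Util.simulation_util as sim_util
    from lenstronomy.Data.imaging_data import ImageData
    from lenstronomy.Data.psf import PSF
    from lenstronomy.LensModel.lens_model import LensModel
    from lenstronomy.LightModel.light_model import LightModel
    from lenstronomy.ImSim.image_model import ImageModel

    # a small pixel grid and a Gaussian PSF
    kwargs_data = sim_util.data_configure_simple(
        numPix=100, deltaPix=0.05, exposure_time=100, background_rms=0.01
    )
    data_class = ImageData(**kwargs_data)
    psf_class = PSF(psf_type="GAUSSIAN", fwhm=0.1, pixel_size=0.05)

    image_model = ImageModel(
        data_class,
        psf_class,
        lens_model_class=LensModel(["SIE"]),
        source_model_class=LightModel(["SERSIC_ELLIPSE"]),
        lens_light_model_class=LightModel(["SERSIC_ELLIPSE"]),
        kwargs_numerics={"supersampling_factor": 1},
    )

    kwargs_lens = [{"theta_E": 1.0, "e1": 0.05, "e2": 0.0, "center_x": 0.0, "center_y": 0.0}]
    kwargs_source = [{"amp": 10.0, "R_sersic": 0.2, "n_sersic": 1.5, "e1": 0.0, "e2": 0.0, "center_x": 0.05, "center_y": 0.0}]
    kwargs_lens_light = [{"amp": 5.0, "R_sersic": 0.5, "n_sersic": 3.0, "e1": 0.0, "e2": 0.0, "center_x": 0.0, "center_y": 0.0}]

    # sum of source light, lens light and point sources, as composed in _image() above
    model = image_model.image(
        kwargs_lens=kwargs_lens,
        kwargs_source=kwargs_source,
        kwargs_lens_light=kwargs_lens_light,
        unconvolved=False,
    )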
- :param kwargs_extinction: list of keyword arguments corresponding to the optical depth models tau, such that extinction is exp(-tau) + :param kwargs_extinction: list of keyword arguments corresponding to the optical + depth models tau, such that extinction is exp(-tau) :param kwargs_special: keyword arguments, additional parameter to the extinction :return: 2d array of size of the image """ ra_grid, dec_grid = self.ImageNumerics.coordinates_evaluate - extinction = self._extinction.extinction(ra_grid, dec_grid, kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special) - print(extinction, 'test extinction') + extinction = self._extinction.extinction( + ra_grid, + dec_grid, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + ) + print(extinction, "test extinction") extinction_array = np.ones_like(ra_grid) * extinction - extinction = self.ImageNumerics.re_size_convolve(extinction_array, unconvolved=True) / self.ImageNumerics.grid_class.pixel_width ** 2 + extinction = ( + self.ImageNumerics.re_size_convolve(extinction_array, unconvolved=True) + / self.ImageNumerics.grid_class.pixel_width**2 + ) return extinction @staticmethod def _displace_astrometry(x_pos, y_pos, kwargs_special=None): - """ - displaces point sources by shifts specified in kwargs_special - - :param x_pos: list of point source positions according to point source model list - :param y_pos: list of point source positions according to point source model list - :param kwargs_special: keyword arguments, can contain 'delta_x_image' and 'delta_y_image' - The list is defined in order of the image positions + """Displaces point sources by shifts specified in kwargs_special. + + :param x_pos: list of point source positions according to point source model + list + :param y_pos: list of point source positions according to point source model + list + :param kwargs_special: keyword arguments, can contain 'delta_x_image' and + 'delta_y_image' The list is defined in order of the image positions :return: shifted image positions in same format as input """ if kwargs_special is not None: - if 'delta_x_image' in kwargs_special: - delta_x, delta_y = kwargs_special['delta_x_image'], kwargs_special['delta_y_image'] + if "delta_x_image" in kwargs_special: + delta_x, delta_y = ( + kwargs_special["delta_x_image"], + kwargs_special["delta_y_image"], + ) delta_x_new = np.zeros(len(x_pos)) - delta_x_new[0:len(delta_x)] = delta_x[:] + delta_x_new[0 : len(delta_x)] = delta_x[:] delta_y_new = np.zeros(len(y_pos)) - delta_y_new[0:len(delta_y)] = delta_y + delta_y_new[0 : len(delta_y)] = delta_y x_pos = x_pos + delta_x_new y_pos = y_pos + delta_y_new return x_pos, y_pos def _detect_pixelbased_models(self): - """ - Returns True if light profiles specific to pixel-based modelling are present in source model list. - Otherwise returns False. + """Returns True if light profiles specific to pixel-based modelling are present + in source model list. Otherwise returns False. - Currently, pixel-based light profiles are: 'SLIT_STARLETS', 'SLIT_STARLETS_GEN2'. + Currently, pixel-based light profiles are: 'SLIT_STARLETS', + 'SLIT_STARLETS_GEN2'. 
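As an aside, _displace_astrometry above pads the fitted offsets to the number of point-source images and then shifts the positions. A self-contained numpy sketch of that padding-and-shift step, with made-up positions and offsets:

    import numpy as np

    # hypothetical image positions for four point-source images, with astrometric
    # offsets available only for the first two
    x_pos = np.array([1.10, -0.92, 0.31, -0.24])
    y_pos = np.array([0.52, -1.18, 0.77, -0.63])
    kwargs_special = {"delta_x_image": [0.01, -0.02], "delta_y_image": [0.00, 0.03]}

    # pad the offsets to the number of images, then shift, mirroring _displace_astrometry
    delta_x = np.zeros(len(x_pos))
    delta_x[: len(kwargs_special["delta_x_image"])] = kwargs_special["delta_x_image"]
    delta_y = np.zeros(len(y_pos))
    delta_y[: len(kwargs_special["delta_y_image"])] = kwargs_special["delta_y_image"]

    x_shifted = x_pos + delta_x
    y_shifted = y_pos + delta_y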
""" source_model_list = self.SourceModel.profile_type_list - if 'SLIT_STARLETS' in source_model_list or 'SLIT_STARLETS_GEN2' in source_model_list: + if ( + "SLIT_STARLETS" in source_model_list + or "SLIT_STARLETS_GEN2" in source_model_list + ): if len(source_model_list) > 1: - raise ValueError("'SLIT_STARLETS' or 'SLIT_STARLETS_GEN2' must be the only source model list for pixel-based modelling") + raise ValueError( + "'SLIT_STARLETS' or 'SLIT_STARLETS_GEN2' must be the only source model list for pixel-based modelling" + ) return True return False def _setup_pixelbased_source_numerics(self, kwargs_numerics, kwargs_pixelbased): - """ - Check if model requirement are compatible with support pixel-based solver, + """Check if model requirement are compatible with support pixel-based solver, and creates a new numerics class specifically for source plane. - :param kwargs_numerics: keyword argument with various numeric description (see ImageNumerics class for options) - :param kwargs_pixelbased: keyword argument with various settings related to the pixel-based solver (see SLITronomy documentation) + :param kwargs_numerics: keyword argument with various numeric description (see + ImageNumerics class for options) + :param kwargs_pixelbased: keyword argument with various settings related to the + pixel-based solver (see SLITronomy documentation) """ # check that the required convolution type is compatible with pixel-based modelling (in current implementation) psf_type = self.PSF.psf_type - supersampling_convolution = kwargs_numerics.get('supersampling_convolution', False) - supersampling_factor = kwargs_numerics.get('supersampling_factor', 1) - compute_mode = kwargs_numerics.get('compute_mode', 'regular') - if psf_type not in ['PIXEL', 'NONE']: - raise ValueError("Only convolution using a pixelated kernel is supported for pixel-based modelling") - if compute_mode != 'regular': - raise ValueError("Only regular coordinate grid is supported for pixel-based modelling") - if (supersampling_convolution is True and supersampling_factor > 1): - raise ValueError("Only non-supersampled convolution is supported for pixel-based modelling") + supersampling_convolution = kwargs_numerics.get( + "supersampling_convolution", False + ) + supersampling_factor = kwargs_numerics.get("supersampling_factor", 1) + compute_mode = kwargs_numerics.get("compute_mode", "regular") + if psf_type not in ["PIXEL", "NONE"]: + raise ValueError( + "Only convolution using a pixelated kernel is supported for pixel-based modelling" + ) + if compute_mode != "regular": + raise ValueError( + "Only regular coordinate grid is supported for pixel-based modelling" + ) + if supersampling_convolution is True and supersampling_factor > 1: + raise ValueError( + "Only non-supersampled convolution is supported for pixel-based modelling" + ) # set up the source numerics with a (possibily) different supersampling resolution - supersampling_factor_source = kwargs_pixelbased.pop('supersampling_factor_source', 1) + supersampling_factor_source = kwargs_pixelbased.pop( + "supersampling_factor_source", 1 + ) kwargs_numerics_source = kwargs_numerics.copy() - kwargs_numerics_source['supersampling_factor'] = supersampling_factor_source - kwargs_numerics_source['compute_mode'] = 'regular' - source_numerics_class = NumericsSubFrame(pixel_grid=self.Data, psf=self.PSF, **kwargs_numerics_source) + kwargs_numerics_source["supersampling_factor"] = supersampling_factor_source + kwargs_numerics_source["compute_mode"] = "regular" + source_numerics_class = 
NumericsSubFrame( + pixel_grid=self.Data, psf=self.PSF, **kwargs_numerics_source + ) return source_numerics_class diff --git a/lenstronomy/LensModel/LightConeSim/light_cone.py b/lenstronomy/LensModel/LightConeSim/light_cone.py index 97a2f6b31..2c025615f 100644 --- a/lenstronomy/LensModel/LightConeSim/light_cone.py +++ b/lenstronomy/LensModel/LightConeSim/light_cone.py @@ -5,15 +5,15 @@ from lenstronomy.Cosmo.lens_cosmo import LensCosmo from lenstronomy.LensModel.lens_model import LensModel -__all__ = ['LightCone', 'MassSlice'] +__all__ = ["LightCone", "MassSlice"] class LightCone(object): - """ - class to perform multi-plane ray-tracing from convergence maps at different redshifts - From the convergence maps the deflection angles and lensing potential are computed (from different settings) - and then an interpolated grid of all those quantities generate an instance of the lenstronomy LensModel multi-plane - instance. All features of the LensModel module are supported. + """Class to perform multi-plane ray-tracing from convergence maps at different + redshifts From the convergence maps the deflection angles and lensing potential are + computed (from different settings) and then an interpolated grid of all those + quantities generate an instance of the lenstronomy LensModel multi-plane instance. + All features of the LensModel module are supported. Improvements that can be made for accuracy and speed: 1. adaptive mesh integral for the convergence map @@ -34,7 +34,9 @@ def __init__(self, mass_map_list, grid_spacing_list, redshift_list): """ self._mass_slice_list = [] for i in range(len(mass_map_list)): - self._mass_slice_list.append(MassSlice(mass_map_list[i], grid_spacing_list[i], redshift_list[i])) + self._mass_slice_list.append( + MassSlice(mass_map_list[i], grid_spacing_list[i], redshift_list[i]) + ) self._mass_map_list = mass_map_list self._grid_spacing_list = grid_spacing_list self._redshift_list = redshift_list @@ -49,10 +51,15 @@ def cone_instance(self, z_source, cosmo, multi_plane=True, kwargs_interp=None): See description in the Interpolate() class. Only applicable for 'INTERPOL' and 'INTERPOL_SCALED' models. 
:return: LensModel instance, keyword argument list of lens model """ - lens_model = LensModel(lens_model_list=['INTERPOL'] * len(self._mass_map_list), - lens_redshift_list=self._redshift_list, multi_plane=multi_plane, - z_source_convention=z_source, cosmo=cosmo, z_source=z_source, - kwargs_interp=kwargs_interp) + lens_model = LensModel( + lens_model_list=["INTERPOL"] * len(self._mass_map_list), + lens_redshift_list=self._redshift_list, + multi_plane=multi_plane, + z_source_convention=z_source, + cosmo=cosmo, + z_source=z_source, + kwargs_interp=kwargs_interp, + ) kwargs_lens = [] for mass_slice in self._mass_slice_list: kwargs_lens.append(mass_slice.interpol_instance(z_source, cosmo)) @@ -60,9 +67,8 @@ def cone_instance(self, z_source, cosmo, multi_plane=True, kwargs_interp=None): class MassSlice(object): - """ - class to describe a single mass slice - """ + """Class to describe a single mass slice.""" + def __init__(self, mass_map, grid_spacing, redshift): """ @@ -72,34 +78,59 @@ def __init__(self, mass_map, grid_spacing, redshift): """ nx, ny = np.shape(mass_map) if nx != ny: - raise ValueError('Shape of mass map needs to be square!, set as %s %s' % (nx, ny)) + raise ValueError( + "Shape of mass map needs to be square!, set as %s %s" % (nx, ny) + ) self._mass_map = mass_map self._grid_spacing = grid_spacing self._redshift = redshift - self._f_x_mass, self._f_y_mass = convergence_integrals.deflection_from_kappa_grid(self._mass_map, - self._grid_spacing) - self._f_mass = convergence_integrals.potential_from_kappa_grid(self._mass_map, self._grid_spacing) - x_grid, y_grid = util.make_grid(numPix=len(self._mass_map), deltapix=self._grid_spacing) + ( + self._f_x_mass, + self._f_y_mass, + ) = convergence_integrals.deflection_from_kappa_grid( + self._mass_map, self._grid_spacing + ) + self._f_mass = convergence_integrals.potential_from_kappa_grid( + self._mass_map, self._grid_spacing + ) + x_grid, y_grid = util.make_grid( + numPix=len(self._mass_map), deltapix=self._grid_spacing + ) self._x_axes_mpc, self._y_axes_mpc = util.get_axes(x_grid, y_grid) def interpol_instance(self, z_source, cosmo): - """ - scales the mass map integrals (with units of mass not convergence) into a convergence map for the given - cosmology and source redshift and returns the keyword arguments of the interpolated reduced deflection and - lensing potential. + """Scales the mass map integrals (with units of mass not convergence) into a + convergence map for the given cosmology and source redshift and returns the + keyword arguments of the interpolated reduced deflection and lensing potential. 
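A hedged usage sketch of the LightCone class reformatted above, assuming cone_instance returns the LensModel instance together with its keyword-argument list as its docstring states; the mass maps, physical grid spacings and redshifts below are made up.

    import numpy as np
    from astropy.cosmology import FlatLambdaCDM
    from lenstronomy.LensModel.LightConeSim.light_cone import LightCone

    # three toy (square) mass maps at different redshifts; grid spacings are the
    # physical pixel sizes of the maps
    mass_map_list = [np.random.uniform(0, 1e10, size=(100, 100)) for _ in range(3)]
    grid_spacing_list = [0.5, 0.5, 0.5]
    redshift_list = [0.2, 0.5, 0.8]

    light_cone = LightCone(mass_map_list, grid_spacing_list, redshift_list)
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    lens_model, kwargs_lens = light_cone.cone_instance(
        z_source=2.0, cosmo=cosmo, multi_plane=True
    )

    # the returned pair behaves like any other LensModel + kwargs combination
    beta_x, beta_y = lens_model.ray_shooting(1.0, 0.5, kwargs_lens)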
:param z_source: redshift of the source :param cosmo: astropy.cosmology instance - :return: keyword arguments of the interpolation instance with numerically computed deflection angles and lensing - potential + :return: keyword arguments of the interpolation instance with numerically + computed deflection angles and lensing potential """ lens_cosmo = LensCosmo(z_lens=self._redshift, z_source=z_source, cosmo=cosmo) mpc2arcsec = lens_cosmo.dd * const.arcsec x_axes = self._x_axes_mpc / mpc2arcsec # units of arc seconds in grid spacing y_axes = self._y_axes_mpc / mpc2arcsec # units of arc seconds in grid spacing - f_ = self._f_mass / lens_cosmo.sigma_crit_angle / self._grid_spacing ** 2 - f_x = self._f_x_mass / lens_cosmo.sigma_crit_angle / self._grid_spacing ** 2 * mpc2arcsec - f_y = self._f_y_mass / lens_cosmo.sigma_crit_angle / self._grid_spacing ** 2 * mpc2arcsec - kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': f_, 'f_x': f_x, 'f_y': f_y} + f_ = self._f_mass / lens_cosmo.sigma_crit_angle / self._grid_spacing**2 + f_x = ( + self._f_x_mass + / lens_cosmo.sigma_crit_angle + / self._grid_spacing**2 + * mpc2arcsec + ) + f_y = ( + self._f_y_mass + / lens_cosmo.sigma_crit_angle + / self._grid_spacing**2 + * mpc2arcsec + ) + kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": f_, + "f_x": f_x, + "f_y": f_y, + } return kwargs_interp diff --git a/lenstronomy/LensModel/LineOfSight/LOSModels/__init__.py b/lenstronomy/LensModel/LineOfSight/LOSModels/__init__.py index 1a4baf536..8b1378917 100644 --- a/lenstronomy/LensModel/LineOfSight/LOSModels/__init__.py +++ b/lenstronomy/LensModel/LineOfSight/LOSModels/__init__.py @@ -1 +1 @@ - + diff --git a/lenstronomy/LensModel/LineOfSight/LOSModels/los.py b/lenstronomy/LensModel/LineOfSight/LOSModels/los.py index e1f8e132b..464d3265e 100644 --- a/lenstronomy/LensModel/LineOfSight/LOSModels/los.py +++ b/lenstronomy/LensModel/LineOfSight/LOSModels/los.py @@ -1,20 +1,18 @@ -__author__ = 'pierrefleury' +__author__ = "pierrefleury" -__all__ = ['LOS'] +__all__ = ["LOS"] class LOS(object): - """ - Class allowing one to add tidal line-of-sight effects (convergence and - shear) to single-plane lensing. Stricly speaking, this is not a profile, - but when present in list of lens models, it is automatically recognised by - ModelAPI(), which sets the flag los_effects to True, and thereby leads - LensModel to use SinglePlaneLOS() instead of SinglePlane(). It is however - incompatible with MultiPlane(). - - The key-word arguments are the three line-of-sight convergences, the - two components of the three line-of-sight shears, and the three - line-of-sight rotations, all defined with the convention of + """Class allowing one to add tidal line-of-sight effects (convergence and shear) to + single-plane lensing. Stricly speaking, this is not a profile, but when present in + list of lens models, it is automatically recognised by ModelAPI(), which sets the + flag los_effects to True, and thereby leads LensModel to use SinglePlaneLOS() + instead of SinglePlane(). It is however incompatible with MultiPlane(). 
+ + The key-word arguments are the three line-of-sight convergences, the two components + of the three line-of-sight shears, and the three line-of-sight rotations, all + defined with the convention of https://arxiv.org/abs/2104.08883: kappa_od, kappa_os, kappa_ds, gamma1_od, gamma2_od, gamma1_os, gamma2_os, gamma1_ds, gamma2_ds, omega_od, omega_os, omega_ds @@ -26,11 +24,20 @@ class LOS(object): Instead, it contains the essential building blocks of this modification. """ - param_names = ['kappa_od', 'kappa_os', 'kappa_ds', - 'gamma1_od', 'gamma2_od', - 'gamma1_os', 'gamma2_os', - 'gamma1_ds', 'gamma2_ds', - 'omega_od', 'omega_os', 'omega_ds'] + param_names = [ + "kappa_od", + "kappa_os", + "kappa_ds", + "gamma1_od", + "gamma2_od", + "gamma1_os", + "gamma2_os", + "gamma1_ds", + "gamma2_ds", + "omega_od", + "omega_os", + "omega_ds", + ] lower_limit_default = {pert: -0.5 for pert in param_names} upper_limit_default = {pert: 0.5 for pert in param_names} @@ -39,9 +46,8 @@ def __init__(self, *args, **kwargs): @staticmethod def distort_vector(x, y, kappa=0, gamma1=0, gamma2=0, omega=0): - """ - This function applies a distortion matrix to a vector (x, y) and - returns (x', y') as follows: + """This function applies a distortion matrix to a vector (x, y) and returns (x', + y') as follows: .. math:: \\begin{pmatrix} @@ -76,12 +82,9 @@ def distort_vector(x, y, kappa=0, gamma1=0, gamma2=0, omega=0): return x_, y_ @staticmethod - def left_multiply(f_xx, f_xy, f_yx, f_yy, - kappa=0, gamma1=0, gamma2=0, omega=0): - - """ - Left-multiplies the Hessian matrix of a lens with a distortion matrix - with convergence kappa, shear gamma1, gamma2, and rotation omega: + def left_multiply(f_xx, f_xy, f_yx, f_yy, kappa=0, gamma1=0, gamma2=0, omega=0): + """Left-multiplies the Hessian matrix of a lens with a distortion matrix with + convergence kappa, shear gamma1, gamma2, and rotation omega: .. math:: \\mathsf{H}' @@ -104,20 +107,17 @@ def left_multiply(f_xx, f_xy, f_yx, f_yy, :return: the Hessian left-multiplied by the distortion matrix """ - f__xx = (1 - kappa - gamma1) * f_xx + (- gamma2 + omega) * f_yx - f__xy = (1 - kappa - gamma1) * f_xy + (- gamma2 + omega) * f_yy - f__yx = - (gamma2 + omega) * f_xx + (1 - kappa + gamma1) * f_yx - f__yy = - (gamma2 + omega) * f_xy + (1 - kappa + gamma1) * f_yy + f__xx = (1 - kappa - gamma1) * f_xx + (-gamma2 + omega) * f_yx + f__xy = (1 - kappa - gamma1) * f_xy + (-gamma2 + omega) * f_yy + f__yx = -(gamma2 + omega) * f_xx + (1 - kappa + gamma1) * f_yx + f__yy = -(gamma2 + omega) * f_xy + (1 - kappa + gamma1) * f_yy return f__xx, f__xy, f__yx, f__yy @staticmethod - def right_multiply(f_xx, f_xy, f_yx, f_yy, - kappa=0, gamma1=0, gamma2=0, omega=0): - - """ - Right-multiplies the Hessian matrix of a lens with a distortion matrix - with convergence kappa and shear gamma1, gamma2: + def right_multiply(f_xx, f_xy, f_yx, f_yy, kappa=0, gamma1=0, gamma2=0, omega=0): + """Right-multiplies the Hessian matrix of a lens with a distortion matrix with + convergence kappa and shear gamma1, gamma2: .. 
math:: \\mathsf{H}' @@ -141,19 +141,19 @@ def right_multiply(f_xx, f_xy, f_yx, f_yy, """ f__xx = (1 - kappa - gamma1) * f_xx - (gamma2 + omega) * f_xy - f__xy = (- gamma2 + omega) * f_xx + (1 - kappa + gamma1) * f_xy + f__xy = (-gamma2 + omega) * f_xx + (1 - kappa + gamma1) * f_xy f__yx = (1 - kappa - gamma1) * f_yx - (gamma2 + omega) * f_yy - f__yy = (- gamma2 + omega) * f_yx + (1 - kappa + gamma1) * f_yy + f__yy = (-gamma2 + omega) * f_yx + (1 - kappa + gamma1) * f_yy return f__xx, f__xy, f__yx, f__yy - + def set_static(self, **kwargs): - """ - pre-computes certain computations that do only relate to the lens model parameters and not to the specific - position where to evaluate the lens model + """Pre-computes certain computations that do only relate to the lens model + parameters and not to the specific position where to evaluate the lens model. :param kwargs: lens model parameters - :return: no return, for certain lens model some private self variables are initiated + :return: no return, for certain lens model some private self variables are + initiated """ pass diff --git a/lenstronomy/LensModel/LineOfSight/LOSModels/los_minimal.py b/lenstronomy/LensModel/LineOfSight/LOSModels/los_minimal.py index 71af86f02..877deb30d 100644 --- a/lenstronomy/LensModel/LineOfSight/LOSModels/los_minimal.py +++ b/lenstronomy/LensModel/LineOfSight/LOSModels/los_minimal.py @@ -1,8 +1,8 @@ -__author__ = ['nataliehogg', 'pierrefleury'] +__author__ = ["nataliehogg", "pierrefleury"] from lenstronomy.LensModel.LineOfSight.LOSModels.los import LOS -__all__ = ['LOSMinimal'] +__all__ = ["LOSMinimal"] class LOSMinimal(LOS): @@ -15,7 +15,15 @@ class LOSMinimal(LOS): gamma2_los, omega_los. """ - param_names = ['kappa_od', 'gamma1_od', 'gamma2_od', 'omega_od', - 'kappa_los', 'gamma1_los', 'gamma2_los', 'omega_los'] + param_names = [ + "kappa_od", + "gamma1_od", + "gamma2_od", + "omega_od", + "kappa_los", + "gamma1_los", + "gamma2_los", + "omega_los", + ] lower_limit_default = {pert: -0.5 for pert in param_names} upper_limit_default = {pert: 0.5 for pert in param_names} diff --git a/lenstronomy/LensModel/LineOfSight/__init__.py b/lenstronomy/LensModel/LineOfSight/__init__.py index 1a4baf536..8b1378917 100644 --- a/lenstronomy/LensModel/LineOfSight/__init__.py +++ b/lenstronomy/LensModel/LineOfSight/__init__.py @@ -1 +1 @@ - + diff --git a/lenstronomy/LensModel/LineOfSight/single_plane_los.py b/lenstronomy/LensModel/LineOfSight/single_plane_los.py index 9847bb806..43ae6a439 100644 --- a/lenstronomy/LensModel/LineOfSight/single_plane_los.py +++ b/lenstronomy/LensModel/LineOfSight/single_plane_los.py @@ -1,16 +1,15 @@ -__author__ = ['nataliehogg', 'pierrefleury', 'danjohnson98'] +__author__ = ["nataliehogg", "pierrefleury", "danjohnson98"] from lenstronomy.LensModel.single_plane import SinglePlane import numpy as np import copy -__all__ = ['SinglePlaneLOS'] +__all__ = ["SinglePlaneLOS"] class SinglePlaneLOS(SinglePlane): - """ - This class is based on the 'SinglePlane' class, modified to include - line-of-sight effects as presented by Fleury et al. in 2104.08883. + """This class is based on the 'SinglePlane' class, modified to include line-of-sight + effects as presented by Fleury et al. in 2104.08883. Are modified: - init (to include a new attribute, self.los) @@ -24,13 +23,16 @@ class SinglePlaneLOS(SinglePlane): corrections. 
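The left_multiply and right_multiply helpers above are plain 2x2 matrix products with the distortion matrix built from (kappa, gamma1, gamma2, omega). A small numpy check of that correspondence, importing the static method from the path shown in the diff; the perturbation values and Hessian are made up.

    import numpy as np
    from lenstronomy.LensModel.LineOfSight.LOSModels.los import LOS

    kappa, gamma1, gamma2, omega = 0.05, 0.02, -0.01, 0.003
    # the distortion (amplification) matrix built from convergence, shear and rotation
    A = np.array(
        [
            [1 - kappa - gamma1, -gamma2 + omega],
            [-gamma2 - omega, 1 - kappa + gamma1],
        ]
    )

    # an arbitrary Hessian; left_multiply should reproduce the matrix product A @ H
    H = np.array([[0.30, 0.10], [0.12, 0.40]])
    f_xx, f_xy, f_yx, f_yy = LOS.left_multiply(
        H[0, 0], H[0, 1], H[1, 0], H[1, 1],
        kappa=kappa, gamma1=gamma1, gamma2=gamma2, omega=omega,
    )
    assert np.allclose([[f_xx, f_xy], [f_yx, f_yy]], A @ H)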
""" - def __init__(self, lens_model_list, index_los, - numerical_alpha_class=None, - lens_redshift_list=None, - z_source_convention=None, - kwargs_interp=None, - kwargs_synthesis=None - ): + def __init__( + self, + lens_model_list, + index_los, + numerical_alpha_class=None, + lens_redshift_list=None, + z_source_convention=None, + kwargs_interp=None, + kwargs_synthesis=None, + ): """ Instance of SinglePlaneLOS() based on the SinglePlane(), except: - argument "index_los" indicating the position of the LOS model in the @@ -47,74 +49,106 @@ def __init__(self, lens_model_list, index_los, # Extract the los model and import its class self._index_los = index_los self._los_model = lens_model_list[index_los] - self.los = self._import_class(self._los_model, custom_class=None, kwargs_interp=None, kwargs_synthesis=kwargs_synthesis) + self.los = self._import_class( + self._los_model, + custom_class=None, + kwargs_interp=None, + kwargs_synthesis=kwargs_synthesis, + ) # Define a separate class for the main lens lens_model_list_wo_los = [ - model for i, model in enumerate(lens_model_list) - if i != index_los] - self._main_lens = SinglePlane(lens_model_list_wo_los, - numerical_alpha_class=numerical_alpha_class, - lens_redshift_list=lens_redshift_list, - z_source_convention=z_source_convention, - kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + model for i, model in enumerate(lens_model_list) if i != index_los + ] + self._main_lens = SinglePlane( + lens_model_list_wo_los, + numerical_alpha_class=numerical_alpha_class, + lens_redshift_list=lens_redshift_list, + z_source_convention=z_source_convention, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) def split_lens_los(self, kwargs): - """ - This function splits the list of key-word arguments given to the lens - model into those that correspond to the lens itself (kwargs_main), and - those that correspond to the line-of-sight corrections (kwargs_los). + """This function splits the list of key-word arguments given to the lens model + into those that correspond to the lens itself (kwargs_main), and those that + correspond to the line-of-sight corrections (kwargs_los). 
:param kwargs: the list of key-word arguments passed to lenstronomy - :return: a list of kwargs corresponding to the lens and a list of kwargs corresponding to the LOS effects + :return: a list of kwargs corresponding to the lens and a list of kwargs + corresponding to the LOS effects """ kwargs_los = copy.deepcopy(kwargs[self._index_los]) # if 'LOS_MINIMAL' is at play, we set Gamma_os = Gamma_los # and Gamma_ds = Gamma_od - if self._los_model == 'LOS_MINIMAL': - kwargs_los['kappa_os'] = kwargs_los.pop('kappa_los') - kwargs_los['gamma1_os'] = kwargs_los.pop('gamma1_los') - kwargs_los['gamma2_os'] = kwargs_los.pop('gamma2_los') - kwargs_los['omega_os'] = kwargs_los.pop('omega_los') - kwargs_los['kappa_ds'] = kwargs_los['kappa_od'] - kwargs_los['gamma1_ds'] = kwargs_los['gamma1_od'] - kwargs_los['gamma2_ds'] = kwargs_los['gamma2_od'] - kwargs_los['omega_ds'] = kwargs_los['omega_od'] - - kwargs_main = [kwarg for i, kwarg in enumerate(kwargs) - if i != self._index_los] + if self._los_model == "LOS_MINIMAL": + kwargs_los["kappa_os"] = kwargs_los.pop("kappa_los") + kwargs_los["gamma1_os"] = kwargs_los.pop("gamma1_los") + kwargs_los["gamma2_os"] = kwargs_los.pop("gamma2_los") + kwargs_los["omega_os"] = kwargs_los.pop("omega_los") + kwargs_los["kappa_ds"] = kwargs_los["kappa_od"] + kwargs_los["gamma1_ds"] = kwargs_los["gamma1_od"] + kwargs_los["gamma2_ds"] = kwargs_los["gamma2_od"] + kwargs_los["omega_ds"] = kwargs_los["omega_od"] + + kwargs_main = [kwarg for i, kwarg in enumerate(kwargs) if i != self._index_los] return kwargs_main, kwargs_los - def fermat_potential(self, x_image, y_image, kwargs_lens, x_source=None, y_source=None, k=None): - """ - Calculates the Fermat Potential with LOS corrections in the tidal regime + def fermat_potential( + self, x_image, y_image, kwargs_lens, x_source=None, y_source=None, k=None + ): + """Calculates the Fermat Potential with LOS corrections in the tidal regime. 
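An illustrative instantiation of SinglePlaneLOS with the 'LOS_MINIMAL' model whose kwargs are split above; the main-lens profile ('EPL') and all parameter values are made up, and the call pattern assumes ray_shooting is inherited unchanged from SinglePlane.

    from lenstronomy.LensModel.LineOfSight.single_plane_los import SinglePlaneLOS

    # main lens ('EPL') plus a minimal set of line-of-sight perturbations; index_los
    # points at the position of 'LOS_MINIMAL' in the list
    lens_model = SinglePlaneLOS(["EPL", "LOS_MINIMAL"], index_los=1)
    kwargs_lens = [
        {"theta_E": 1.0, "gamma": 2.0, "e1": 0.05, "e2": -0.02, "center_x": 0.0, "center_y": 0.0},
        {
            "kappa_od": 0.02, "gamma1_od": 0.01, "gamma2_od": -0.005, "omega_od": 0.0,
            "kappa_los": 0.03, "gamma1_los": -0.01, "gamma2_los": 0.02, "omega_los": 0.0,
        },
    ]
    # split_lens_los() maps the *_los entries onto *_os and copies *_od onto *_ds
    beta_x, beta_y = lens_model.ray_shooting(1.1, -0.3, kwargs_lens)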
:param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position - :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs_lens: list of keyword arguments of lens model parameters matching + the lens model classes :return: fermat potential in arcsec**2 as a list """ kwargs_main, kwargs_los = self.split_lens_los(kwargs_lens) # the amplification matrices - A_od = np.array([[1-kwargs_los['kappa_od']-kwargs_los['gamma1_od'], - -kwargs_los['gamma2_od']+kwargs_los['omega_od']], - [-kwargs_los['gamma2_od']-kwargs_los['omega_od'], - 1-kwargs_los['kappa_od']+kwargs_los['gamma1_od']]]) - A_os = np.array([[1-kwargs_los['kappa_os']-kwargs_los['gamma1_os'], - -kwargs_los['gamma2_os']+kwargs_los['omega_os']], - [-kwargs_los['gamma2_os']-kwargs_los['omega_os'], - 1-kwargs_los['kappa_os']+kwargs_los['gamma1_os']]]) - A_ds = np.array([[1-kwargs_los['kappa_ds']-kwargs_los['gamma1_ds'], - -kwargs_los['gamma2_ds']+kwargs_los['omega_ds']], - [-kwargs_los['gamma2_ds']-kwargs_los['omega_ds'], - 1-kwargs_los['kappa_ds']+kwargs_los['gamma1_ds']]]) + A_od = np.array( + [ + [ + 1 - kwargs_los["kappa_od"] - kwargs_los["gamma1_od"], + -kwargs_los["gamma2_od"] + kwargs_los["omega_od"], + ], + [ + -kwargs_los["gamma2_od"] - kwargs_los["omega_od"], + 1 - kwargs_los["kappa_od"] + kwargs_los["gamma1_od"], + ], + ] + ) + A_os = np.array( + [ + [ + 1 - kwargs_los["kappa_os"] - kwargs_los["gamma1_os"], + -kwargs_los["gamma2_os"] + kwargs_los["omega_os"], + ], + [ + -kwargs_los["gamma2_os"] - kwargs_los["omega_os"], + 1 - kwargs_los["kappa_os"] + kwargs_los["gamma1_os"], + ], + ] + ) + A_ds = np.array( + [ + [ + 1 - kwargs_los["kappa_ds"] - kwargs_los["gamma1_ds"], + -kwargs_los["gamma2_ds"] + kwargs_los["omega_ds"], + ], + [ + -kwargs_los["gamma2_ds"] - kwargs_los["omega_ds"], + 1 - kwargs_los["kappa_ds"] + kwargs_los["gamma1_ds"], + ], + ] + ) # the inverse and transposed amplification matrices A_od_tsp = np.transpose(A_od) @@ -125,46 +159,50 @@ def fermat_potential(self, x_image, y_image, kwargs_lens, x_source=None, y_sourc A_LOS = np.dot(np.dot(A_od_tsp, A_ds_inv), A_os) # Angular position where the ray hits the deflector's plane - x_d, y_d = self.los.distort_vector(x_image, y_image, - kappa=kwargs_los['kappa_od'], - omega=kwargs_los['omega_od'], - gamma1=kwargs_los['gamma1_od'], - gamma2=kwargs_los['gamma2_od']) + x_d, y_d = self.los.distort_vector( + x_image, + y_image, + kappa=kwargs_los["kappa_od"], + omega=kwargs_los["omega_od"], + gamma1=kwargs_los["gamma1_od"], + gamma2=kwargs_los["gamma2_od"], + ) # Evaluating the potential of the main lens at this position - effective_potential = self._main_lens.potential(x_d, y_d, kwargs=kwargs_main, k=k) + effective_potential = self._main_lens.potential( + x_d, y_d, kwargs=kwargs_main, k=k + ) # obtaining the source position if x_source is None or y_source is None: x_source, y_source = self.ray_shooting(x_image, y_image, kwargs_lens, k=k) # the source position, modified by A_os_inv - b_x = A_os_inv[0][0]*x_source + A_os_inv[0][1]*y_source - b_y = A_os_inv[1][0]*x_source + A_os_inv[1][1]*y_source + b_x = A_os_inv[0][0] * x_source + A_os_inv[0][1] * y_source + b_y = A_os_inv[1][0] * x_source + A_os_inv[1][1] * y_source # alpha' f_x = x_image - b_x f_y = y_image - b_y # alpha' must then be further distorted by A_LOS - a_x = A_LOS[0][0]*f_x + A_LOS[0][1]*f_y - a_y = A_LOS[1][0]*f_x + A_LOS[1][1]*f_y + a_x = A_LOS[0][0] * f_x + A_LOS[0][1] * f_y + a_y = 
A_LOS[1][0] * f_x + A_LOS[1][1] * f_y # we can then obtain the geometrical term - geometry = (f_x*a_x + f_y*a_y) / 2 + geometry = (f_x * a_x + f_y * a_y) / 2 return geometry - effective_potential def alpha(self, x, y, kwargs, k=None): - """ - Displacement angle including the line-of-sight corrections + """Displacement angle including the line-of-sight corrections. :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters - matching the lens model classes, including line-of-sight corrections + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes, including line-of-sight corrections :param k: only evaluate the k-th lens model :return: deflection angles in units of arcsec """ @@ -172,28 +210,37 @@ def alpha(self, x, y, kwargs, k=None): kwargs_main, kwargs_los = self.split_lens_los(kwargs) # Angular position where the ray hits the deflector's plane - x_d, y_d = self.los.distort_vector(x, y, - kappa=kwargs_los['kappa_od'], - omega=kwargs_los['omega_od'], - gamma1=kwargs_los['gamma1_od'], - gamma2=kwargs_los['gamma2_od']) + x_d, y_d = self.los.distort_vector( + x, + y, + kappa=kwargs_los["kappa_od"], + omega=kwargs_los["omega_od"], + gamma1=kwargs_los["gamma1_od"], + gamma2=kwargs_los["gamma2_od"], + ) # Displacement due to the main lens only f_x, f_y = self._main_lens.alpha(x_d, y_d, kwargs=kwargs_main, k=k) # Correction due to the background convergence, shear and rotation - f_x, f_y = self.los.distort_vector(f_x, f_y, - kappa=kwargs_los['kappa_ds'], - omega=kwargs_los['omega_ds'], - gamma1=kwargs_los['gamma1_ds'], - gamma2=kwargs_los['gamma2_ds']) + f_x, f_y = self.los.distort_vector( + f_x, + f_y, + kappa=kwargs_los["kappa_ds"], + omega=kwargs_los["omega_ds"], + gamma1=kwargs_los["gamma1_ds"], + gamma2=kwargs_los["gamma2_ds"], + ) # Perturbed position in the absence of the main lens - x_os, y_os = self.los.distort_vector(x, y, - kappa=kwargs_los['kappa_os'], - omega=kwargs_los['omega_os'], - gamma1=kwargs_los['gamma1_os'], - gamma2=kwargs_los['gamma2_os']) + x_os, y_os = self.los.distort_vector( + x, + y, + kappa=kwargs_los["kappa_os"], + omega=kwargs_los["omega_os"], + gamma1=kwargs_los["gamma1_os"], + gamma2=kwargs_los["gamma2_os"], + ) # Complete displacement f_x += x - x_os @@ -202,14 +249,14 @@ def alpha(self, x, y, kwargs, k=None): return f_x, f_y def hessian(self, x, y, kwargs, k=None): - """ - Hessian matrix + """Hessian matrix. 
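The Fermat-potential computation above combines three distortion matrices into A_LOS = A_od^T A_ds^{-1} A_os and uses it in the geometric term. A self-contained numpy sketch of that combination, with made-up perturbation values:

    import numpy as np

    def amplification_matrix(kappa, gamma1, gamma2, omega):
        # distortion matrix (1 - Gamma) for one leg of the line of sight
        return np.array(
            [
                [1 - kappa - gamma1, -gamma2 + omega],
                [-gamma2 - omega, 1 - kappa + gamma1],
            ]
        )

    # made-up perturbations for the observer-deflector, observer-source and
    # deflector-source legs
    A_od = amplification_matrix(0.02, 0.01, -0.005, 0.000)
    A_os = amplification_matrix(0.03, -0.01, 0.020, 0.001)
    A_ds = amplification_matrix(0.01, 0.005, 0.000, 0.000)

    # effective matrix entering the time-delay geometry term, as in fermat_potential
    A_LOS = A_od.T @ np.linalg.inv(A_ds) @ A_os

    # geometrical term for an effective displacement alpha' = (f_x, f_y)
    f = np.array([0.4, -0.1])
    geometry = f @ (A_LOS @ f) / 2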
:param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model :return: f_xx, f_xy, f_yx, f_yy components """ @@ -217,45 +264,58 @@ def hessian(self, x, y, kwargs, k=None): kwargs_main, kwargs_los = self.split_lens_los(kwargs) # Angular position where the ray hits the deflector's plane - x_d, y_d = self.los.distort_vector(x, y, - kappa=kwargs_los['kappa_od'], - omega=kwargs_los['omega_od'], - gamma1=kwargs_los['gamma1_od'], - gamma2=kwargs_los['gamma2_od']) + x_d, y_d = self.los.distort_vector( + x, + y, + kappa=kwargs_los["kappa_od"], + omega=kwargs_los["omega_od"], + gamma1=kwargs_los["gamma1_od"], + gamma2=kwargs_los["gamma2_od"], + ) # Hessian matrix of the main lens only - f_xx, f_xy, f_yx, f_yy = self._main_lens.hessian(x_d, y_d, kwargs=kwargs_main, k=k) + f_xx, f_xy, f_yx, f_yy = self._main_lens.hessian( + x_d, y_d, kwargs=kwargs_main, k=k + ) # Multiply on the left by (1 - Gamma_ds) f_xx, f_xy, f_yx, f_yy = self.los.left_multiply( - f_xx, f_xy, f_yx, f_yy, - kappa=kwargs_los['kappa_ds'], - omega=kwargs_los['omega_ds'], - gamma1=kwargs_los['gamma1_ds'], - gamma2=kwargs_los['gamma2_ds']) + f_xx, + f_xy, + f_yx, + f_yy, + kappa=kwargs_los["kappa_ds"], + omega=kwargs_los["omega_ds"], + gamma1=kwargs_los["gamma1_ds"], + gamma2=kwargs_los["gamma2_ds"], + ) # Multiply on the right by (1 - Gamma_od) f_xx, f_xy, f_yx, f_yy = self.los.right_multiply( - f_xx, f_xy, f_yx, f_yy, - kappa=kwargs_los['kappa_od'], - omega=kwargs_los['omega_od'], - gamma1=kwargs_los['gamma1_od'], - gamma2=kwargs_los['gamma2_od']) + f_xx, + f_xy, + f_yx, + f_yy, + kappa=kwargs_los["kappa_od"], + omega=kwargs_los["omega_od"], + gamma1=kwargs_los["gamma1_od"], + gamma2=kwargs_los["gamma2_od"], + ) # LOS contribution in the absence of the main lens - f_xx += kwargs_los['kappa_os'] + kwargs_los['gamma1_os'] - f_xy += kwargs_los['gamma2_os'] - kwargs_los['omega_os'] - f_yx += kwargs_los['gamma2_os'] + kwargs_los['omega_os'] - f_yy += kwargs_los['kappa_os'] - kwargs_los['gamma1_os'] + f_xx += kwargs_los["kappa_os"] + kwargs_los["gamma1_os"] + f_xy += kwargs_los["gamma2_os"] - kwargs_los["omega_os"] + f_yx += kwargs_los["gamma2_os"] + kwargs_los["omega_os"] + f_yy += kwargs_los["kappa_os"] - kwargs_los["gamma1_os"] return f_xx, f_xy, f_yx, f_yy def mass_3d(self, r, kwargs, bool_list=None): - """ - Computes the mass within a 3d sphere of radius r *for the main lens only* + """Computes the mass within a 3d sphere of radius r *for the main lens only* :param r: radius (in angular units) - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit) """ @@ -268,8 +328,7 @@ def mass_3d(self, r, kwargs, bool_list=None): return mass_3d def mass_2d(self, r, kwargs, bool_list=None): - """ - Computes the mass enclosed a projected (2d) radius r *for the main lens only* + """Computes the mass enclosed a projected (2d) radius r *for the main lens only* The mass definition is such that: @@ -292,12 +351,12 @@ def mass_2d(self, r, kwargs, bool_list=None): return mass_2d def 
density(self, r, kwargs, bool_list=None): - """ - 3d mass density at radius r *for the main lens only* - The integral in the LOS projection of this quantity results in the convergence quantity. + """3d mass density at radius r *for the main lens only* The integral in the LOS + projection of this quantity results in the convergence quantity. :param r: radius (in angular units) - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param bool_list: list of bools that are part of the output :return: mass density at radius r (in angular units, modulo epsilon_crit) """ @@ -310,23 +369,25 @@ def density(self, r, kwargs, bool_list=None): return density def potential(self, x, y, kwargs, k=None): - """ - Lensing potential *of the main lens only* - In the presence of LOS corrections, the system generally does not admit - a potential, in the sense that the curl of alpha is generally non-zero + """Lensing potential *of the main lens only* In the presence of LOS corrections, + the system generally does not admit a potential, in the sense that the curl of + alpha is generally non-zero. :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model :return: lensing potential in units of arcsec^2 """ - print("Note: The computation of the potential ignores the LOS corrections.\ + print( + "Note: The computation of the potential ignores the LOS corrections.\ In the presence of LOS corrections, a lensing system does not always\ - derive from a potential.") + derive from a potential." + ) # kwargs_main, kwargs_los = self.split_lens_los(kwargs) potential = self._main_lens.potential(x, y, kwargs, k=k) diff --git a/lenstronomy/LensModel/MultiPlane/multi_plane.py b/lenstronomy/LensModel/MultiPlane/multi_plane.py index b64bddd21..db5266bf0 100644 --- a/lenstronomy/LensModel/MultiPlane/multi_plane.py +++ b/lenstronomy/LensModel/MultiPlane/multi_plane.py @@ -3,21 +3,35 @@ from lenstronomy.LensModel.MultiPlane.multi_plane_base import MultiPlaneBase from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class MultiPlane(object): - """ - Multi-plane lensing class with option to assign positions of a selected set of lens models in the observed plane. + """Multi-plane lensing class with option to assign positions of a selected set of + lens models in the observed plane. - The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the - source redshift of the class instance. + The lens model deflection angles are in units of reduced deflections from the + specified redshift of the lens to the source redshift of the class instance. 
""" - def __init__(self, z_source, lens_model_list, lens_redshift_list, cosmo=None, numerical_alpha_class=None, - observed_convention_index=None, ignore_observed_positions=False, z_source_convention=None, - cosmo_interp=False, z_interp_stop=None, num_z_interp=100, kwargs_interp=None, kwargs_synthesis=None): + def __init__( + self, + z_source, + lens_model_list, + lens_redshift_list, + cosmo=None, + numerical_alpha_class=None, + observed_convention_index=None, + ignore_observed_positions=False, + z_source_convention=None, + cosmo_interp=False, + z_interp_stop=None, + num_z_interp=100, + kwargs_interp=None, + kwargs_synthesis=None, + ): """ :param z_source: source redshift for default computation of reduced lensing quantities @@ -43,14 +57,23 @@ def __init__(self, z_source, lens_model_list, lens_redshift_list, cosmo=None, nu if z_interp_stop is None: z_interp_stop = max(z_source, z_source_convention) if z_interp_stop < max(z_source, z_source_convention): - raise ValueError('z_interp_stop= %s needs to be larger or equal the maximum of z_source=%s and ' - 'z_source_convention=%s' % (z_interp_stop, z_source, z_source_convention)) - self._multi_plane_base = MultiPlaneBase(lens_model_list=lens_model_list, - lens_redshift_list=lens_redshift_list, cosmo=cosmo, - numerical_alpha_class=numerical_alpha_class, - z_source_convention=z_source_convention, cosmo_interp=cosmo_interp, - z_interp_stop=z_interp_stop, num_z_interp=num_z_interp, - kwargs_interp=kwargs_interp, kwargs_synthesis=kwargs_synthesis) + raise ValueError( + "z_interp_stop= %s needs to be larger or equal the maximum of z_source=%s and " + "z_source_convention=%s" + % (z_interp_stop, z_source, z_source_convention) + ) + self._multi_plane_base = MultiPlaneBase( + lens_model_list=lens_model_list, + lens_redshift_list=lens_redshift_list, + cosmo=cosmo, + numerical_alpha_class=numerical_alpha_class, + z_source_convention=z_source_convention, + cosmo_interp=cosmo_interp, + z_interp_stop=z_interp_stop, + num_z_interp=num_z_interp, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) self._set_source_distances(z_source) self._observed_convention_index = observed_convention_index @@ -58,13 +81,14 @@ def __init__(self, z_source, lens_model_list, lens_redshift_list, cosmo=None, nu self._convention = PhysicalLocation() else: assert isinstance(observed_convention_index, list) - self._convention = LensedLocation(self._multi_plane_base, observed_convention_index) + self._convention = LensedLocation( + self._multi_plane_base, observed_convention_index + ) self.ignore_observed_positions = ignore_observed_positions def update_source_redshift(self, z_source): - """ - update instance of this class to compute reduced lensing quantities and time delays to a specific source - redshift + """Update instance of this class to compute reduced lensing quantities and time + delays to a specific source redshift. :param z_source: float; source redshift :return: self variables update to new redshift @@ -75,16 +99,19 @@ def update_source_redshift(self, z_source): self._set_source_distances(z_source) def _set_source_distances(self, z_source): - """ - compute the relevant angular diameter distances to a specific source redshift + """Compute the relevant angular diameter distances to a specific source + redshift. 
:param z_source: float, source redshift :return: self variables """ self._z_source = z_source - self._T_ij_start, self._T_ij_stop = self._multi_plane_base.transverse_distance_start_stop(z_start=0, - z_stop=z_source, - include_z_start=False) + ( + self._T_ij_start, + self._T_ij_stop, + ) = self._multi_plane_base.transverse_distance_start_stop( + z_start=0, z_stop=z_source, include_z_start=False + ) self._T_z_source = self._multi_plane_base._cosmo_bkg.T_xy(0, z_source) def observed2flat_convention(self, kwargs_lens): @@ -95,16 +122,18 @@ def observed2flat_convention(self, kwargs_lens): """ return self._convention(kwargs_lens) - def ray_shooting(self, theta_x, theta_y, kwargs_lens, check_convention=True, k=None): - """ - ray-tracing (backwards light cone) to the default z_source redshift + def ray_shooting( + self, theta_x, theta_y, kwargs_lens, check_convention=True, k=None + ): + """Ray-tracing (backwards light cone) to the default z_source redshift. - :param theta_x: angle in x-direction on the image - (usually arc seconds, in the same convention as lensing deflection angles) - :param theta_y: angle in y-direction on the image - (usually arc seconds, in the same convention as lensing deflection angles) + :param theta_x: angle in x-direction on the image (usually arc seconds, in the + same convention as lensing deflection angles) + :param theta_y: angle in y-direction on the image (usually arc seconds, in the + same convention as lensing deflection angles) :param kwargs_lens: lens model keyword argument list - :param check_convention: flag to check the image position convention (leave this alone) + :param check_convention: flag to check the image position convention (leave this + alone) :return: angles in the source plane """ self._check_raise(k=k) @@ -114,19 +143,38 @@ def ray_shooting(self, theta_x, theta_y, kwargs_lens, check_convention=True, k=N y = np.zeros_like(theta_y, dtype=float) alpha_x = np.array(theta_x) alpha_y = np.array(theta_y) - x, y, _, _ = self._multi_plane_base.ray_shooting_partial(x, y, alpha_x, alpha_y, z_start=0, - z_stop=self._z_source, - kwargs_lens=kwargs_lens, T_ij_start=self._T_ij_start, - T_ij_end=self._T_ij_stop) + x, y, _, _ = self._multi_plane_base.ray_shooting_partial( + x, + y, + alpha_x, + alpha_y, + z_start=0, + z_stop=self._z_source, + kwargs_lens=kwargs_lens, + T_ij_start=self._T_ij_start, + T_ij_end=self._T_ij_stop, + ) beta_x, beta_y = self.co_moving2angle_source(x, y) return beta_x, beta_y - def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, include_z_start=False, - check_convention=True, T_ij_start=None, T_ij_end=None): - """ - ray-tracing through parts of the coin, starting with (x,y) co-moving distances and angles (alpha_x, alpha_y) at - redshift z_start and then backwards to redshift z_stop + def ray_shooting_partial( + self, + x, + y, + alpha_x, + alpha_y, + z_start, + z_stop, + kwargs_lens, + include_z_start=False, + check_convention=True, + T_ij_start=None, + T_ij_end=None, + ): + """Ray-tracing through parts of the coin, starting with (x,y) co-moving + distances and angles (alpha_x, alpha_y) at redshift z_start and then backwards + to redshift z_stop. 
:param x: co-moving position [Mpc] / angle definition :param y: co-moving position [Mpc] / angle definition @@ -135,106 +183,147 @@ def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_l :param z_start: redshift of start of computation :param z_stop: redshift where output is computed :param kwargs_lens: lens model keyword argument list - :param include_z_start: bool, if True, includes the computation of the deflection angle at the same redshift as - the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will be computed! - This can lead to duplications in the computation of deflection angles. - :param check_convention: flag to check the image position convention (leave this alone) - :param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow. - If not set, will compute the distance each time this function gets executed. - :param T_ij_end: transverse angular distance between the last lens plane being computed and z_end. If not set, - will compute the distance each time this function gets executed. - :return: co-moving position (modulo angle definition) and angles at redshift z_stop + :param include_z_start: bool, if True, includes the computation of the + deflection angle at the same redshift as the start of the ray-tracing. + ATTENTION: deflection angles at the same redshift as z_stop will be + computed! This can lead to duplications in the computation of deflection + angles. + :param check_convention: flag to check the image position convention (leave this + alone) + :param T_ij_start: transverse angular distance between the starting redshift to + the first lens plane to follow. If not set, will compute the distance each + time this function gets executed. + :param T_ij_end: transverse angular distance between the last lens plane being + computed and z_end. If not set, will compute the distance each time this + function gets executed. + :return: co-moving position (modulo angle definition) and angles at redshift + z_stop """ if check_convention and not self.ignore_observed_positions: kwargs_lens = self._convention(kwargs_lens) - return self._multi_plane_base.ray_shooting_partial(x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, - include_z_start=include_z_start, T_ij_start=T_ij_start, - T_ij_end=T_ij_end) + return self._multi_plane_base.ray_shooting_partial( + x, + y, + alpha_x, + alpha_y, + z_start, + z_stop, + kwargs_lens, + include_z_start=include_z_start, + T_ij_start=T_ij_start, + T_ij_end=T_ij_end, + ) def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False): - """ - computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and - the first deflector afterwards and the last deflector before the end of the ray-tracing. + """Computes the transverse distance (T_ij) that is required by the ray-tracing + between the starting redshift and the first deflector afterwards and the last + deflector before the end of the ray-tracing. 
:param z_start: redshift of the start of the ray-tracing :param z_stop: stop of ray-tracing :param include_z_start: bool, i :return: T_ij_start, T_ij_end """ - return self._multi_plane_base.transverse_distance_start_stop(z_start, z_stop, include_z_start) + return self._multi_plane_base.transverse_distance_start_stop( + z_start, z_stop, include_z_start + ) def arrival_time(self, theta_x, theta_y, kwargs_lens, check_convention=True): - """ - light travel time relative to a straight path through the coordinate (0,0) - Negative sign means earlier arrival time + """Light travel time relative to a straight path through the coordinate (0,0) + Negative sign means earlier arrival time. :param theta_x: angle in x-direction on the image :param theta_y: angle in y-direction on the image :param kwargs_lens: lens model keyword argument list :return: travel time in unit of days """ - dt_geo, dt_grav = self.geo_shapiro_delay(theta_x, theta_y, kwargs_lens, check_convention=check_convention) + dt_geo, dt_grav = self.geo_shapiro_delay( + theta_x, theta_y, kwargs_lens, check_convention=check_convention + ) return dt_geo + dt_grav def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, check_convention=True): - """ - geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0) - Negative sign means earlier arrival time + """Geometric and Shapiro (gravitational) light travel time relative to a + straight path through the coordinate (0,0) Negative sign means earlier arrival + time. :param theta_x: angle in x-direction on the image :param theta_y: angle in y-direction on the image :param kwargs_lens: lens model keyword argument list - :param check_convention: boolean, if True goes through the lens model list and checks whether the positional - conventions are satisfied. + :param check_convention: boolean, if True goes through the lens model list and + checks whether the positional conventions are satisfied. :return: geometric delay, gravitational delay [days] """ if check_convention and not self.ignore_observed_positions: kwargs_lens = self._convention(kwargs_lens) - return self._multi_plane_base.geo_shapiro_delay(theta_x, theta_y, kwargs_lens, z_stop=self._z_source, - T_z_stop=self._T_z_source, T_ij_end=self._T_ij_stop) + return self._multi_plane_base.geo_shapiro_delay( + theta_x, + theta_y, + kwargs_lens, + z_stop=self._z_source, + T_z_stop=self._T_z_source, + T_ij_end=self._T_ij_stop, + ) def alpha(self, theta_x, theta_y, kwargs_lens, check_convention=True, k=None): - """ - reduced deflection angle + """Reduced deflection angle. 
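A minimal multi-plane usage sketch matching the ray_shooting and arrival_time interfaces touched above, going through the top-level LensModel wrapper; argument names follow the public lenstronomy API as I understand it and may vary between versions, and the deflector configuration is made up.

    from astropy.cosmology import FlatLambdaCDM
    from lenstronomy.LensModel.lens_model import LensModel

    cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    lens_model = LensModel(
        lens_model_list=["SIS", "SIS"],
        lens_redshift_list=[0.3, 0.6],
        z_source=2.0,
        multi_plane=True,
        cosmo=cosmo,
    )
    kwargs_lens = [
        {"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0},
        {"theta_E": 0.3, "center_x": 0.5, "center_y": -0.2},
    ]
    beta_x, beta_y = lens_model.ray_shooting(1.2, 0.4, kwargs_lens)
    dt_days = lens_model.arrival_time(1.2, 0.4, kwargs_lens)  # geometric + Shapiro delay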
:param theta_x: angle in x-direction :param theta_y: angle in y-direction :param kwargs_lens: lens model kwargs - :param check_convention: flag to check the image position convention (leave this alone) + :param check_convention: flag to check the image position convention (leave this + alone) :return: deflection angles in x and y directions """ self._check_raise(k=k) - beta_x, beta_y = self.ray_shooting(theta_x, theta_y, kwargs_lens, check_convention=check_convention) + beta_x, beta_y = self.ray_shooting( + theta_x, theta_y, kwargs_lens, check_convention=check_convention + ) alpha_x = theta_x - beta_x alpha_y = theta_y - beta_y return alpha_x, alpha_y - def hessian(self, theta_x, theta_y, kwargs_lens, k=None, diff=0.00000001, check_convention=True): - """ - computes the hessian components f_xx, f_yy, f_xy from f_x and f_y with numerical differentiation + def hessian( + self, + theta_x, + theta_y, + kwargs_lens, + k=None, + diff=0.00000001, + check_convention=True, + ): + """Computes the hessian components f_xx, f_yy, f_xy from f_x and f_y with + numerical differentiation. :param theta_x: x-position (preferentially arcsec) :type theta_x: numpy array :param theta_y: y-position (preferentially arcsec) :type theta_y: numpy array - :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs_lens: list of keyword arguments of lens model parameters matching + the lens model classes :param diff: numerical differential step (float) - :param check_convention: boolean, if True goes through the lens model list and checks whether the positional - conventions are satisfied. + :param check_convention: boolean, if True goes through the lens model list and + checks whether the positional conventions are satisfied. :return: f_xx, f_xy, f_yx, f_yy """ self._check_raise(k=k) if check_convention and not self.ignore_observed_positions: kwargs_lens = self._convention(kwargs_lens) - alpha_ra, alpha_dec = self.alpha(theta_x, theta_y, kwargs_lens, check_convention=False) + alpha_ra, alpha_dec = self.alpha( + theta_x, theta_y, kwargs_lens, check_convention=False + ) - alpha_ra_dx, alpha_dec_dx = self.alpha(theta_x + diff, theta_y, kwargs_lens, check_convention=False) - alpha_ra_dy, alpha_dec_dy = self.alpha(theta_x, theta_y + diff, kwargs_lens, check_convention=False) + alpha_ra_dx, alpha_dec_dx = self.alpha( + theta_x + diff, theta_y, kwargs_lens, check_convention=False + ) + alpha_ra_dy, alpha_dec_dy = self.alpha( + theta_x, theta_y + diff, kwargs_lens, check_convention=False + ) dalpha_rara = (alpha_ra_dx - alpha_ra) / diff dalpha_radec = (alpha_ra_dy - alpha_ra) / diff @@ -248,8 +337,7 @@ def hessian(self, theta_x, theta_y, kwargs_lens, k=None, diff=0.00000001, check_ return f_xx, f_xy, f_yx, f_yy def co_moving2angle_source(self, x, y): - """ - special case of the co_moving2angle definition at the source redshift + """Special case of the co_moving2angle definition at the source redshift. :param x: co-moving distance :param y: co-moving distance @@ -281,23 +369,24 @@ def set_dynamic(self): @staticmethod def _check_raise(k=None): - """ - checks whether no option to select a specific subset of deflector models is selected, as this feature is not - yet supported in multi-plane + """Checks whether no option to select a specific subset of deflector models is + selected, as this feature is not yet supported in multi-plane. 
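The hessian above is a forward finite difference of the deflection field. The same recipe applied to a toy single-plane deflection (a singular isothermal sphere standing in for MultiPlane.alpha), as a self-contained numpy sketch:

    import numpy as np

    theta_E = 1.0

    def alpha(x, y):
        # toy deflection field (singular isothermal sphere) standing in for MultiPlane.alpha
        r = np.sqrt(x**2 + y**2)
        return theta_E * x / r, theta_E * y / r

    x, y, diff = 1.2, 0.7, 1e-8
    a_x, a_y = alpha(x, y)
    a_x_dx, a_y_dx = alpha(x + diff, y)
    a_x_dy, a_y_dy = alpha(x, y + diff)

    # forward differences, exactly as in the hessian above
    f_xx = (a_x_dx - a_x) / diff
    f_xy = (a_x_dy - a_x) / diff
    f_yx = (a_y_dx - a_y) / diff
    f_yy = (a_y_dy - a_y) / diff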
- :param k: parameter that optionally indicates a sub-set of lens models being executed for single plane + :param k: parameter that optionally indicates a sub-set of lens models being + executed for single plane :return: None, optional raise """ if k is not None: - raise ValueError('no specific selection of a subset of lens models supported in multi-plane mode. Please' - 'use single plane mode or generate new instance of LensModel of the subset of profiles.') + raise ValueError( + "no specific selection of a subset of lens models supported in multi-plane mode. Please" + "use single plane mode or generate new instance of LensModel of the subset of profiles." + ) @export class PhysicalLocation(object): - """ - center_x and center_y kwargs correspond to angular location of deflectors without lensing along the LOS - """ + """center_x and center_y kwargs correspond to angular location of deflectors without + lensing along the LOS.""" def __call__(self, kwargs_lens): return kwargs_lens @@ -305,11 +394,9 @@ def __call__(self, kwargs_lens): @export class LensedLocation(object): - """ - center_x and center_y kwargs correspond to observed (lensed) locations of deflectors - given a model for the line of sight structure, compute the angular position of the deflector without lensing - contribution along the LOS - """ + """center_x and center_y kwargs correspond to observed (lensed) locations of + deflectors given a model for the line of sight structure, compute the angular + position of the deflector without lensing contribution along the LOS.""" def __init__(self, multiplane_instance, observed_convention_index): """ @@ -334,17 +421,25 @@ def __init__(self, multiplane_instance, observed_convention_index): self._inds = inds[sort] def __call__(self, kwargs_lens): - new_kwargs = deepcopy(kwargs_lens) for ind in self._inds: - theta_x = kwargs_lens[ind]['center_x'] - theta_y = kwargs_lens[ind]['center_y'] + theta_x = kwargs_lens[ind]["center_x"] + theta_y = kwargs_lens[ind]["center_y"] zstop = self._multiplane._lens_redshift_list[ind] - x, y, _, _ = self._multiplane.ray_shooting_partial(0, 0, theta_x, theta_y, 0, zstop, new_kwargs, - T_ij_start=None, T_ij_end=None) + x, y, _, _ = self._multiplane.ray_shooting_partial( + 0, + 0, + theta_x, + theta_y, + 0, + zstop, + new_kwargs, + T_ij_start=None, + T_ij_end=None, + ) T = self._multiplane._T_z_list[ind] - new_kwargs[ind]['center_x'] = x / T - new_kwargs[ind]['center_y'] = y / T + new_kwargs[ind]["center_x"] = x / T + new_kwargs[ind]["center_y"] = y / T return new_kwargs diff --git a/lenstronomy/LensModel/MultiPlane/multi_plane_base.py b/lenstronomy/LensModel/MultiPlane/multi_plane_base.py index 92a312506..2f31cc8be 100644 --- a/lenstronomy/LensModel/MultiPlane/multi_plane_base.py +++ b/lenstronomy/LensModel/MultiPlane/multi_plane_base.py @@ -3,21 +3,29 @@ from lenstronomy.LensModel.profile_list_base import ProfileListBase import lenstronomy.Util.constants as const -__all__ = ['MultiPlaneBase'] +__all__ = ["MultiPlaneBase"] class MultiPlaneBase(ProfileListBase): + """Multi-plane lensing class. + The lens model deflection angles are in units of reduced deflections from the + specified redshift of the lens to the source redshift of the class instance. """ - Multi-plane lensing class - The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the - source redshift of the class instance. 
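LensedLocation above converts observed (lensed) deflector centers into physical ones by ray-shooting the observed angle down to the deflector plane and dividing the co-moving coordinates by the transverse distance T to that plane. From the user side this is switched on per deflector; a short sketch, assuming the observed_convention_index keyword exposed by LensModel (values illustrative):

from lenstronomy.LensModel.lens_model import LensModel

# center_x / center_y of the second deflector are given as observed positions;
# index 1 tells the multi-plane module to convert them to physical positions internally
lens_model = LensModel(
    lens_model_list=["SIS", "SIS"],
    lens_redshift_list=[0.3, 0.8],
    z_source=2.0,
    multi_plane=True,
    observed_convention_index=[1],
)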
- """ - - def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None, - numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100, - kwargs_interp=None, kwargs_synthesis=None): + def __init__( + self, + lens_model_list, + lens_redshift_list, + z_source_convention, + cosmo=None, + numerical_alpha_class=None, + cosmo_interp=False, + z_interp_stop=None, + num_z_interp=100, + kwargs_interp=None, + kwargs_synthesis=None, + ): """ A description of the recursive multi-plane formalism can be found e.g. here: https://arxiv.org/abs/1312.1536 @@ -34,22 +42,32 @@ def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cos """ if z_interp_stop is None: z_interp_stop = z_source_convention - self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp) + self._cosmo_bkg = Background( + cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp + ) self._z_source_convention = z_source_convention if len(lens_redshift_list) > 0: z_lens_max = np.max(lens_redshift_list) if z_lens_max >= z_source_convention: - raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s for the ' - 'reduced lens model quantities not allowed (leads to negative reduced deflection ' - 'angles!' % (z_lens_max, z_source_convention)) + raise ValueError( + "deflector redshifts higher or equal the source redshift convention (%s >= %s for the " + "reduced lens model quantities not allowed (leads to negative reduced deflection " + "angles!" % (z_lens_max, z_source_convention) + ) if not len(lens_model_list) == len(lens_redshift_list): - raise ValueError("The length of lens_model_list does not correspond to redshift_list") + raise ValueError( + "The length of lens_model_list does not correspond to redshift_list" + ) self._lens_redshift_list = lens_redshift_list - super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class, - lens_redshift_list=lens_redshift_list, - z_source_convention=z_source_convention, kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + super(MultiPlaneBase, self).__init__( + lens_model_list, + numerical_alpha_class=numerical_alpha_class, + lens_redshift_list=lens_redshift_list, + z_source_convention=z_source_convention, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) if len(lens_model_list) < 1: self._sorted_redshift_index = [] @@ -64,8 +82,10 @@ def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cos self._reduced2physical_factor = [] else: z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index] - z_source_array = np.ones(z_sort.shape)*z_source_convention - self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array) + z_source_array = np.ones(z_sort.shape) * z_source_convention + self._reduced2physical_factor = self._cosmo_bkg.d_xy( + 0, z_source_convention + ) / self._cosmo_bkg.d_xy(z_sort, z_source_array) for idex in self._sorted_redshift_index: z_lens = self._lens_redshift_list[idex] if z_before == z_lens: @@ -77,11 +97,22 @@ def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cos self._T_z_list.append(T_z) z_before = z_lens - def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, - include_z_start=False, T_ij_start=None, T_ij_end=None): - """ - ray-tracing through parts of the coin, starting with (x,y) co-moving 
distances and angles (alpha_x, alpha_y) - at redshift z_start and then backwards to redshift z_stop + def ray_shooting_partial( + self, + x, + y, + alpha_x, + alpha_y, + z_start, + z_stop, + kwargs_lens, + include_z_start=False, + T_ij_start=None, + T_ij_end=None, + ): + """Ray-tracing through parts of the coin, starting with (x,y) co-moving + distances and angles (alpha_x, alpha_y) at redshift z_start and then backwards + to redshift z_stop. :param x: co-moving position [Mpc] :param y: co-moving position [Mpc] @@ -90,14 +121,17 @@ def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_l :param z_start: redshift of start of computation :param z_stop: redshift where output is computed :param kwargs_lens: lens model keyword argument list - :param include_z_start: bool, if True, includes the computation of the deflection angle at the same redshift as - the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will be computed - always! - This can lead to duplications in the computation of deflection angles. - :param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow. - If not set, will compute the distance each time this function gets executed. - :param T_ij_end: transverse angular distance between the last lens plane being computed and z_end. - If not set, will compute the distance each time this function gets executed. + :param include_z_start: bool, if True, includes the computation of the + deflection angle at the same redshift as the start of the ray-tracing. + ATTENTION: deflection angles at the same redshift as z_stop will be computed + always! This can lead to duplications in the computation of deflection + angles. + :param T_ij_start: transverse angular distance between the starting redshift to + the first lens plane to follow. If not set, will compute the distance each + time this function gets executed. + :param T_ij_end: transverse angular distance between the last lens plane being + computed and z_end. If not set, will compute the distance each time this + function gets executed. :return: co-moving position and angles at redshift z_stop """ x = np.array(x, dtype=float) @@ -111,7 +145,10 @@ def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_l for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] - if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: + if ( + self._start_condition(include_z_start, z_lens, z_start) + and z_lens <= z_stop + ): if first_deflector is True: if T_ij_start is None: if z_start == 0: @@ -124,7 +161,9 @@ def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_l else: delta_T = self._T_ij_list[i] x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T) - alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) + alpha_x, alpha_y = self._add_deflection( + x, y, alpha_x, alpha_y, kwargs_lens, i + ) z_lens_last = z_lens if T_ij_end is None: if z_lens_last == z_stop: @@ -137,14 +176,14 @@ def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_l return x, y, alpha_x, alpha_y def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False): - """ - computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and - the first deflector afterwards and the last deflector before the end of the ray-tracing. 
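ray_shooting_partial above walks through the sorted lens planes, alternating a free-streaming step over the transverse distance between planes with a deflection update at each plane. A stripped-down numpy sketch of that recursion with toy distances and deflector callables (not the class internals):

def ray_shoot(alpha_x, alpha_y, deflectors, T_ij_list, T_end):
    """Propagate an angle through a stack of lens planes (small-angle approximation).

    deflectors: callables returning the physical deflection at a co-moving position;
    T_ij_list: transverse distances between successive planes (first entry: start to
    first plane); T_end: distance from the last plane to the stopping redshift.
    """
    x = y = 0.0
    for deflect, delta_T in zip(deflectors, T_ij_list):
        # free streaming between planes
        x, y = x + alpha_x * delta_T, y + alpha_y * delta_T
        # backwards ray-tracing: subtract the physical deflection at the plane
        dx, dy = deflect(x, y)
        alpha_x, alpha_y = alpha_x - dx, alpha_y - dy
    # final step from the last deflector to the stopping redshift
    return x + alpha_x * T_end, y + alpha_y * T_end, alpha_x, alpha_y

# single toy plane deflecting by a constant 1e-5, transverse distances in Mpc
x_stop, y_stop, ax, ay = ray_shoot(2e-5, 0.0, [lambda x, y: (1e-5, 0.0)], [1500.0], 1600.0)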
+ """Computes the transverse distance (T_ij) that is required by the ray-tracing + between the starting redshift and the first deflector afterwards and the last + deflector before the end of the ray-tracing. :param z_start: redshift of the start of the ray-tracing :param z_stop: stop of ray-tracing - :param include_z_start: boolean, if True includes the computation of the starting position if the first - deflector is at z_start + :param include_z_start: boolean, if True includes the computation of the + starting position if the first deflector is at z_start :return: T_ij_start, T_ij_end """ z_lens_last = z_start @@ -152,7 +191,10 @@ def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False) T_ij_start = None for i, idex in enumerate(self._sorted_redshift_index): z_lens = self._lens_redshift_list[idex] - if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop: + if ( + self._start_condition(include_z_start, z_lens, z_start) + and z_lens <= z_stop + ): if first_deflector is True: T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens) first_deflector = False @@ -160,17 +202,20 @@ def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False) T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop) return T_ij_start, T_ij_end - def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None): - """ - geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0) - Negative sign means earlier arrival time + def geo_shapiro_delay( + self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None + ): + """Geometric and Shapiro (gravitational) light travel time relative to a + straight path through the coordinate (0,0) Negative sign means earlier arrival + time. 
:param theta_x: angle in x-direction on the image :param theta_y: angle in y-direction on the image :param kwargs_lens: lens model keyword argument list :param z_stop: redshift of the source to stop the backwards ray-tracing :param T_z_stop: optional, transversal angular distance from z=0 to z_stop - :param T_ij_end: optional, transversal angular distance between the last lensing plane and the source plane + :param T_ij_end: optional, transversal angular distance between the last lensing + plane and the source plane :return: dt_geo, dt_shapiro, [days] """ dt_grav = np.zeros_like(theta_x, dtype=float) @@ -193,11 +238,15 @@ def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None T_i = self._T_z_list[i - 1] beta_i_x, beta_i_y = x / T_i, y / T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j - dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) + dt_geo_new = self._geometrical_delay( + beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij + ) dt_geo += dt_geo_new x, y = x_new, y_new dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens) - alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i) + alpha_x, alpha_y = self._add_deflection( + x, y, alpha_x, alpha_y, kwargs_lens, i + ) dt_grav += dt_grav_new z_lens_last = z_lens @@ -211,7 +260,9 @@ def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None T_i = self._T_z_list[i] beta_i_x, beta_i_y = x / T_i, y / T_i beta_j_x, beta_j_y = x_new / T_j, y_new / T_j - dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij) + dt_geo_new = self._geometrical_delay( + beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij + ) dt_geo += dt_geo_new return dt_geo, dt_grav @@ -230,8 +281,7 @@ def _index_ordering(redshift_list): return sort_index def _reduced2physical_deflection(self, alpha_reduced, index_lens): - """ - alpha_reduced = D_ds/Ds alpha_physical + """alpha_reduced = D_ds/Ds alpha_physical. :param alpha_reduced: reduced deflection angle :param index_lens: integer, index of the deflector plane @@ -253,7 +303,9 @@ def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens): theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k]) - delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention) + delay_days = self._lensing_potential2time_delay( + potential, z_lens, z_source=self._z_source_convention + ) return -delay_days @staticmethod @@ -271,12 +323,14 @@ def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij): """ d_beta_x = beta_j_x - beta_i_x d_beta_y = beta_j_y - beta_i_y - tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2 - return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2 + tau_ij = ( + T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2 + ) + return tau_ij * (d_beta_x**2 + d_beta_y**2) / 2 def _lensing_potential2time_delay(self, potential, z_lens, z_source): - """ - transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0 + """Transforms the lensing potential (in units arcsec^2) to a gravitational time- + delay as measured at z=0. 
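The _geometrical_delay helper reformatted above evaluates dt_geo = (T_i T_j / T_ij) (Δβ² / 2) with a prefactor that converts Mpc and arcsec² into days. A direct numerical check of that prefactor with standard physical constants (the values here are written out by hand, not taken from lenstronomy.Util.constants, and the distances are toy numbers):

import numpy as np

Mpc = 3.0857e22            # m
c = 2.9979e8               # m / s
day_s = 86400.0            # s
arcsec = np.pi / 648000.0  # rad

T_i, T_j, T_ij = 1200.0, 3300.0, 2400.0  # transverse distances in Mpc
d_beta = 0.5                             # change in angle between planes, arcsec

tau_ij = T_i * T_j / T_ij * Mpc / c / day_s * arcsec**2
dt_geo = tau_ij * d_beta**2 / 2          # geometric delay in days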
:param potential: lensing potential :param z_lens: redshift of the deflector @@ -288,8 +342,7 @@ def _lensing_potential2time_delay(self, potential, z_lens, z_source): return delay_days def _co_moving2angle(self, x, y, index): - """ - transforms co-moving distances Mpc into angles on the sky (radian) + """Transforms co-moving distances Mpc into angles on the sky (radian) :param x: co-moving distance :param y: co-moving distance @@ -303,10 +356,9 @@ def _co_moving2angle(self, x, y, index): @staticmethod def _ray_step(x, y, alpha_x, alpha_y, delta_T): - """ - ray propagation with small angle approximation - The difference to _ray_step_add() is that the previous input position (x, y) do NOT get overwritten and are - still accessible. + """Ray propagation with small angle approximation The difference to + _ray_step_add() is that the previous input position (x, y) do NOT get + overwritten and are still accessible. :param x: co-moving x-position :param y: co-moving y-position @@ -321,9 +373,8 @@ def _ray_step(x, y, alpha_x, alpha_y, delta_T): @staticmethod def _ray_step_add(x, y, alpha_x, alpha_y, delta_T): - """ - ray propagation with small angle approximation - The difference to _ray_step() is that the previous input position (x, y) do get overwritten, which is faster. + """Ray propagation with small angle approximation The difference to _ray_step() + is that the previous input position (x, y) do get overwritten, which is faster. :param x: co-moving x-position :param y: co-moving y-position @@ -337,20 +388,24 @@ def _ray_step_add(x, y, alpha_x, alpha_y, delta_T): return x, y def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index): - """ - adds the physical deflection angle of a single lens plane to the deflection field + """Adds the physical deflection angle of a single lens plane to the deflection + field. 
:param x: co-moving distance at the deflector plane :param y: co-moving distance at the deflector plane :param alpha_x: physical angle (radian) before the deflector plane :param alpha_y: physical angle (radian) before the deflector plane :param kwargs_lens: lens model parameter kwargs - :param index: index of the lens model to be added in sorted redshift list convention - :return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective) + :param index: index of the lens model to be added in sorted redshift list + convention + :return: updated physical deflection after deflector plane (in a backwards ray- + tracing perspective) """ theta_x, theta_y = self._co_moving2angle(x, y, index) k = self._sorted_redshift_index[index] - alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k]) + alpha_x_red, alpha_y_red = self.func_list[k].derivatives( + theta_x, theta_y, **kwargs_lens[k] + ) alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index) alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index) return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys diff --git a/lenstronomy/LensModel/Profiles/__init__.py b/lenstronomy/LensModel/Profiles/__init__.py index 145b8edf3..d34951c17 100644 --- a/lenstronomy/LensModel/Profiles/__init__.py +++ b/lenstronomy/LensModel/Profiles/__init__.py @@ -1,4 +1,4 @@ -__author__ = 'Simon Birrer' -__email__ = 'sibirrer@gmail.com' -__version__ = '0.1.0' -__credits__ = 'ETH Zurich, UCLA' +__author__ = "Simon Birrer" +__email__ = "sibirrer@gmail.com" +__version__ = "0.1.0" +__credits__ = "ETH Zurich, UCLA" diff --git a/lenstronomy/LensModel/Profiles/arc_perturbations.py b/lenstronomy/LensModel/Profiles/arc_perturbations.py index c36e6a3fb..84918c760 100644 --- a/lenstronomy/LensModel/Profiles/arc_perturbations.py +++ b/lenstronomy/LensModel/Profiles/arc_perturbations.py @@ -3,13 +3,13 @@ from lenstronomy.Util import derivative_util import numpy as np -__all__ = ['ArcPerturbations'] +__all__ = ["ArcPerturbations"] class ArcPerturbations(LensProfileBase): - """ - uses radial and tangential fourier modes within a specific range in both directions to perturb a lensing potential - """ + """Uses radial and tangential fourier modes within a specific range in both + directions to perturb a lensing potential.""" + def __init__(self): super(ArcPerturbations, self).__init__() self._2_pi = np.pi * 2 @@ -91,9 +91,28 @@ def hessian(self, x, y, coeff, d_r, d_phi, center_x, center_y): d_theta_dyy = derivative_util.d_phi_dyy(x_, y_) d_theta_dxy = derivative_util.d_phi_dxy(x_, y_) - f_xx = d_phi_dr2 * dr_dx**2 + d_phi_dr * dr_dxx + d_phi_d_theta2 * d_theta_dx**2 + d_phi_d_theta * d_theta_dxx + 2 * d_phi_dr_dtheta * dr_dx * d_theta_dx - f_yy = d_phi_dr2 * dr_dy**2 + d_phi_dr * dr_dyy + d_phi_d_theta2 * d_theta_dy**2 + d_phi_d_theta * d_theta_dyy + 2 * d_phi_dr_dtheta * dr_dy * d_theta_dy - f_xy = d_phi_dr2 * dr_dx * dr_dy + d_phi_dr * dr_dxy + d_phi_d_theta2 * d_theta_dx * d_theta_dy + d_phi_d_theta * d_theta_dxy + d_phi_dr_dtheta * dr_dx * d_theta_dy + d_phi_dr_dtheta * dr_dy * d_theta_dx + f_xx = ( + d_phi_dr2 * dr_dx**2 + + d_phi_dr * dr_dxx + + d_phi_d_theta2 * d_theta_dx**2 + + d_phi_d_theta * d_theta_dxx + + 2 * d_phi_dr_dtheta * dr_dx * d_theta_dx + ) + f_yy = ( + d_phi_dr2 * dr_dy**2 + + d_phi_dr * dr_dyy + + d_phi_d_theta2 * d_theta_dy**2 + + d_phi_d_theta * d_theta_dyy + + 2 * d_phi_dr_dtheta * dr_dy * d_theta_dy + ) + f_xy = ( + d_phi_dr2 * dr_dx * dr_dy + + d_phi_dr * dr_dxy + + 
d_phi_d_theta2 * d_theta_dx * d_theta_dy + + d_phi_d_theta * d_theta_dxy + + d_phi_dr_dtheta * dr_dx * d_theta_dy + + d_phi_dr_dtheta * dr_dy * d_theta_dx + ) return f_xx * coeff, f_xy * coeff, f_xy * coeff, f_yy * coeff @staticmethod @@ -104,12 +123,11 @@ def _phi_r(r, d_r): :param d_r: period of radial sinusoidal in units of angle :return: radial component of the potential """ - return np.cos(r/d_r) + return np.cos(r / d_r) @staticmethod def _d_phi_r(r, d_r): - """ - radial derivatives + """Radial derivatives. :param r: numpy array, radius :param d_r: period of radial sinusoidal in units of angle @@ -119,8 +137,7 @@ def _d_phi_r(r, d_r): @staticmethod def _d_phi_r2(r, d_r): - """ - radial second derivatives + """Radial second derivatives. :param r: numpy array, radius :param d_r: period of radial sinusoidal in units of angle @@ -140,8 +157,7 @@ def _phi_theta(theta, d_theta): @staticmethod def _d_phi_theta(theta, d_theta): - """ - tangential derivatives + """Tangential derivatives. :param theta: numpy array, angle :param d_theta: period of tangential sinusoidal in radian @@ -151,8 +167,7 @@ def _d_phi_theta(theta, d_theta): @staticmethod def _d_phi_theta2(r, d_theta): - """ - tangential derivatives + """Tangential derivatives. :param r: numpy array, radius :param d_theta: period of tangential sinusoidal in radian diff --git a/lenstronomy/LensModel/Profiles/base_profile.py b/lenstronomy/LensModel/Profiles/base_profile.py index d6674996c..84c049588 100644 --- a/lenstronomy/LensModel/Profiles/base_profile.py +++ b/lenstronomy/LensModel/Profiles/base_profile.py @@ -1,48 +1,49 @@ -__all__ = ['LensProfileBase'] +__all__ = ["LensProfileBase"] class LensProfileBase(object): - """ - this class acts as the base class of all lens model functions and indicates raise statements and default outputs - if these functions are not defined in the specific lens model class - """ + """This class acts as the base class of all lens model functions and indicates raise + statements and default outputs if these functions are not defined in the specific + lens model class.""" def __init__(self, *args, **kwargs): self._static = False def function(self, *args, **kwargs): - """ - lensing potential - (only needed for specific calculations, such as time delays) + """Lensing potential (only needed for specific calculations, such as time + delays) :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('function definition is not defined in the profile you want to execute.') + raise ValueError( + "function definition is not defined in the profile you want to execute." + ) def derivatives(self, *args, **kwargs): - """ - deflection angles + """Deflection angles. :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('derivatives definition is not defined in the profile you want to execute.') + raise ValueError( + "derivatives definition is not defined in the profile you want to execute." + ) def hessian(self, *args, **kwargs): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2. :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('hessian definition is not defined in the profile you want to execute.') + raise ValueError( + "hessian definition is not defined in the profile you want to execute." 
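The radial part of the perturbation above is phi_r(r) = cos(r / d_r), so its first and second derivatives are -sin(r / d_r) / d_r and -cos(r / d_r) / d_r². A quick numerical sanity check of those expressions, independent of the class:

import numpy as np

d_r = 0.7
r = np.linspace(0.1, 3.0, 50)

phi = np.cos(r / d_r)
d_phi = -np.sin(r / d_r) / d_r
d_phi2 = -np.cos(r / d_r) / d_r**2

eps = 1e-6
# forward finite differences agree with the analytic derivatives
assert np.allclose((np.cos((r + eps) / d_r) - phi) / eps, d_phi, atol=1e-4)
assert np.allclose((-np.sin((r + eps) / d_r) / d_r - d_phi) / eps, d_phi2, atol=1e-4)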
+ ) def density_lens(self, *args, **kwargs): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. - (optional definition) + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. (optional definition) .. math:: \\kappa(x, y) = \\int_{-\\infty}^{\\infty} \\rho(x, y, z) dz @@ -50,23 +51,24 @@ def density_lens(self, *args, **kwargs): :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('density_lens definition is not defined in the profile you want to execute.') + raise ValueError( + "density_lens definition is not defined in the profile you want to execute." + ) def mass_3d_lens(self, *args, **kwargs): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units - The input parameter are identical as for the derivatives definition. - (optional definition) + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units The input parameter are identical as for the derivatives + definition. (optional definition) :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('mass_3d_lens definition is not defined in the profile you want to execute.') + raise ValueError( + "mass_3d_lens definition is not defined in the profile you want to execute." + ) def mass_2d_lens(self, *args, **kwargs): - """ - two-dimensional enclosed mass at radius r - (optional definition) + """Two-dimensional enclosed mass at radius r (optional definition) .. math:: M_{2d}(R) = \\int_{0}^{R} \\rho_{2d}(r) 2\\pi r dr @@ -83,15 +85,17 @@ def mass_2d_lens(self, *args, **kwargs): :param kwargs: keywords of the profile :return: raise as definition is not defined """ - raise ValueError('mass_2d_lens definition is not defined in the profiel you want to execute.') + raise ValueError( + "mass_2d_lens definition is not defined in the profiel you want to execute." + ) def set_static(self, **kwargs): - """ - pre-computes certain computations that do only relate to the lens model parameters and not to the specific - position where to evaluate the lens model + """Pre-computes certain computations that do only relate to the lens model + parameters and not to the specific position where to evaluate the lens model. 
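LensProfileBase above only raises informative ValueErrors; a concrete profile overrides the definitions it actually supports. A minimal hypothetical subclass (not part of lenstronomy) implementing the point-mass potential, deflection and Hessian with the signatures shown above:

import numpy as np
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase


class PointMassToy(LensProfileBase):
    """Toy point-mass profile, for illustration only."""

    param_names = ["theta_E", "center_x", "center_y"]

    def function(self, x, y, theta_E, center_x=0, center_y=0):
        # lensing potential psi = theta_E^2 * ln(r)
        r2 = (x - center_x) ** 2 + (y - center_y) ** 2
        return theta_E**2 * 0.5 * np.log(r2)

    def derivatives(self, x, y, theta_E, center_x=0, center_y=0):
        x_, y_ = x - center_x, y - center_y
        r2 = x_**2 + y_**2
        return theta_E**2 * x_ / r2, theta_E**2 * y_ / r2

    def hessian(self, x, y, theta_E, center_x=0, center_y=0):
        x_, y_ = x - center_x, y - center_y
        r2 = x_**2 + y_**2
        f_xx = theta_E**2 * (y_**2 - x_**2) / r2**2
        f_yy = theta_E**2 * (x_**2 - y_**2) / r2**2
        f_xy = -2 * theta_E**2 * x_ * y_ / r2**2
        return f_xx, f_xy, f_xy, f_yy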
:param kwargs: lens model parameters - :return: no return, for certain lens model some private self variables are initiated + :return: no return, for certain lens model some private self variables are + initiated """ pass diff --git a/lenstronomy/LensModel/Profiles/chameleon.py b/lenstronomy/LensModel/Profiles/chameleon.py index 916ead01f..3b732e076 100644 --- a/lenstronomy/LensModel/Profiles/chameleon.py +++ b/lenstronomy/LensModel/Profiles/chameleon.py @@ -5,18 +5,34 @@ import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Chameleon(LensProfileBase): - """ - class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile - - """ - param_names = ['alpha_1', 'w_c', 'w_t', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'alpha_1': 0, 'w_c': 0, 'w_t': 0, 'e1': -0.8, 'e2': -0.8, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'alpha_1': 100, 'w_c': 100, 'w_t': 100, 'e1': 0.8, 'e2': 0.8, 'center_x': 100, 'center_y': 100} + """Class of the Chameleon model (See Suyu+2014) an elliptical truncated double + isothermal profile.""" + + param_names = ["alpha_1", "w_c", "w_t", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "alpha_1": 0, + "w_c": 0, + "w_t": 0, + "e1": -0.8, + "e2": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "alpha_1": 100, + "w_c": 100, + "w_t": 100, + "e1": 0.8, + "e2": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self, static=False): self._nie_1 = NIE() @@ -39,9 +55,15 @@ def function(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): :return: lensing potential """ - theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2) - f_1 = self._nie_1.function(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y) - f_2 = self._nie_2.function(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y) + theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert( + alpha_1, w_c, w_t, e1, e2 + ) + f_1 = self._nie_1.function( + x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y + ) + f_2 = self._nie_2.function( + x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y + ) f_ = f_1 - f_2 return f_ @@ -59,9 +81,15 @@ def derivatives(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): :param center_y: dec center :return: deflection angles (RA, DEC) """ - theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2) - f_x_1, f_y_1 = self._nie_1.derivatives(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y) - f_x_2, f_y_2 = self._nie_2.derivatives(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y) + theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert( + alpha_1, w_c, w_t, e1, e2 + ) + f_x_1, f_y_1 = self._nie_1.derivatives( + x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y + ) + f_x_2, f_y_2 = self._nie_2.derivatives( + x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y + ) f_x = f_x_1 - f_x_2 f_y = f_y_1 - f_y_2 return f_x, f_y @@ -80,9 +108,15 @@ def hessian(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): :param center_y: dec center :return: second derivatives of the lensing potential (Hessian: f_xx, f_xy, f_yx, f_yy) """ - theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2) - f_xx_1, f_xy_1, f_yx_1, f_yy_1 = self._nie_1.hessian(x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y) - f_xx_2, f_xy_2, 
f_yx_2, f_yy_2 = self._nie_2.hessian(x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y) + theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert( + alpha_1, w_c, w_t, e1, e2 + ) + f_xx_1, f_xy_1, f_yx_1, f_yy_1 = self._nie_1.hessian( + x, y, theta_E_conv, e1, e2, s_scale_1, center_x, center_y + ) + f_xx_2, f_xy_2, f_yx_2, f_yy_2 = self._nie_2.hessian( + x, y, theta_E_conv, e1, e2, s_scale_2, center_x, center_y + ) f_xx = f_xx_1 - f_xx_2 f_yy = f_yy_1 - f_yy_2 f_xy = f_xy_1 - f_xy_2 @@ -90,8 +124,7 @@ def hessian(self, x, y, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): return f_xx, f_xy, f_yx, f_yy def density_lens(self, r, alpha_1, w_c, w_t, e1=0, e2=0, center_x=0, center_y=0): - """ - spherical average density as a function of 3d radius + """Spherical average density as a function of 3d radius. :param r: 3d radius :param alpha_1: deflection angle at 1 (arcseconds) from the center @@ -103,15 +136,20 @@ def density_lens(self, r, alpha_1, w_c, w_t, e1=0, e2=0, center_x=0, center_y=0) :param center_y: dec center :return: matter density at 3d radius r """ - theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2) - f_1 = self._nie_1.density_lens(r, theta_E_conv, e1, e2, s_scale_1, center_x, center_y) - f_2 = self._nie_2.density_lens(r, theta_E_conv, e1, e2, s_scale_2, center_x, center_y) + theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert( + alpha_1, w_c, w_t, e1, e2 + ) + f_1 = self._nie_1.density_lens( + r, theta_E_conv, e1, e2, s_scale_1, center_x, center_y + ) + f_2 = self._nie_2.density_lens( + r, theta_E_conv, e1, e2, s_scale_2, center_x, center_y + ) f_ = f_1 - f_2 return f_ def mass_3d_lens(self, r, alpha_1, w_c, w_t, e1=0, e2=0, center_x=0, center_y=0): - """ - mass enclosed 3d radius + """Mass enclosed 3d radius. :param r: 3d radius :param alpha_1: deflection angle at 1 (arcseconds) from the center @@ -123,16 +161,21 @@ def mass_3d_lens(self, r, alpha_1, w_c, w_t, e1=0, e2=0, center_x=0, center_y=0) :param center_y: dec center :return: mass enclosed 3d radius r """ - theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert(alpha_1, w_c, w_t, e1, e2) - m_1 = self._nie_1.mass_3d_lens(r, theta_E_conv, e1, e2, s_scale_1, center_x, center_y) - m_2 = self._nie_2.mass_3d_lens(r, theta_E_conv, e1, e2, s_scale_2, center_x, center_y) + theta_E_conv, w_c, w_t, s_scale_1, s_scale_2 = self.param_convert( + alpha_1, w_c, w_t, e1, e2 + ) + m_1 = self._nie_1.mass_3d_lens( + r, theta_E_conv, e1, e2, s_scale_1, center_x, center_y + ) + m_2 = self._nie_2.mass_3d_lens( + r, theta_E_conv, e1, e2, s_scale_2, center_x, center_y + ) m_ = m_1 - m_2 return m_ def param_convert(self, alpha_1, w_c, w_t, e1, e2): - """ - convert the parameter alpha_1 (deflection angle one arcsecond from the center) into the - "Einstein radius" scale parameter of the two NIE profiles + """Convert the parameter alpha_1 (deflection angle one arcsecond from the + center) into the "Einstein radius" scale parameter of the two NIE profiles. 
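param_convert above rescales alpha_1, the deflection one arcsecond from the center, into the shared "Einstein radius" of the two NIE components, using the fact that the Chameleon profile is the difference NIE(w_c) - NIE(w_t). A short usage sketch checking that property on the axis; the parameter values are illustrative and the spherical limit e1 = e2 = 0 keeps the check simple:

from lenstronomy.LensModel.Profiles.chameleon import Chameleon

chameleon = Chameleon()
kwargs = {"alpha_1": 1.2, "w_c": 0.1, "w_t": 1.0, "e1": 0.0, "e2": 0.0}

# deflection 1 arcsecond from the center along the x-axis
f_x, f_y = chameleon.derivatives(1.0, 0.0, **kwargs)
# by construction f_x should be close to alpha_1 (up to the smoothing scales)
print(f_x)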
:param alpha_1: deflection angle at 1 (arcseconds) from the center :param w_c: see Suyu+2014 @@ -142,7 +185,13 @@ def param_convert(self, alpha_1, w_c, w_t, e1, e2): :return: """ if self._static is True: - return self._theta_convert_static, self._w_c_static, self._w_t_stactic, self._s_scale_1_static, self._s_scale_2_static + return ( + self._theta_convert_static, + self._w_c_static, + self._w_t_stactic, + self._s_scale_1_static, + self._s_scale_2_static, + ) return self._param_convert(alpha_1, w_c, w_t, e1, e2) def _param_convert(self, alpha_1, w_c, w_t, e1, e2): @@ -150,14 +199,18 @@ def _param_convert(self, alpha_1, w_c, w_t, e1, e2): return 0, w_t, w_c, 1, 1 s_scale_1 = w_c s_scale_2 = w_t - f_x_1, f_y_1 = self._nie_1.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_1) - f_x_2, f_y_2 = self._nie_2.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_2) + f_x_1, f_y_1 = self._nie_1.derivatives( + 1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_1 + ) + f_x_2, f_y_2 = self._nie_2.derivatives( + 1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_2 + ) f_x = f_x_1 - f_x_2 theta_E_convert = alpha_1 / f_x phi_G, q = param_util.ellipticity2phi_q(e1, e2) # TODO: is this next conversion really needed since the NIE definition is already in the average sense? - s_scale_1 = np.sqrt(4 * w_c ** 2 / (1. + q) ** 2) - s_scale_2 = np.sqrt(4 * w_t ** 2 / (1. + q) ** 2) + s_scale_1 = np.sqrt(4 * w_c**2 / (1.0 + q) ** 2) + s_scale_2 = np.sqrt(4 * w_t**2 / (1.0 + q) ** 2) return theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 def set_static(self, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): @@ -173,9 +226,29 @@ def set_static(self, alpha_1, w_c, w_t, e1, e2, center_x=0, center_y=0): :return: """ self._static = True - self._theta_convert_static, self._w_c_static, self._w_t_stactic, self._s_scale_1_static, self._s_scale_2_static = self._param_convert(alpha_1, w_c, w_t, e1, e2) - self._nie_1.set_static(self._theta_convert_static, e1, e2, self._s_scale_1_static, center_x, center_y) - self._nie_2.set_static(self._theta_convert_static, e1, e2, self._s_scale_2_static, center_x, center_y) + ( + self._theta_convert_static, + self._w_c_static, + self._w_t_stactic, + self._s_scale_1_static, + self._s_scale_2_static, + ) = self._param_convert(alpha_1, w_c, w_t, e1, e2) + self._nie_1.set_static( + self._theta_convert_static, + e1, + e2, + self._s_scale_1_static, + center_x, + center_y, + ) + self._nie_2.set_static( + self._theta_convert_static, + e1, + e2, + self._s_scale_2_static, + center_x, + center_y, + ) def set_dynamic(self): """ @@ -183,15 +256,15 @@ def set_dynamic(self): :return: """ self._static = False - if hasattr(self, '_theta_convert_static'): + if hasattr(self, "_theta_convert_static"): del self._theta_convert_static - if hasattr(self, '_w_c_static'): + if hasattr(self, "_w_c_static"): del self._w_c_static - if hasattr(self, '_w_t_stactic'): + if hasattr(self, "_w_t_stactic"): del self._w_t_stactic - if hasattr(self, '_s_scale_1_static'): + if hasattr(self, "_s_scale_1_static"): del self._s_scale_1_static - if hasattr(self, '_s_scale_2_static'): + if hasattr(self, "_s_scale_2_static"): del self._s_scale_2_static self._nie_1.set_dynamic() self._nie_2.set_dynamic() @@ -199,24 +272,74 @@ def set_dynamic(self): @export class DoubleChameleon(LensProfileBase): - """ - class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile - - """ - param_names = ['alpha_1', 'ratio', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', 'e12', 'e22', 'center_x', 'center_y'] - 
lower_limit_default = {'alpha_1': 0, 'ratio': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, 'e21': -0.8, - 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'alpha_1': 100, 'ratio': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8, - 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8, - 'center_x': 100, 'center_y': 100} + """Class of the Chameleon model (See Suyu+2014) an elliptical truncated double + isothermal profile.""" + + param_names = [ + "alpha_1", + "ratio", + "w_c1", + "w_t1", + "e11", + "e21", + "w_c2", + "w_t2", + "e12", + "e22", + "center_x", + "center_y", + ] + lower_limit_default = { + "alpha_1": 0, + "ratio": 0, + "w_c1": 0, + "w_t1": 0, + "e11": -0.8, + "e21": -0.8, + "w_c2": 0, + "w_t2": 0, + "e12": -0.8, + "e22": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "alpha_1": 100, + "ratio": 100, + "w_c1": 100, + "w_t1": 100, + "e11": 0.8, + "e21": 0.8, + "w_c2": 100, + "w_t2": 100, + "e12": 0.8, + "e22": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._chameleon_1 = Chameleon() self._chameleon_2 = Chameleon() super(DoubleChameleon, self).__init__() - def function(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def function( + self, + x, + y, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: ra-coordinate :param y: dec-coordinate @@ -235,11 +358,39 @@ def function(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, :return: lensing potential """ - f_1 = self._chameleon_1.function(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self._chameleon_2.function(x, y, alpha_1 / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y) + f_1 = self._chameleon_1.function( + x, + y, + alpha_1 / (1.0 + 1.0 / ratio), + w_c1, + w_t1, + e11, + e21, + center_x, + center_y, + ) + f_2 = self._chameleon_2.function( + x, y, alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_1 + f_2 - def derivatives(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def derivatives( + self, + x, + y, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: ra-coordinate :param y: dec-coordinate @@ -257,11 +408,39 @@ def derivatives(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e1 :param center_y: dec center :return: deflection angles (RA, DEC) """ - f_x1, f_y1 = self._chameleon_1.derivatives(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_x2, f_y2 = self._chameleon_2.derivatives(x, y, alpha_1 / (1. 
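The two components above share alpha_1 through the split amp1 = alpha_1 / (1 + 1/ratio) and amp2 = alpha_1 / (1 + ratio), so the amplitudes add back to alpha_1 and their ratio is amp1 / amp2 = ratio. A two-line numeric check:

alpha_1, ratio = 1.5, 4.0
amp1 = alpha_1 / (1.0 + 1.0 / ratio)
amp2 = alpha_1 / (1.0 + ratio)
assert abs(amp1 + amp2 - alpha_1) < 1e-12 and abs(amp1 / amp2 - ratio) < 1e-12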
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y) + f_x1, f_y1 = self._chameleon_1.derivatives( + x, + y, + alpha_1 / (1.0 + 1.0 / ratio), + w_c1, + w_t1, + e11, + e21, + center_x, + center_y, + ) + f_x2, f_y2 = self._chameleon_2.derivatives( + x, y, alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_x1 + f_x2, f_y1 + f_y2 - def hessian(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def hessian( + self, + x, + y, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: ra-coordinate :param y: dec-coordinate @@ -279,11 +458,43 @@ def hessian(self, x, y, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e :param center_y: dec center :return: second derivatives of the lensing potential (Hessian: f_xx, f_yy, f_xy) """ - f_xx1, f_xy1, f_yx1, f_yy1, = self._chameleon_1.hessian(x, y, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_xx2, f_xy2, f_yx2, f_yy2 = self._chameleon_2.hessian(x, y, alpha_1 / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y) + ( + f_xx1, + f_xy1, + f_yx1, + f_yy1, + ) = self._chameleon_1.hessian( + x, + y, + alpha_1 / (1.0 + 1.0 / ratio), + w_c1, + w_t1, + e11, + e21, + center_x, + center_y, + ) + f_xx2, f_xy2, f_yx2, f_yy2 = self._chameleon_2.hessian( + x, y, alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_xx1 + f_xx2, f_xy1 + f_xy2, f_xy1 + f_xy2, f_yy1 + f_yy2 - def density_lens(self, r, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def density_lens( + self, + r, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param r: 3d radius :param alpha_1: deflection angle at 1 (arcseconds) from the center @@ -301,11 +512,30 @@ def density_lens(self, r, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, :return: 3d density at radius r """ - f_1 = self._chameleon_1.density_lens(r, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self._chameleon_2.density_lens(r, alpha_1 / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y) + f_1 = self._chameleon_1.density_lens( + r, alpha_1 / (1.0 + 1.0 / ratio), w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self._chameleon_2.density_lens( + r, alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_1 + f_2 - def mass_3d_lens(self, r, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def mass_3d_lens( + self, + r, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param r: 3d radius :param alpha_1: deflection angle at 1 (arcseconds) from the center @@ -323,13 +553,35 @@ def mass_3d_lens(self, r, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, :return: mass enclosed 3d radius """ - m_1 = self._chameleon_1.mass_3d_lens(r, alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - m_2 = self._chameleon_2.mass_3d_lens(r, alpha_1 / (1. 
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y) + m_1 = self._chameleon_1.mass_3d_lens( + r, alpha_1 / (1.0 + 1.0 / ratio), w_c1, w_t1, e11, e21, center_x, center_y + ) + m_2 = self._chameleon_2.mass_3d_lens( + r, alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return m_1 + m_2 - def set_static(self, alpha_1, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): - self._chameleon_1.set_static(alpha_1 / (1. + 1. / ratio), w_c1, w_t1, e11, e21, center_x, center_y) - self._chameleon_2.set_static(alpha_1 / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y) + def set_static( + self, + alpha_1, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): + self._chameleon_1.set_static( + alpha_1 / (1.0 + 1.0 / ratio), w_c1, w_t1, e11, e21, center_x, center_y + ) + self._chameleon_2.set_static( + alpha_1 / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) def set_dynamic(self): self._chameleon_1.set_dynamic() @@ -338,20 +590,66 @@ def set_dynamic(self): @export class TripleChameleon(LensProfileBase): - """ - class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile - - """ - param_names = ['alpha_1', 'ratio12', 'ratio13', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', 'e12', 'e22', 'w_c3', 'w_t3', 'e13', - 'e23', 'center_x', 'center_y'] - lower_limit_default = {'alpha_1': 0, 'ratio12': 0, 'ratio13': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, 'e21': -0.8, - 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8, - 'w_c3': 0, 'w_t3': 0, 'e13': -0.8, 'e23': -0.8, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'alpha_1': 100, 'ratio12': 100, 'ratio13': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8, - 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8, - 'w_c3': 100, 'w_t3': 100, 'e13': 0.8, 'e23': 0.8, - 'center_x': 100, 'center_y': 100} + """Class of the Chameleon model (See Suyu+2014) an elliptical truncated double + isothermal profile.""" + + param_names = [ + "alpha_1", + "ratio12", + "ratio13", + "w_c1", + "w_t1", + "e11", + "e21", + "w_c2", + "w_t2", + "e12", + "e22", + "w_c3", + "w_t3", + "e13", + "e23", + "center_x", + "center_y", + ] + lower_limit_default = { + "alpha_1": 0, + "ratio12": 0, + "ratio13": 0, + "w_c1": 0, + "w_t1": 0, + "e11": -0.8, + "e21": -0.8, + "w_c2": 0, + "w_t2": 0, + "e12": -0.8, + "e22": -0.8, + "w_c3": 0, + "w_t3": 0, + "e13": -0.8, + "e23": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "alpha_1": 100, + "ratio12": 100, + "ratio13": 100, + "w_c1": 100, + "w_t1": 100, + "e11": 0.8, + "e21": 0.8, + "w_c2": 100, + "w_t2": 100, + "e12": 0.8, + "e22": 0.8, + "w_c3": 100, + "w_t3": 100, + "e13": 0.8, + "e23": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._chameleon_1 = Chameleon() @@ -368,13 +666,33 @@ def _ratio_definition(alpha_1, ratio12, ratio13): :param ratio13: ratio of first to third amplitude :return: amplitudes of individual chameleon profiles """ - amp1 = alpha_1 / (1. + 1. / ratio12 + 1. 
/ ratio13) + amp1 = alpha_1 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 return amp1, amp2, amp3 - def function(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def function( + self, + x, + y, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param alpha_1: @@ -393,13 +711,39 @@ def function(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, :return: """ amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) - f_1 = self._chameleon_1.function(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self._chameleon_2.function(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - f_3 = self._chameleon_3.function(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y) + f_1 = self._chameleon_1.function( + x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self._chameleon_2.function( + x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + f_3 = self._chameleon_3.function( + x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) return f_1 + f_2 + f_3 - def derivatives(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, - e23, center_x=0, center_y=0): + def derivatives( + self, + x, + y, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param alpha_1: @@ -418,13 +762,39 @@ def derivatives(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c :return: """ amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) - f_x1, f_y1 = self._chameleon_1.derivatives(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - f_x2, f_y2 = self._chameleon_2.derivatives(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - f_x3, f_y3 = self._chameleon_3.derivatives(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y) + f_x1, f_y1 = self._chameleon_1.derivatives( + x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + f_x2, f_y2 = self._chameleon_2.derivatives( + x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + f_x3, f_y3 = self._chameleon_3.derivatives( + x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) return f_x1 + f_x2 + f_x3, f_y1 + f_y2 + f_y3 - def hessian(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def hessian( + self, + x, + y, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param alpha_1: @@ -443,13 +813,43 @@ def hessian(self, x, y, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w :return: """ amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) - f_xx1, f_xy1, f_yx1, f_yy1 = self._chameleon_1.hessian(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - f_xx2, f_xy2, f_yx2, f_yy2 = self._chameleon_2.hessian(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - f_xx3, f_xy3, f_yx3, f_yy3 = self._chameleon_3.hessian(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y) - return f_xx1 + f_xx2 + f_xx3, f_xy1 + f_xy2 + f_xy3, f_yx1 + f_yx2 + f_yx3, f_yy1 + f_yy2 + f_yy3 - - def density_lens(self, r, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, 
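_ratio_definition above generalizes the same split to three components: amp1 = alpha_1 / (1 + 1/ratio12 + 1/ratio13), amp2 = amp1 / ratio12 and amp3 = amp1 / ratio13, so the three amplitudes again sum to alpha_1. A quick check:

alpha_1, ratio12, ratio13 = 2.0, 3.0, 5.0
amp1 = alpha_1 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13)
amp2 = amp1 / ratio12
amp3 = amp1 / ratio13
assert abs(amp1 + amp2 + amp3 - alpha_1) < 1e-12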
- center_x=0, center_y=0): + f_xx1, f_xy1, f_yx1, f_yy1 = self._chameleon_1.hessian( + x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + f_xx2, f_xy2, f_yx2, f_yy2 = self._chameleon_2.hessian( + x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + f_xx3, f_xy3, f_yx3, f_yy3 = self._chameleon_3.hessian( + x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) + return ( + f_xx1 + f_xx2 + f_xx3, + f_xy1 + f_xy2 + f_xy3, + f_yx1 + f_yx2 + f_yx3, + f_yy1 + f_yy2 + f_yy3, + ) + + def density_lens( + self, + r, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param r: 3d radius @@ -469,13 +869,38 @@ def density_lens(self, r, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, :return: density at radius r (spherical average) """ amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) - f_1 = self._chameleon_1.density_lens(r, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self._chameleon_2.density_lens(r, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - f_3 = self._chameleon_3.density_lens(r, amp3, w_c3, w_t3, e13, e23, center_x, center_y) + f_1 = self._chameleon_1.density_lens( + r, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self._chameleon_2.density_lens( + r, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + f_3 = self._chameleon_3.density_lens( + r, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) return f_1 + f_2 + f_3 - def mass_3d_lens(self, r, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def mass_3d_lens( + self, + r, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param r: 3d radius @@ -495,13 +920,37 @@ def mass_3d_lens(self, r, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, :return: mass enclosed 3d radius """ amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) - m_1 = self._chameleon_1.mass_3d_lens(r, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - m_2 = self._chameleon_2.mass_3d_lens(r, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - m_3 = self._chameleon_3.mass_3d_lens(r, amp3, w_c3, w_t3, e13, e23, center_x, center_y) + m_1 = self._chameleon_1.mass_3d_lens( + r, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + m_2 = self._chameleon_2.mass_3d_lens( + r, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + m_3 = self._chameleon_3.mass_3d_lens( + r, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) return m_1 + m_2 + m_3 - def set_static(self, alpha_1, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def set_static( + self, + alpha_1, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): amp1, amp2, amp3 = self._ratio_definition(alpha_1, ratio12, ratio13) self._chameleon_1.set_static(amp1, w_c1, w_t1, e11, e21, center_x, center_y) self._chameleon_2.set_static(amp2, w_c2, w_t2, e12, e22, center_x, center_y) @@ -515,34 +964,88 @@ def set_dynamic(self): @export class DoubleChameleonPointMass(LensProfileBase): - """ - class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile - - """ - param_names = ['alpha_1', 'ratio_chameleon', 'ratio_pointmass', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', - 'e12', 'e22', 
'center_x', 'center_y'] - lower_limit_default = {'alpha_1': 0, 'ratio_chameleon': 0, 'ratio_pointmass': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, - 'e21': -0.8, 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'alpha_1': 100, 'ratio_chameleon': 100, 'ratio_pointmass': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8, - 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8, - 'center_x': 100, 'center_y': 100} + """Class of the Chameleon model (See Suyu+2014) an elliptical truncated double + isothermal profile.""" + + param_names = [ + "alpha_1", + "ratio_chameleon", + "ratio_pointmass", + "w_c1", + "w_t1", + "e11", + "e21", + "w_c2", + "w_t2", + "e12", + "e22", + "center_x", + "center_y", + ] + lower_limit_default = { + "alpha_1": 0, + "ratio_chameleon": 0, + "ratio_pointmass": 0, + "w_c1": 0, + "w_t1": 0, + "e11": -0.8, + "e21": -0.8, + "w_c2": 0, + "w_t2": 0, + "e12": -0.8, + "e22": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "alpha_1": 100, + "ratio_chameleon": 100, + "ratio_pointmass": 100, + "w_c1": 100, + "w_t1": 100, + "e11": 0.8, + "e21": 0.8, + "w_c2": 100, + "w_t2": 100, + "e12": 0.8, + "e22": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.chameleon = DoubleChameleon() self.pointMass = PointMass() super(DoubleChameleonPointMass, self).__init__() - def function(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, - center_x=0, center_y=0): - """ - #TODO chose better parameterization for combining point mass and Chameleon profiles + def function( + self, + x, + y, + alpha_1, + ratio_pointmass, + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): + """#TODO chose better parameterization for combining point mass and Chameleon + profiles :param x: ra-coordinate :param y: dec-coordinate :param alpha_1: deflection angle at 1 (arcseconds) from the center - :param ratio_pointmass: ratio of point source Einstein radius to combined Chameleon deflection angle at r=1 - :param ratio_chameleon: ratio in deflection angles at r=1 for the two Chameleon profiles + :param ratio_pointmass: ratio of point source Einstein radius to combined + Chameleon deflection angle at r=1 + :param ratio_chameleon: ratio in deflection angles at r=1 for the two Chameleon + profiles :param w_c1: Suyu+2014 for first profile :param w_t1: Suyu+2014 for first profile :param e11: ellipticity parameter for first profile @@ -555,13 +1058,45 @@ def function(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, :param center_y: dec center :return: lensing potential """ - f_1 = self.pointMass.function(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y) - f_2 = self.chameleon.function(x, y, alpha_1 / (1. 
+ ratio_pointmass), ratio_chameleon, w_c1, w_t1, e11, e21, - w_c2, w_t2, e12, e22, center_x, center_y) + f_1 = self.pointMass.function( + x, y, alpha_1 / (1.0 + 1.0 / ratio_pointmass), center_x, center_y + ) + f_2 = self.chameleon.function( + x, + y, + alpha_1 / (1.0 + ratio_pointmass), + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x, + center_y, + ) return f_1 + f_2 - def derivatives(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, - center_x=0, center_y=0): + def derivatives( + self, + x, + y, + alpha_1, + ratio_pointmass, + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: @@ -581,13 +1116,45 @@ def derivatives(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t :param center_y: dec center :return: """ - f_x1, f_y1 = self.pointMass.derivatives(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y) - f_x2, f_y2 = self.chameleon.derivatives(x, y, alpha_1 / (1. + ratio_pointmass), ratio_chameleon, w_c1, w_t1, - e11, e21, w_c2, w_t2, e12, e22, center_x, center_y) + f_x1, f_y1 = self.pointMass.derivatives( + x, y, alpha_1 / (1.0 + 1.0 / ratio_pointmass), center_x, center_y + ) + f_x2, f_y2 = self.chameleon.derivatives( + x, + y, + alpha_1 / (1.0 + ratio_pointmass), + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x, + center_y, + ) return f_x1 + f_x2, f_y1 + f_y2 - def hessian(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, - center_x=0, center_y=0): + def hessian( + self, + x, + y, + alpha_1, + ratio_pointmass, + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: @@ -607,7 +1174,23 @@ def hessian(self, x, y, alpha_1, ratio_pointmass, ratio_chameleon, w_c1, w_t1, e :param center_y: dec center :return: """ - f_xx1, f_xy1, f_yx1, f_yy1 = self.pointMass.hessian(x, y, alpha_1 / (1. + 1. / ratio_pointmass), center_x, center_y) - f_xx2, f_xy2, f_yx2, f_yy2 = self.chameleon.hessian(x, y, alpha_1 / (1. 
+ ratio_pointmass), ratio_chameleon, w_c1, w_t1, - e11, e21, w_c2, w_t2, e12, e22, center_x, center_y) + f_xx1, f_xy1, f_yx1, f_yy1 = self.pointMass.hessian( + x, y, alpha_1 / (1.0 + 1.0 / ratio_pointmass), center_x, center_y + ) + f_xx2, f_xy2, f_yx2, f_yy2 = self.chameleon.hessian( + x, + y, + alpha_1 / (1.0 + ratio_pointmass), + ratio_chameleon, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x, + center_y, + ) return f_xx1 + f_xx2, f_xy1 + f_xy2, f_yx1 + f_yx2, f_yy1 + f_yy2 diff --git a/lenstronomy/LensModel/Profiles/cnfw.py b/lenstronomy/LensModel/Profiles/cnfw.py index 480cdaded..66459e772 100644 --- a/lenstronomy/LensModel/Profiles/cnfw.py +++ b/lenstronomy/LensModel/Profiles/cnfw.py @@ -1,11 +1,11 @@ -__author__ = 'dgilman', 'sibirrer' +__author__ = "dgilman", "sibirrer" import numpy as np from scipy.integrate import quad from lenstronomy.LensModel.Profiles.nfw import NFW from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CNFW'] +__all__ = ["CNFW"] class CNFW(LensProfileBase): @@ -15,16 +15,27 @@ class CNFW(LensProfileBase): alpha_Rs is the normalization equivalent to the deflection angle at rs in the absence of a core """ - model_name = 'CNFW' + + model_name = "CNFW" _s = 0.001 # numerical limit for minimal radius - param_names = ['Rs', 'alpha_Rs', 'r_core', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'r_core': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100} + param_names = ["Rs", "alpha_Rs", "r_core", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "r_core": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "r_core": 100, + "center_x": 100, + "center_y": 100, + } def __init__(self): - """ - - """ + """""" self._nfw = NFW() super(CNFW, self).__init__() @@ -42,13 +53,13 @@ def function(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) rho0 = self._alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs, r_core=r_core) if isinstance(r, int) or isinstance(r, float): return self._num_integral_potential(r, Rs, rho0, r_core) else: - #TODO: currently the numerical integral is done one by one. More efficient is sorting the radial list and + # TODO: currently the numerical integral is done one by one. 
More efficient is sorting the radial list and # then perform one numerical integral reading out to the radial points f_ = [] for i in range(len(r)): @@ -62,19 +73,20 @@ def _num_integral_potential(self, r, Rs, rho0, r_core): :param r_core: :return: """ + def _integrand(x): return self.alpha_r(x, Rs, rho0, r_core) + f_ = quad(_integrand, 0, r)[0] return f_ def derivatives(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): - rho0 = self._alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs, r_core=r_core) if Rs < 0.0000001: Rs = 0.0000001 x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, self._s) f_r = self.alpha_r(R, Rs, rho0, r_core) f_x = f_r * x_ / R @@ -82,18 +94,15 @@ def derivatives(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): + # raise Exception('Hessian for truncated nfw profile not yet implemented.') - #raise Exception('Hessian for truncated nfw profile not yet implemented.') - - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy - """ + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy.""" rho0 = self._alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs, r_core=r_core) if Rs < 0.0001: Rs = 0.0001 x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) kappa = self.density_2d(x_, y_, Rs, rho0, r_core) gamma1, gamma2 = self.cnfwGamma(R, Rs, rho0, r_core, x_, y_) @@ -103,8 +112,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def density(self, R, Rs, rho0, r_core): - """ - three dimensional truncated NFW profile + """Three dimensional truncated NFW profile. :param R: radius of interest :type R: float/numpy array @@ -115,21 +123,20 @@ def density(self, R, Rs, rho0, r_core): :return: rho(R) density """ - M0 = 4*np.pi*rho0 * Rs ** 3 - return (M0/4/np.pi) * ((r_core + R)*(R + Rs)**2) ** -1 + M0 = 4 * np.pi * rho0 * Rs**3 + return (M0 / 4 / np.pi) * ((r_core + R) * (R + Rs) ** 2) ** -1 def density_lens(self, R, Rs, alpha_Rs, r_core): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. + The integral in the LOS projection of this quantity results in the convergence + quantity. """ rho0 = self._alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs, r_core=r_core) return self.density(R, Rs, rho0, r_core) def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0): - """ - projected two dimenstional NFW profile (kappa*Sigma_crit) + """Projected two dimenstional NFW profile (kappa*Sigma_crit) :param x: radius of interest :type x: float/numpy array @@ -141,15 +148,14 @@ def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) - b = r_core * Rs ** -1 - x = R * Rs ** -1 + R = np.sqrt(x_**2 + y_**2) + b = r_core * Rs**-1 + x = R * Rs**-1 Fx = self._F(x, b) return 2 * rho0 * Rs * Fx def mass_3d(self, R, Rs, rho0, r_core): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. 
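# Hedged usage sketch of the CNFW profile reformatted above: evaluate the deflection
# and Hessian on a grid and recover the convergence as half the Hessian trace
# (standard lensing relation). The import path is assumed from the file location;
# the parameter values are illustrative only.
import numpy as np
from lenstronomy.LensModel.Profiles.cnfw import CNFW

cnfw = CNFW()
x = np.linspace(0.1, 5.0, 20)
y = np.zeros_like(x)
kwargs = {"Rs": 2.0, "alpha_Rs": 0.8, "r_core": 0.3}
f_x, f_y = cnfw.derivatives(x, y, **kwargs)            # deflection angles
f_xx, f_xy, f_yx, f_yy = cnfw.hessian(x, y, **kwargs)  # second derivatives of the potential
kappa = 0.5 * (f_xx + f_yy)                             # convergence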
:param R: :param Rs: @@ -157,17 +163,20 @@ def mass_3d(self, R, Rs, rho0, r_core): :param r_core: :return: """ - b = r_core * Rs ** -1 - x = R * Rs ** -1 + b = r_core * Rs**-1 + x = R * Rs**-1 M_0 = 4 * np.pi * Rs**3 * rho0 - return M_0 * (x * (1+x) ** -1 * (-1+b) ** -1 + (-1+b) ** -2 * - ((2*b-1)*np.log(1/(1+x)) + b **2 * np.log(x / b + 1))) + return M_0 * ( + x * (1 + x) ** -1 * (-1 + b) ** -1 + + (-1 + b) ** -2 + * ((2 * b - 1) * np.log(1 / (1 + x)) + b**2 * np.log(x / b + 1)) + ) def mass_3d_lens(self, R, Rs, alpha_Rs, r_core): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units. :return: """ @@ -175,8 +184,7 @@ def mass_3d_lens(self, R, Rs, alpha_Rs, r_core): return self.mass_3d(R, Rs, rho0, r_core) def alpha_r(self, R, Rs, rho0, r_core): - """ - deflection angel of NFW profile along the radial direction + """Deflection angel of NFW profile along the radial direction. :param R: radius of interest :type R: float/numpy array @@ -184,19 +192,18 @@ def alpha_r(self, R, Rs, rho0, r_core): :type Rs: float :return: Epsilon(R) projected density at radius R """ - #R = np.maximum(R, self._s) + # R = np.maximum(R, self._s) x = R / Rs x = np.maximum(x, self._s) - b = r_core * Rs ** -1 - #b = max(b, 0.000001) + b = r_core * Rs**-1 + # b = max(b, 0.000001) gx = self._G(x, b) - a = 4*rho0*Rs**2*gx/x + a = 4 * rho0 * Rs**2 * gx / x return a def cnfwGamma(self, R, Rs, rho0, r_core, ax_x, ax_y): - """ - - shear gamma of NFW profile (times Sigma_crit) along the projection to coordinate 'axis' + """Shear gamma of NFW profile (times Sigma_crit) along the projection to + coordinate 'axis'. :param R: radius of interest :type R: float/numpy array @@ -211,77 +218,72 @@ def cnfwGamma(self, R, Rs, rho0, r_core, ax_x, ax_y): R = max(R, c) else: R[R <= c] = c - x = R * Rs ** -1 - b = r_core * Rs ** -1 + x = R * Rs**-1 + b = r_core * Rs**-1 b = max(b, c) gx = self._G(x, b) Fx = self._F(x, b) - a = 2 * rho0 * Rs * (2 * gx / x ** 2 - Fx) # /x #2*rho0*Rs*(2*gx/x**2 - Fx)*axis/x - return a * (ax_y ** 2 - ax_x ** 2) / R ** 2, -a * 2 * (ax_x * ax_y) / R ** 2 + a = ( + 2 * rho0 * Rs * (2 * gx / x**2 - Fx) + ) # /x #2*rho0*Rs*(2*gx/x**2 - Fx)*axis/x + return a * (ax_y**2 - ax_x**2) / R**2, -a * 2 * (ax_x * ax_y) / R**2 def mass_2d(self, R, Rs, rho0, r_core): - - """ - analytic solution of the projection integral - (convergence) - - """ + """Analytic solution of the projection integral (convergence)""" x = R / Rs b = r_core / Rs b = max(b, 0.000001) gx = self._G(x, b) - m_2d = 4*np.pi*rho0*Rs*R**2*gx/x**2 + m_2d = 4 * np.pi * rho0 * Rs * R**2 * gx / x**2 return m_2d def _alpha2rho0(self, alpha_Rs, Rs, r_core): - - b = r_core * Rs ** -1 - gx = self._G(1., b) - rho0 = alpha_Rs * (4 * Rs ** 2 * gx) ** -1 + b = r_core * Rs**-1 + gx = self._G(1.0, b) + rho0 = alpha_Rs * (4 * Rs**2 * gx) ** -1 return rho0 def _rho2alpha(self, rho0, Rs, r_core): - - b = r_core * Rs ** -1 - gx = self._G(1., b) - alpha = 4*Rs ** 2*gx*rho0 + b = r_core * Rs**-1 + gx = self._G(1.0, b) + alpha = 4 * Rs**2 * gx * rho0 return alpha def _nfw_func(self, x): - """ - Classic NFW function in terms of arctanh and arctan - :param x: r/Rs - :return: - """ + """Classic NFW function in terms of arctanh and arctan :param x: r/Rs + :return:""" - #c = 0.000000001 + # c = 0.000000001 if isinstance(x, np.ndarray): - #x[np.where(x 1) - nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5) - nfwvals[inds2] 
= (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5) + nfwvals[inds1] = (1 - x[inds1] ** 2) ** -0.5 * np.arctanh( + (1 - x[inds1] ** 2) ** 0.5 + ) + nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -0.5 * np.arctan( + (x[inds2] ** 2 - 1) ** 0.5 + ) return nfwvals elif isinstance(x, float) or isinstance(x, int): - #x = max(x, c) + # x = max(x, c) if x == 1: return 1 if x < 1: - return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5) + return (1 - x**2) ** -0.5 * np.arctanh((1 - x**2) ** 0.5) else: - return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5) + return (x**2 - 1) ** -0.5 * np.arctan((x**2 - 1) ** 0.5) - def _F(self, X, b, c = 0.001): - """ - analytic solution of the projection integral + def _F(self, X, b, c=0.001): + """Analytic solution of the projection integral. :param X: a dimensionless quantity, either r/rs or r/rc :type X: float >0 @@ -293,73 +295,91 @@ def _F(self, X, b, c = 0.001): prefac = (b - 1) ** -2 if isinstance(X, np.ndarray): - X[np.where(X == 1)] = 1 - c output = np.empty_like(X) - inds1 = np.where(np.absolute(X - b)=c) + inds2 = np.where(np.absolute(X - b) >= c) - output[inds2] = prefac * ((X[inds2] ** 2 - 1) ** -1 * (1 - b - - (1 - b * X[inds2] ** 2) * self._nfw_func(X[inds2])) - - self._nfw_func(X[inds2] * b ** -1)) + output[inds2] = prefac * ( + (X[inds2] ** 2 - 1) ** -1 + * (1 - b - (1 - b * X[inds2] ** 2) * self._nfw_func(X[inds2])) + - self._nfw_func(X[inds2] * b**-1) + ) else: - if X == 1: - X = 1-c + X = 1 - c - if np.absolute(X - b)0 """ if b == 1: - b = 1+c + b = 1 + c - b2 = b ** 2 + b2 = b**2 x2 = X**2 fac = (1 - b) ** 2 - prefac = fac ** -1 + prefac = fac**-1 if isinstance(X, np.ndarray): - output = np.ones_like(X) inds1 = np.where(np.absolute(X - b) <= c) inds2 = np.where(np.absolute(X - b) > c) - output[inds1] = prefac * (2*(1-2*b+b**3)*self._nfw_func(b) + fac * (-1.38692 + np.log(b2)) - b2*np.log(b2)) - - output[inds2] = prefac * (fac * np.log(0.25 * x2[inds2]) - b2 * np.log(b2) + - 2 * (b2 - x2[inds2]) * self._nfw_func(X[inds2] * b**-1) + - 2 * (1+b*(x2[inds2] - 2)) * self._nfw_func(X[inds2])) - return 0.5*output + output[inds1] = prefac * ( + 2 * (1 - 2 * b + b**3) * self._nfw_func(b) + + fac * (-1.38692 + np.log(b2)) + - b2 * np.log(b2) + ) + + output[inds2] = prefac * ( + fac * np.log(0.25 * x2[inds2]) + - b2 * np.log(b2) + + 2 * (b2 - x2[inds2]) * self._nfw_func(X[inds2] * b**-1) + + 2 * (1 + b * (x2[inds2] - 2)) * self._nfw_func(X[inds2]) + ) + return 0.5 * output else: - if np.absolute(X - b) <= c: - output = prefac * (2*(1-2*b+b**3)*self._nfw_func(b) + - fac * (-1.38692 + np.log(b2)) - b2*np.log(b2)) + output = prefac * ( + 2 * (1 - 2 * b + b**3) * self._nfw_func(b) + + fac * (-1.38692 + np.log(b2)) + - b2 * np.log(b2) + ) else: - output = prefac * (fac * np.log(0.25 * x2) - b2 * np.log(b2) + - 2 * (b2 - x2) * self._nfw_func(X * b**-1) + 2 * (1+b*(x2 - 2))* - self._nfw_func(X)) + output = prefac * ( + fac * np.log(0.25 * x2) + - b2 * np.log(b2) + + 2 * (b2 - x2) * self._nfw_func(X * b**-1) + + 2 * (1 + b * (x2 - 2)) * self._nfw_func(X) + ) return 0.5 * output diff --git a/lenstronomy/LensModel/Profiles/cnfw_ellipse.py b/lenstronomy/LensModel/Profiles/cnfw_ellipse.py index 516493e74..0e44ad152 100644 --- a/lenstronomy/LensModel/Profiles/cnfw_ellipse.py +++ b/lenstronomy/LensModel/Profiles/cnfw_ellipse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to compute the Navaro-Frank-White function in mass/kappa space # the potential therefore is its integral @@ -8,18 +8,34 @@ import 
lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CNFW_ELLIPSE'] +__all__ = ["CNFW_ELLIPSE"] class CNFW_ELLIPSE(LensProfileBase): - """ - this class contains functions concerning the NFW profile + """This class contains functions concerning the NFW profile. relation are: R_200 = c * Rs """ - param_names = ['Rs', 'alpha_Rs', 'r_core', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'r_core': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'r_core': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["Rs", "alpha_Rs", "r_core", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "r_core": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "r_core": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.cnfw = CNFW() @@ -27,51 +43,55 @@ def __init__(self): super(CNFW_ELLIPSE, self).__init__() def function(self, x, y, Rs, alpha_Rs, r_core, e1, e2, center_x=0, center_y=0): - """ - returns double integral of NFW profile - """ + """Returns double integral of NFW profile.""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - e = min(abs(1. - q), 0.99) - xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e) - xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e) + e = min(abs(1.0 - q), 0.99) + xt1 = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e) + xt2 = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e) R_ = np.sqrt(xt1**2 + xt2**2) f_ = self.cnfw.function(R_, 0, Rs, alpha_Rs, r_core, center_x=0, center_y=0) return f_ def derivatives(self, x, y, Rs, alpha_Rs, r_core, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW) - """ + """Returns df/dx and df/dy of the function (integral of NFW)""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - e = min(abs(1. 
- q), 0.99) - xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e) - xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e) + e = min(abs(1.0 - q), 0.99) + xt1 = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e) + xt2 = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e) - f_x_prim, f_y_prim = self.cnfw.derivatives(xt1, xt2, Rs, alpha_Rs, r_core, center_x=0, center_y=0) + f_x_prim, f_y_prim = self.cnfw.derivatives( + xt1, xt2, Rs, alpha_Rs, r_core, center_x=0, center_y=0 + ) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, Rs, alpha_Rs, r_core, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy - """ + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy.""" diff = 0.0000001 - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y) - - alpha_ra_dx_, alpha_dec_dx_ = self.derivatives(x - diff, y, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y) - alpha_ra_dy_, alpha_dec_dy_ = self.derivatives(x, y - diff, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y + ) + + alpha_ra_dx_, alpha_dec_dx_ = self.derivatives( + x - diff, y, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y + ) + alpha_ra_dy_, alpha_dec_dy_ = self.derivatives( + x, y - diff, Rs, alpha_Rs, r_core, e1, e2, center_x, center_y + ) dalpha_rara = (alpha_ra_dx - alpha_ra_dx_) / diff / 2 dalpha_radec = (alpha_ra_dy - alpha_ra_dy_) / diff / 2 @@ -85,17 +105,17 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, e1, e2, center_x=0, center_y=0): return f_xx, f_xy, f_yx, f_yy def mass_3d_lens(self, R, Rs, alpha_Rs, r_core, e1=0, e2=0): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units. :return: """ return self.cnfw.mass_3d_lens(R, Rs, alpha_Rs, r_core) def density_lens(self, R, Rs, alpha_Rs, r_core, e1=0, e2=0): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. + The integral in the LOS projection of this quantity results in the convergence + quantity. """ return self.cnfw.density_lens(R, Rs, alpha_Rs, r_core) diff --git a/lenstronomy/LensModel/Profiles/const_mag.py b/lenstronomy/LensModel/Profiles/const_mag.py index 307ed52f5..b70f8dc5d 100644 --- a/lenstronomy/LensModel/Profiles/const_mag.py +++ b/lenstronomy/LensModel/Profiles/const_mag.py @@ -1,115 +1,143 @@ -__author__ = 'gipagano' +__author__ = "gipagano" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['ConstMag'] +__all__ = ["ConstMag"] class ConstMag(LensProfileBase): - """ - this class implements the macromodel potential of `Diego et al. `_ + """This class implements the macromodel potential of `Diego et al. 
+ + `_ Convergence and shear are computed according to `Diego2018 `_ """ - - param_names = ['center_x', 'center_y','mu_r', 'mu_t', 'parity', 'phi_G'] - lower_limit_default = {'center_x': -100, 'center_y': -100, 'mu_r':1, 'mu_t': 1000, 'parity': -1, 'phi_G':0.0} - upper_limit_default = {'center_x': 100, 'center_y': 100, 'mu_r':1, 'mu_t': 1000, 'parity': 1, 'phi_G':np.pi} - + + param_names = ["center_x", "center_y", "mu_r", "mu_t", "parity", "phi_G"] + lower_limit_default = { + "center_x": -100, + "center_y": -100, + "mu_r": 1, + "mu_t": 1000, + "parity": -1, + "phi_G": 0.0, + } + upper_limit_default = { + "center_x": 100, + "center_y": 100, + "mu_r": 1, + "mu_t": 1000, + "parity": 1, + "phi_G": np.pi, + } + def function(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0): """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) - :param mu_r: radial magnification + :param mu_r: radial magnification :param mu_t: tangential magnification - :param parity: parity side of the macromodel. Either +1 (positive parity) or -1 (negative parity) + :param parity: parity side of the macromodel. Either +1 (positive parity) or -1 (negative parity) :param phi_G: shear orientation angle (relative to the x-axis) - :return: lensing potential - """ - + :return: lensing potential + """ + # positive parity case if parity == 1: - gamma = (1./mu_t-1./mu_r)*0.5 - kappa = 1 -gamma-1./mu_r - + gamma = (1.0 / mu_t - 1.0 / mu_r) * 0.5 + kappa = 1 - gamma - 1.0 / mu_r + # negative parity case elif parity == -1: - gamma = (1./mu_t+1./mu_r)*0.5 - kappa = 1 -gamma+1./mu_r + gamma = (1.0 / mu_t + 1.0 / mu_r) * 0.5 + kappa = 1 - gamma + 1.0 / mu_r else: - raise ValueError('%f is not a valid value for the parity of the macromodel. Choose either +1 or -1.' % parity) - + raise ValueError( + "%f is not a valid value for the parity of the macromodel. Choose either +1 or -1." + % parity + ) + # compute the shear along the x and y directions, rotate the vector in the opposite direction than the reference frame (compare with util.rotate) - gamma1, gamma2 = gamma*np.cos(2*phi_G), -gamma*np.sin(2*phi_G) - + gamma1, gamma2 = gamma * np.cos(2 * phi_G), -gamma * np.sin(2 * phi_G) + x_shift = x - center_x y_shift = y - center_y - f_ = 1./2. * kappa * (x_shift*x_shift + y_shift*y_shift) + 1./2. * gamma1 * (x_shift*x_shift - y_shift*y_shift)-gamma2*x_shift*y_shift - + f_ = ( + 1.0 / 2.0 * kappa * (x_shift * x_shift + y_shift * y_shift) + + 1.0 / 2.0 * gamma1 * (x_shift * x_shift - y_shift * y_shift) + - gamma2 * x_shift * y_shift + ) + return f_ def derivatives(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0): """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) - :param mu_r: radial magnification + :param mu_r: radial magnification :param mu_t: tangential magnification - :param parity: parity of the side of the macromodel. Either +1 (positive parity) or -1 (negative parity) + :param parity: parity of the side of the macromodel. 
Either +1 (positive parity) or -1 (negative parity) :param phi_G: shear orientation angle (relative to the x-axis) :return: deflection angle (in angles) - """ - + """ + # positive parity case - if (parity== 1): - gamma = (1./mu_t-1./mu_r)*0.5 - kappa = 1 -gamma-1./mu_r - + if parity == 1: + gamma = (1.0 / mu_t - 1.0 / mu_r) * 0.5 + kappa = 1 - gamma - 1.0 / mu_r + # negative parity case - elif (parity== -1): - gamma = (1./mu_t+1./mu_r)*0.5 - kappa = 1 -gamma+1./mu_r + elif parity == -1: + gamma = (1.0 / mu_t + 1.0 / mu_r) * 0.5 + kappa = 1 - gamma + 1.0 / mu_r else: - raise ValueError('%f is not a valid value for the parity of the macromodel. Choose either +1 or -1.' % parity) - + raise ValueError( + "%f is not a valid value for the parity of the macromodel. Choose either +1 or -1." + % parity + ) + # compute the shear along the x and y directions, rotate the vector in the opposite direction than the reference frame (compare with util.rotate) - gamma1, gamma2 = gamma*np.cos(2*phi_G), -gamma*np.sin(2*phi_G) - + gamma1, gamma2 = gamma * np.cos(2 * phi_G), -gamma * np.sin(2 * phi_G) + x_shift = x - center_x y_shift = y - center_y - f_x = (kappa+gamma1)*x_shift - gamma2*y_shift - f_y = (kappa-gamma1)*y_shift - gamma2*x_shift + f_x = (kappa + gamma1) * x_shift - gamma2 * y_shift + f_y = (kappa - gamma1) * y_shift - gamma2 * x_shift return f_x, f_y def hessian(self, x, y, mu_r, mu_t, parity, phi_G, center_x=0, center_y=0): """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) - :param mu_r: radial magnification + :param mu_r: radial magnification :param mu_t: tangential magnification - :param parity: parity of the side of the macromodel. Either +1 (positive parity) or -1 (negative parity) - :param phi_G: shear orientation angle (relative to the x-axis) + :param parity: parity of the side of the macromodel. Either +1 (positive parity) or -1 (negative parity) + :param phi_G: shear orientation angle (relative to the x-axis) :return: hessian matrix (in angles) - """ - + """ + # positive parity case - if (parity== 1): - gamma = (1./mu_t-1./mu_r)*0.5 - kappa = 1 -gamma-1./mu_r - + if parity == 1: + gamma = (1.0 / mu_t - 1.0 / mu_r) * 0.5 + kappa = 1 - gamma - 1.0 / mu_r + # negative parity case - elif (parity== -1): - gamma = (1./mu_t+1./mu_r)*0.5 - kappa = 1 -gamma+1./mu_r + elif parity == -1: + gamma = (1.0 / mu_t + 1.0 / mu_r) * 0.5 + kappa = 1 - gamma + 1.0 / mu_r else: - raise ValueError('%f is not a valid value for the parity of the macromodel. Choose either +1 or -1.' % parity) - + raise ValueError( + "%f is not a valid value for the parity of the macromodel. Choose either +1 or -1." 
+ % parity + ) + # compute the shear along the x and y directions, rotate the vector in the opposite direction than the reference frame (compare with util.rotate) - gamma1, gamma2 = gamma*np.cos(2*phi_G), -gamma*np.sin(2*phi_G) - + gamma1, gamma2 = gamma * np.cos(2 * phi_G), -gamma * np.sin(2 * phi_G) + f_xx = kappa + gamma1 f_yy = kappa - gamma1 f_xy = -gamma2 diff --git a/lenstronomy/LensModel/Profiles/constant_shift.py b/lenstronomy/LensModel/Profiles/constant_shift.py index ff43022d1..837cd9b5f 100644 --- a/lenstronomy/LensModel/Profiles/constant_shift.py +++ b/lenstronomy/LensModel/Profiles/constant_shift.py @@ -1,18 +1,17 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np -__all__ = ['Shift'] +__all__ = ["Shift"] class Shift(LensProfileBase): - """ - Lens model with a constant shift of the deflection field - """ - param_names = ['alpha_x', 'alpha_y'] - lower_limit_default = {'alpha_x': -1000, 'alpha_y': -1000} - upper_limit_default = {'alpha_x': 1000, 'alpha_y': 1000} + """Lens model with a constant shift of the deflection field.""" + + param_names = ["alpha_x", "alpha_y"] + lower_limit_default = {"alpha_x": -1000, "alpha_y": -1000} + upper_limit_default = {"alpha_x": 1000, "alpha_y": 1000} def function(self, x, y, alpha_x, alpha_y): """ diff --git a/lenstronomy/LensModel/Profiles/convergence.py b/lenstronomy/LensModel/Profiles/convergence.py index 924974d2b..0cf0749f8 100644 --- a/lenstronomy/LensModel/Profiles/convergence.py +++ b/lenstronomy/LensModel/Profiles/convergence.py @@ -1,23 +1,21 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Convergence'] +__all__ = ["Convergence"] class Convergence(LensProfileBase): - """ - a single mass sheet (external convergence) - """ - model_name = 'CONVERGENCE' - param_names = ['kappa', 'ra_0', 'dec_0'] - lower_limit_default = {'kappa': -10, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'kappa': 10, 'ra_0': 100, 'dec_0': 100} + """A single mass sheet (external convergence)""" + + model_name = "CONVERGENCE" + param_names = ["kappa", "ra_0", "dec_0"] + lower_limit_default = {"kappa": -10, "ra_0": -100, "dec_0": -100} + upper_limit_default = {"kappa": 10, "ra_0": 100, "dec_0": 100} def function(self, x, y, kappa, ra_0=0, dec_0=0): - """ - lensing potential + """Lensing potential. :param x: x-coordinate :param y: y-coordinate @@ -25,12 +23,11 @@ def function(self, x, y, kappa, ra_0=0, dec_0=0): :return: lensing potential """ theta, phi = param_util.cart2polar(x - ra_0, y - dec_0) - f_ = 1. / 2 * kappa * theta ** 2 + f_ = 1.0 / 2 * kappa * theta**2 return f_ def derivatives(self, x, y, kappa, ra_0=0, dec_0=0): - """ - deflection angle + """Deflection angle. :param x: x-coordinate :param y: y-coordinate @@ -44,8 +41,7 @@ def derivatives(self, x, y, kappa, ra_0=0, dec_0=0): return f_x, f_y def hessian(self, x, y, kappa, ra_0=0, dec_0=0): - """ - Hessian matrix + """Hessian matrix. 
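# Hedged sketch for the ConstMag macromodel above: for positive parity the convergence
# and shear are constructed so that det(A) = (1 - kappa)^2 - gamma^2 = 1 / (mu_r * mu_t).
# The import path is assumed from the file location, and the Hessian is assumed to
# return (f_xx, f_xy, f_yx, f_yy) like the other profiles in this diff; values are
# illustrative only.
from lenstronomy.LensModel.Profiles.const_mag import ConstMag

const_mag = ConstMag()
kwargs = {"mu_r": 2.0, "mu_t": 30.0, "parity": 1, "phi_G": 0.3}
f_xx, f_xy, f_yx, f_yy = const_mag.hessian(1.0, 0.5, **kwargs)
det_A = (1 - f_xx) * (1 - f_yy) - f_xy * f_yx
print(det_A, 1.0 / (kwargs["mu_r"] * kwargs["mu_t"]))  # expected to agree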
:param x: x-coordinate :param y: y-coordinate diff --git a/lenstronomy/LensModel/Profiles/coreBurkert.py b/lenstronomy/LensModel/Profiles/coreBurkert.py index 987da509b..bd59ab473 100644 --- a/lenstronomy/LensModel/Profiles/coreBurkert.py +++ b/lenstronomy/LensModel/Profiles/coreBurkert.py @@ -1,21 +1,30 @@ -__author__ = 'dgilman' +__author__ = "dgilman" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CoreBurkert'] +__all__ = ["CoreBurkert"] class CoreBurkert(LensProfileBase): - """ - lensing properties of a modified Burkert profile with variable core size - normalized by rho0, the central core density - - """ - - param_names = ['Rs', 'alpha_Rs', 'r_core', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 1, 'alpha_Rs': 0, 'r_core': 0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 100, 'r_core': 50, 'center_x': 100, 'center_y': 100} + """Lensing properties of a modified Burkert profile with variable core size + normalized by rho0, the central core density.""" + + param_names = ["Rs", "alpha_Rs", "r_core", "center_x", "center_y"] + lower_limit_default = { + "Rs": 1, + "alpha_Rs": 0, + "r_core": 0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 100, + "r_core": 50, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): """ @@ -35,19 +44,15 @@ def function(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): Rs = 0.0000001 x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) f_ = self.cBurkPot(R, Rs, rho0, r_core) return f_ def derivatives(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): - """ - deflection angles - :param x: x coordinate - :param y: y coordinate - :param Rs: scale radius - :param alpha_Rs: deflection angle at Rs - :param r_core: core radius + """Deflection angles :param x: x coordinate :param y: y coordinate :param Rs: + scale radius :param alpha_Rs: deflection angle at Rs :param r_core: core radius :param center_x: + :param center_y: :return: """ @@ -58,14 +63,13 @@ def derivatives(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): Rs = 0.0000001 x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) dx, dy = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_) return dx, dy def hessian(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): - """ :param x: x coordinate :param y: y coordinate @@ -81,7 +85,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): Rs = 0.0001 x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) rho0 = self._alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs, r_core=r_core) @@ -94,10 +98,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def mass_2d(self, R, Rs, rho0, r_core): - - """ - analytic solution of the projection integral - (convergence) + """Analytic solution of the projection integral (convergence) :param R: projected distance :param Rs: scale radius @@ -105,17 +106,16 @@ def mass_2d(self, R, Rs, rho0, r_core): :param r_core: core radius """ - x = R * Rs ** -1 - p = Rs * r_core ** -1 + x = R * Rs**-1 + p = Rs * r_core**-1 gx = self._G(x, p) - m_2d = 2 * np.pi * rho0 * Rs ** 3 * gx + m_2d = 2 * np.pi * rho0 * Rs**3 * gx return m_2d def coreBurkAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y): - """ - deflection angle + 
"""Deflection angle. :param R: :param Rs: @@ -125,18 +125,17 @@ def coreBurkAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y): :param ax_y: :return: """ - x = R * Rs ** -1 - p = Rs * r_core ** -1 + x = R * Rs**-1 + p = Rs * r_core**-1 gx = self._G(x, p) - a = 2 * rho0 * Rs ** 2 * gx / x + a = 2 * rho0 * Rs**2 * gx / x return a * ax_x / R, a * ax_y / R def density(self, R, Rs, rho0, r_core): - """ - three dimensional cored Burkert profile + """Three dimensional cored Burkert profile. :param R: radius of interest :type R: float/numpy array @@ -147,13 +146,12 @@ def density(self, R, Rs, rho0, r_core): :return: rho(R) density """ - M0 = 4*np.pi * Rs ** 3 * rho0 + M0 = 4 * np.pi * Rs**3 * rho0 - return (M0 / (4*np.pi)) * ((r_core + R) * (Rs ** 2 + R ** 2)) ** -1 + return (M0 / (4 * np.pi)) * ((r_core + R) * (Rs**2 + R**2)) ** -1 def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0): - """ - projected two dimenstional core Burkert profile (kappa*Sigma_crit) + """Projected two dimenstional core Burkert profile (kappa*Sigma_crit) :param x: x coordinate :param y: y coordinate @@ -163,9 +161,9 @@ def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) - x = R * Rs ** -1 - p = Rs * r_core ** -1 + R = np.sqrt(x_**2 + y_**2) + x = R * Rs**-1 + p = Rs * r_core**-1 Fx = self._F(x, p) return 2 * rho0 * Rs * Fx @@ -179,26 +177,33 @@ def mass_3d(self, R, Rs, rho0, r_core): """ Rs = float(Rs) - b = r_core * Rs ** -1 - c = R * Rs ** -1 + b = r_core * Rs**-1 + c = R * Rs**-1 - M0 = 4*np.pi*Rs**3 * rho0 + M0 = 4 * np.pi * Rs**3 * rho0 - return M0 * (1+b**2) ** -1 * (0.5*np.log(1+c**2) + b**2*np.log(c*b**-1 + 1) - b*np.arctan(c)) + return ( + M0 + * (1 + b**2) ** -1 + * ( + 0.5 * np.log(1 + c**2) + + b**2 * np.log(c * b**-1 + 1) + - b * np.arctan(c) + ) + ) def cBurkPot(self, R, Rs, rho0, r_core): - """ :param R: projected distance :param Rs: scale radius :param rho0: central core density :param r_core: core radius """ - x = R * Rs ** -1 - p = Rs * r_core ** -1 + x = R * Rs**-1 + p = Rs * r_core**-1 hx = self._H(x, p) - return 2 * rho0 * Rs ** 3 * hx + return 2 * rho0 * Rs**3 * hx def cBurkGamma(self, R, Rs, rho0, r_core, ax_x, ax_y): """ @@ -218,142 +223,219 @@ def cBurkGamma(self, R, Rs, rho0, r_core, ax_x, ax_y): else: R[R <= c] = c - x = R * Rs ** -1 - p = Rs * r_core ** -1 + x = R * Rs**-1 + p = Rs * r_core**-1 gx = self._G(x, p) fx = self._F(x, p) - m_x = 2 * rho0 * Rs ** 3 * gx + m_x = 2 * rho0 * Rs**3 * gx kappa = 2 * rho0 * Rs * fx - a = 2 * (m_x * R ** -2 - kappa) + a = 2 * (m_x * R**-2 - kappa) - return 0.5 * a * (ax_y ** 2 - ax_x ** 2) / R ** 2, -a * (ax_x * ax_y) / R ** 2 + return 0.5 * a * (ax_y**2 - ax_x**2) / R**2, -a * (ax_x * ax_y) / R**2 @staticmethod def _u(x): - - return np.sqrt(1 + x ** 2) + return np.sqrt(1 + x**2) @staticmethod def _g(x, p): - - return np.sqrt(1 - x ** 2 * p ** 2) + return np.sqrt(1 - x**2 * p**2) @staticmethod def _f(x, p): - - return np.sqrt(x ** 2 * p ** 2 - 1) + return np.sqrt(x**2 * p**2 - 1) def _H(self, x, p): - - prefactor = (p + p ** 3) ** -1 * p + prefactor = (p + p**3) ** -1 * p if isinstance(x, np.ndarray): - inds0 = np.where(x * p == 1) inds1 = np.where(x * p < 1) inds2 = np.where(x * p > 1) func = np.ones_like(x) - func[inds1] = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x[inds1]) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x[inds1]) - 0.5 * np.arctanh(self._u(x[inds1]) ** -1)) * np.arctanh( - self._u(x[inds1]) ** -1) + \ - 2 * 
(self._g(x[inds1], p) - 0.5 * np.arctanh(self._g(x[inds1], p))) * \ - np.arctanh(self._g(x[inds1], p)) + (1 + p ** 2) * np.log(x[inds1]) ** 2 - np.pi * p * \ - np.log(1 + self._u(x[inds1])) + (0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x[inds1]) * ( - 0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) - - func[inds2] = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x[inds2]) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x[inds2]) - 0.5 * np.arctanh(self._u(x[inds2]) ** -1)) * np.arctanh( - self._u(x[inds2]) ** -1) + \ - -2 * (self._f(x[inds2], p) - 0.5 * np.arctan(self._f(x[inds2], p))) * \ - np.arctan(self._f(x[inds2], p)) + (1 + p ** 2) * np.log(x[inds2]) ** 2 - np.pi * p * \ - np.log(1 + self._u(x[inds2])) + (0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x[inds2]) * ( - 0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) - - func[inds0] = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x[inds0]) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x[inds0]) - 0.5 * np.arctanh(self._u(x[inds0]) ** -1)) * np.arctanh( - self._u(x[inds0]) ** -1) \ - + (1 + p ** 2) * np.log(x[inds0]) ** 2 - np.pi * p * \ - np.log(1 + self._u(x[inds0])) + (0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x[inds0]) * ( - 0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) + func[inds1] = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x[inds1]) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x[inds1]) - 0.5 * np.arctanh(self._u(x[inds1]) ** -1)) + * np.arctanh(self._u(x[inds1]) ** -1) + + 2 + * (self._g(x[inds1], p) - 0.5 * np.arctanh(self._g(x[inds1], p))) + * np.arctanh(self._g(x[inds1], p)) + + (1 + p**2) * np.log(x[inds1]) ** 2 + - np.pi * p * np.log(1 + self._u(x[inds1])) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x[inds1]) + * (0.6137056388801094 + 0.6137056388801094 * p**2 + np.log(p**2)) + ) + + func[inds2] = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x[inds2]) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x[inds2]) - 0.5 * np.arctanh(self._u(x[inds2]) ** -1)) + * np.arctanh(self._u(x[inds2]) ** -1) + + -2 + * (self._f(x[inds2], p) - 0.5 * np.arctan(self._f(x[inds2], p))) + * np.arctan(self._f(x[inds2], p)) + + (1 + p**2) * np.log(x[inds2]) ** 2 + - np.pi * p * np.log(1 + self._u(x[inds2])) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x[inds2]) + * (0.6137056388801094 + 0.6137056388801094 * p**2 + np.log(p**2)) + ) + + func[inds0] = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x[inds0]) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x[inds0]) - 0.5 * np.arctanh(self._u(x[inds0]) ** -1)) + * np.arctanh(self._u(x[inds0]) ** -1) + + (1 + p**2) * np.log(x[inds0]) ** 2 + - np.pi * p * np.log(1 + self._u(x[inds0])) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x[inds0]) + * (0.6137056388801094 + 0.6137056388801094 * p**2 + np.log(p**2)) + ) else: if x * p < 1: - func = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) * np.arctanh(self._u(x) ** -1) + \ - 2 * (self._g(x, p) - 0.5 * np.arctanh(self._g(x, p))) * \ - np.arctanh(self._g(x, p)) + (1 + p ** 2) * np.log(x) ** 2 - np.pi * p * \ - np.log(1 + self._u(x)) + 
(0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x) * (0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) + func = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) + * np.arctanh(self._u(x) ** -1) + + 2 + * (self._g(x, p) - 0.5 * np.arctanh(self._g(x, p))) + * np.arctanh(self._g(x, p)) + + (1 + p**2) * np.log(x) ** 2 + - np.pi * p * np.log(1 + self._u(x)) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x) + * ( + 0.6137056388801094 + + 0.6137056388801094 * p**2 + + np.log(p**2) + ) + ) elif x * p > 1: - func = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) * np.arctanh( - self._u(x) ** -1) + \ - -2 * (self._f(x, p) - 0.5 * np.arctan(self._f(x, p))) * \ - np.arctan(self._f(x, p)) + (1 + p ** 2) * np.log(x) ** 2 - np.pi * p * \ - np.log(1 + self._u(x)) + (0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x) * ( - 0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) + func = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) + * np.arctanh(self._u(x) ** -1) + + -2 + * (self._f(x, p) - 0.5 * np.arctan(self._f(x, p))) + * np.arctan(self._f(x, p)) + + (1 + p**2) * np.log(x) ** 2 + - np.pi * p * np.log(1 + self._u(x)) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x) + * ( + 0.6137056388801094 + + 0.6137056388801094 * p**2 + + np.log(p**2) + ) + ) else: - func = 0.9058413472016891 + (-0.9640065632861909 + np.pi * self._u(x) - - 0.9058413472016892 * p) * p + 2 * p ** 2 * ( - self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) * np.arctanh(self._u(x) ** -1) \ - + (1 + p ** 2) * np.log(x) ** 2 - np.pi * p * \ - np.log(1 + self._u(x)) + (0.3068528194400547 + 0.25 * np.log(p ** 2)) * \ - np.log(p ** 2) + np.log(x) * (0.6137056388801094 + 0.6137056388801094 * p ** 2 + np.log(p ** 2)) + func = ( + 0.9058413472016891 + + ( + -0.9640065632861909 + + np.pi * self._u(x) + - 0.9058413472016892 * p + ) + * p + + 2 + * p**2 + * (self._u(x) - 0.5 * np.arctanh(self._u(x) ** -1)) + * np.arctanh(self._u(x) ** -1) + + (1 + p**2) * np.log(x) ** 2 + - np.pi * p * np.log(1 + self._u(x)) + + (0.3068528194400547 + 0.25 * np.log(p**2)) * np.log(p**2) + + np.log(x) + * ( + 0.6137056388801094 + + 0.6137056388801094 * p**2 + + np.log(p**2) + ) + ) return prefactor * func def _F(self, x, p): - - """ - solution of the projection integal (kappa) - arctanh / arctan function - :param x: r/Rs - :param p: r_core / Rs - :return: - """ - prefactor = 0.5 * (1 + p ** 2) ** -1 * p + """Solution of the projection integal (kappa) arctanh / arctan function :param + x: r/Rs :param p: r_core / Rs :return:""" + prefactor = 0.5 * (1 + p**2) ** -1 * p if isinstance(x, np.ndarray): - inds0 = np.where(x * p == 1) inds1 = np.where(x * p < 1) inds2 = np.where(x * p > 1) func = np.ones_like(x) - func[inds0] = self._u(x[inds0]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds0]) ** -1)) + func[inds0] = self._u(x[inds0]) ** -1 * ( + np.pi + 2 * p * np.arctanh(self._u(x[inds0]) ** -1) + ) - func[inds1] = self._u(x[inds1]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds1]) ** -1)) - \ - (2 * p * self._g(x[inds1], p) ** -1 * np.arctanh(self._g(x[inds1], 
p))) + func[inds1] = self._u(x[inds1]) ** -1 * ( + np.pi + 2 * p * np.arctanh(self._u(x[inds1]) ** -1) + ) - (2 * p * self._g(x[inds1], p) ** -1 * np.arctanh(self._g(x[inds1], p))) - func[inds2] = self._u(x[inds2]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds2]) ** -1)) - \ - (2 * p * self._f(x[inds2], p) ** -1 * np.arctan(self._f(x[inds2], p))) + func[inds2] = self._u(x[inds2]) ** -1 * ( + np.pi + 2 * p * np.arctanh(self._u(x[inds2]) ** -1) + ) - (2 * p * self._f(x[inds2], p) ** -1 * np.arctan(self._f(x[inds2], p))) return prefactor * func else: - if x * p == 1: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) elif x * p < 1: - func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ - (2 * p * self._g(x, p) ** -1 * np.arctanh(self._g(x, p))) + func = self._u(x) ** -1 * ( + np.pi + 2 * p * np.arctanh(self._u(x) ** -1) + ) - (2 * p * self._g(x, p) ** -1 * np.arctanh(self._g(x, p))) else: - func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ - (2 * p * self._f(x, p) ** -1 * np.arctan(self._f(x, p))) + func = self._u(x) ** -1 * ( + np.pi + 2 * p * np.arctanh(self._u(x) ** -1) + ) - (2 * p * self._f(x, p) ** -1 * np.arctan(self._f(x, p))) return prefactor * func @@ -366,67 +448,94 @@ def _G(self, x, p): :return: """ - prefactor = (p + p ** 3) ** -1 * p + prefactor = (p + p**3) ** -1 * p if isinstance(x, np.ndarray): - inds0 = np.where(x * p == 1) inds1 = np.where(x * p < 1) inds2 = np.where(x * p > 1) func = np.ones_like(x) - func[inds0] = np.log(0.25 * x[inds0] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds0]) - 1) + \ - 2 * p ** 2 * (self._u(x[inds0]) * np.arctanh(self._u(x[inds0]) ** -1) + - np.log(0.5 * x[inds0])) - - func[inds1] = np.log(0.25 * x[inds1] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds1]) - 1) + \ - 2 * p ** 2 * (self._u(x[inds1]) * np.arctanh(self._u(x[inds1]) ** -1) + - np.log(0.5 * x[inds1])) + 2 * self._g(x[inds1], p) * np.arctanh( - self._g(x[inds1], p)) - - func[inds2] = np.log(0.25 * x[inds2] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds2]) - 1) + \ - 2 * p ** 2 * (self._u(x[inds2]) * np.arctanh(self._u(x[inds2]) ** -1) + - np.log(0.5 * x[inds2])) - 2 * self._f(x[inds2], p) * np.arctan( - self._f(x[inds2], p)) - + func[inds0] = ( + np.log(0.25 * x[inds0] ** 2 * p**2) + + np.pi * p * (self._u(x[inds0]) - 1) + + 2 + * p**2 + * ( + self._u(x[inds0]) * np.arctanh(self._u(x[inds0]) ** -1) + + np.log(0.5 * x[inds0]) + ) + ) + + func[inds1] = ( + np.log(0.25 * x[inds1] ** 2 * p**2) + + np.pi * p * (self._u(x[inds1]) - 1) + + 2 + * p**2 + * ( + self._u(x[inds1]) * np.arctanh(self._u(x[inds1]) ** -1) + + np.log(0.5 * x[inds1]) + ) + + 2 * self._g(x[inds1], p) * np.arctanh(self._g(x[inds1], p)) + ) + + func[inds2] = ( + np.log(0.25 * x[inds2] ** 2 * p**2) + + np.pi * p * (self._u(x[inds2]) - 1) + + 2 + * p**2 + * ( + self._u(x[inds2]) * np.arctanh(self._u(x[inds2]) ** -1) + + np.log(0.5 * x[inds2]) + ) + - 2 * self._f(x[inds2], p) * np.arctan(self._f(x[inds2], p)) + ) else: - if x * p == 1: - - func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \ - 2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + - np.log(0.5 * x)) + func = ( + np.log(0.25 * x**2 * p**2) + + np.pi * p * (self._u(x) - 1) + + 2 + * p**2 + * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x)) + ) elif x * p < 1: - - func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \ - 2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + - np.log(0.5 * x)) + 2 * self._g(x, p) * 
np.arctanh(self._g(x, p)) + func = ( + np.log(0.25 * x**2 * p**2) + + np.pi * p * (self._u(x) - 1) + + 2 + * p**2 + * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x)) + + 2 * self._g(x, p) * np.arctanh(self._g(x, p)) + ) else: - - func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \ - 2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + - np.log(0.5 * x)) - 2 * self._f(x, p) * np.arctan(self._f(x, p)) + func = ( + np.log(0.25 * x**2 * p**2) + + np.pi * p * (self._u(x) - 1) + + 2 + * p**2 + * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x)) + - 2 * self._f(x, p) * np.arctan(self._f(x, p)) + ) return func * prefactor def _alpha2rho0(self, alpha_Rs=None, Rs=None, r_core=None): - - p = Rs * r_core ** -1 + p = Rs * r_core**-1 gx = self._G(1, p) - rho0 = alpha_Rs / (2 * Rs ** 2 * gx) + rho0 = alpha_Rs / (2 * Rs**2 * gx) return rho0 def _rho2alpha(self, rho0=None, Rs=None, r_core=None): - p = Rs / r_core gx = self._G(1, p) - alpha = 2 * Rs ** 2 * gx * rho0 + alpha = 2 * Rs**2 * gx * rho0 return alpha diff --git a/lenstronomy/LensModel/Profiles/cored_density.py b/lenstronomy/LensModel/Profiles/cored_density.py index f922511af..b37d95393 100644 --- a/lenstronomy/LensModel/Profiles/cored_density.py +++ b/lenstronomy/LensModel/Profiles/cored_density.py @@ -1,10 +1,10 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util import derivative_util as calc_util -__all__ = ['CoredDensity'] +__all__ = ["CoredDensity"] class CoredDensity(LensProfileBase): @@ -27,14 +27,24 @@ class for a uniform cored density dropping steep in the outskirts """ + _s = 0.000001 # numerical limit for minimal radius - param_names = ['sigma0', 'r_core', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': -1, 'r_core': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100} + param_names = ["sigma0", "r_core", "center_x", "center_y"] + lower_limit_default = { + "sigma0": -1, + "r_core": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 10, + "r_core": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - potential of cored density profile + """Potential of cored density profile. :param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -46,13 +56,17 @@ def function(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) - return 2 * sigma0 * r_core ** 2 * (2 * np.log(r) - np.log(np.sqrt(r**2 + r_core**2) - r_core)) + return ( + 2 + * sigma0 + * r_core**2 + * (2 * np.log(r) - np.log(np.sqrt(r**2 + r_core**2) - r_core)) + ) def derivatives(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - deflection angle of cored density profile + """Deflection angle of cored density profile. 
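# Hedged consistency check for the CoreBurkert profile above: the radial deflection
# returned by coreBurkAlpha should equal mass_2d(R) / (pi * R), since both are built
# from the same projection integral _G. The import path is assumed from the file
# location; parameter values are illustrative only.
import numpy as np
from lenstronomy.LensModel.Profiles.coreBurkert import CoreBurkert

core_burkert = CoreBurkert()
R, Rs, rho0, r_core = 2.0, 1.0, 0.5, 0.4
alpha_x, alpha_y = core_burkert.coreBurkAlpha(R, Rs, rho0, r_core, ax_x=R, ax_y=0.0)
print(alpha_x, core_burkert.mass_2d(R, Rs, rho0, r_core) / (np.pi * R))  # should match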
:param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -84,7 +98,7 @@ def hessian(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) d_alpha_dr = self.d_alpha_dr(r, sigma0, r_core) alpha = self.alpha_r(r, sigma0, r_core) @@ -97,56 +111,61 @@ def hessian(self, x, y, sigma0, r_core, center_x=0, center_y=0): @staticmethod def alpha_r(r, sigma0, r_core): - """ - radial deflection angle of the cored density profile + """Radial deflection angle of the cored density profile. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: deflection angle """ - return 2 * sigma0 * r_core ** 2 / r * (1 - (1 + (r/r_core)**2) ** (-1./2)) + return ( + 2 * sigma0 * r_core**2 / r * (1 - (1 + (r / r_core) ** 2) ** (-1.0 / 2)) + ) @staticmethod def d_alpha_dr(r, sigma0, r_core): - """ - radial derivatives of the radial deflection angle + """Radial derivatives of the radial deflection angle. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: dalpha/dr """ - return 2 * sigma0 * (((1 + (r/r_core) ** 2) ** (-3./2)) - (r_core/r) ** 2 * (1 - (1+(r/r_core)**2) ** (-1./2))) + return ( + 2 + * sigma0 + * ( + ((1 + (r / r_core) ** 2) ** (-3.0 / 2)) + - (r_core / r) ** 2 * (1 - (1 + (r / r_core) ** 2) ** (-1.0 / 2)) + ) + ) @staticmethod def kappa_r(r, sigma0, r_core): - """ - convergence of the cored density profile. This routine is also for testing + """Convergence of the cored density profile. This routine is also for testing. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: convergence at r """ - return sigma0 * (1 + (r/r_core)**2) ** (-3./2) + return sigma0 * (1 + (r / r_core) ** 2) ** (-3.0 / 2) @staticmethod def density(r, sigma0, r_core): - """ - rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-2) + """Rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-2) :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: density at radius r """ - return 2/np.pi * sigma0 * r_core**3 * (r_core**2 + r**2) ** (-2) + return 2 / np.pi * sigma0 * r_core**3 * (r_core**2 + r**2) ** (-2) def density_lens(self, r, sigma0, r_core): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: radius (angular scale) :param sigma0: convergence in the core @@ -156,8 +175,7 @@ def density_lens(self, r, sigma0, r_core): return self.density(r, sigma0, r_core) def density_2d(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - projected density at projected radius r + """Projected density at projected radius r. :param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -169,13 +187,12 @@ def density_2d(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) return self.kappa_r(r, sigma0, r_core) def mass_2d(self, r, sigma0, r_core): - """ - mass enclosed in cylinder of radius r + """Mass enclosed in cylinder of radius r. 
:param r: radius (angular scale) :param sigma0: convergence in the core @@ -186,20 +203,23 @@ def mass_2d(self, r, sigma0, r_core): @staticmethod def mass_3d(r, sigma0, r_core): - """ - mass enclosed 3d radius + """Mass enclosed 3d radius. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: mass enclosed 3d radius """ - return 8 * sigma0 * r_core**3 * (np.arctan(r/r_core)/(2*r_core) - r / (2 * (r**2 + r_core**2))) + return ( + 8 + * sigma0 + * r_core**3 + * (np.arctan(r / r_core) / (2 * r_core) - r / (2 * (r**2 + r_core**2))) + ) def mass_3d_lens(self, r, sigma0, r_core): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units - For this profile those are identical. + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units For this profile those are identical. :param r: radius (angular scale) :param sigma0: convergence in the core diff --git a/lenstronomy/LensModel/Profiles/cored_density_2.py b/lenstronomy/LensModel/Profiles/cored_density_2.py index a325e59b8..708f78289 100644 --- a/lenstronomy/LensModel/Profiles/cored_density_2.py +++ b/lenstronomy/LensModel/Profiles/cored_density_2.py @@ -1,34 +1,41 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from scipy.integrate import quad from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util import derivative_util as calc_util -__all__ = ['CoredDensity2'] +__all__ = ["CoredDensity2"] class CoredDensity2(LensProfileBase): - """ - class for a uniform cored density dropping steep in the outskirts - credits for suggesting this profile goes to Kfir Blum + """Class for a uniform cored density dropping steep in the outskirts credits for + suggesting this profile goes to Kfir Blum. .. math:: \\rho(r) = 2/\\pi * \\Sigma_{\\rm crit} R_c^2 * (R_c^2 + r^2)^{-3/2} This profile drops like an NFW profile as math:`\\rho(r)^{-3}`. - """ - model_name = 'CORED_DENSITY_2' + model_name = "CORED_DENSITY_2" _s = 0.000001 # numerical limit for minimal radius - param_names = ['sigma0', 'r_core', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': -1, 'r_core': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100} + param_names = ["sigma0", "r_core", "center_x", "center_y"] + lower_limit_default = { + "sigma0": -1, + "r_core": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 10, + "r_core": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - potential of cored density profile + """Potential of cored density profile. :param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -40,7 +47,7 @@ def function(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) if isinstance(r, int) or isinstance(r, float): return self._num_integral_potential(r, sigma0, r_core) @@ -57,14 +64,15 @@ def _num_integral_potential(self, r, sigma0, r_core): :param r_core: :return: """ + def _integrand(x): return self.alpha_r(x, sigma0=sigma0, r_core=r_core) + f_ = quad(_integrand, 0, r)[0] return f_ def derivatives(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - deflection angle of cored density profile + """Deflection angle of cored density profile. 
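# Hedged sketch for the CoredDensity profile above: its analytic mass_3d should be
# consistent with density(), i.e. dM(<r)/dr is close to 4 * pi * r^2 * rho(r).
# The import path is assumed from the file location; values are illustrative only.
import numpy as np
from lenstronomy.LensModel.Profiles.cored_density import CoredDensity

cored = CoredDensity()
sigma0, r_core, r, dr = 0.5, 1.2, 2.0, 1e-5
dM_dr = (
    cored.mass_3d(r + dr, sigma0, r_core) - cored.mass_3d(r - dr, sigma0, r_core)
) / (2 * dr)
print(dM_dr, 4 * np.pi * r**2 * cored.density(r, sigma0, r_core))  # should agree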
:param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -96,7 +104,7 @@ def hessian(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) d_alpha_dr = self.d_alpha_dr(r, sigma0, r_core) alpha = self.alpha_r(r, sigma0, r_core) @@ -109,57 +117,61 @@ def hessian(self, x, y, sigma0, r_core, center_x=0, center_y=0): @staticmethod def alpha_r(r, sigma0, r_core): - """ - radial deflection angle of the cored density profile + """Radial deflection angle of the cored density profile. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: deflection angle """ - return sigma0 * r_core ** 2 * np.log((r_core**2 + r**2) / r_core**2) / r # this is mass_2d / (r * pi) + return ( + sigma0 * r_core**2 * np.log((r_core**2 + r**2) / r_core**2) / r + ) # this is mass_2d / (r * pi) @staticmethod def d_alpha_dr(r, sigma0, r_core): - """ - radial derivatives of the radial deflection angle + """Radial derivatives of the radial deflection angle. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: dalpha/dr """ - return sigma0 * r_core ** 2 * (-1./r**2 * np.log((r_core**2 + r**2) / r_core**2) + 1/r * r_core**2 / - (r**2 + r_core**2) * 2 * r/r_core**2) + return ( + sigma0 + * r_core**2 + * ( + -1.0 / r**2 * np.log((r_core**2 + r**2) / r_core**2) + + 1 / r * r_core**2 / (r**2 + r_core**2) * 2 * r / r_core**2 + ) + ) @staticmethod def kappa_r(r, sigma0, r_core): - """ - convergence of the cored density profile. This routine is also for testing + """Convergence of the cored density profile. This routine is also for testing. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: convergence at r """ - return sigma0 * r_core ** 2 / (r_core ** 2 + r ** 2) + return sigma0 * r_core**2 / (r_core**2 + r**2) @staticmethod def density(r, sigma0, r_core): - """ - rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-3/2) + """Rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-3/2) :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: density at radius r """ - return 1./2 * sigma0 * r_core**2 * (r_core**2 + r**2) ** (-3./2) + return 1.0 / 2 * sigma0 * r_core**2 * (r_core**2 + r**2) ** (-3.0 / 2) def density_lens(self, r, sigma0, r_core): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: radius (angular scale) :param sigma0: convergence in the core @@ -169,8 +181,7 @@ def density_lens(self, r, sigma0, r_core): return self.density(r, sigma0, r_core) def density_2d(self, x, y, sigma0, r_core, center_x=0, center_y=0): - """ - projected density at projected radius r + """Projected density at projected radius r. 
:param x: x-coordinate in angular units :param y: y-coordinate in angular units @@ -182,26 +193,26 @@ def density_2d(self, x, y, sigma0, r_core, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) return self.kappa_r(r, sigma0, r_core) @staticmethod def mass_2d(r, sigma0, r_core): - """ - mass enclosed in cylinder of radius r + """Mass enclosed in cylinder of radius r. :param r: radius (angular scale) :param sigma0: convergence in the core :param r_core: core radius :return: mass enclosed in cylinder of radius r """ - return sigma0 * r_core ** 2 * np.pi * np.log((r_core**2 + r**2) / r_core**2) + return ( + sigma0 * r_core**2 * np.pi * np.log((r_core**2 + r**2) / r_core**2) + ) @staticmethod def mass_3d(r, sigma0, r_core): - """ - mass enclosed 3d radius + """Mass enclosed 3d radius. :param r: radius (angular scale) :param sigma0: convergence in the core @@ -209,12 +220,18 @@ def mass_3d(r, sigma0, r_core): :return: mass enclosed 3d radius """ r_ = np.sqrt(r**2 + r_core**2) - return 2 * np.pi * sigma0 * r_core**2 * (r_ * np.log(r_ + r) - np.log(r_core) * r_ - r) / r_ + return ( + 2 + * np.pi + * sigma0 + * r_core**2 + * (r_ * np.log(r_ + r) - np.log(r_core) * r_ - r) + / r_ + ) def mass_3d_lens(self, r, sigma0, r_core): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units - For this profile those are identical. + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units For this profile those are identical. :param r: radius (angular scale) :param sigma0: convergence in the core diff --git a/lenstronomy/LensModel/Profiles/cored_density_exp.py b/lenstronomy/LensModel/Profiles/cored_density_exp.py index ab3a394fa..f3d3e35b3 100644 --- a/lenstronomy/LensModel/Profiles/cored_density_exp.py +++ b/lenstronomy/LensModel/Profiles/cored_density_exp.py @@ -1,34 +1,40 @@ -__author__ = 'lucateo' +__author__ = "lucateo" import numpy as np from scipy.special import exp1, erf from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CoredDensityExp'] +__all__ = ["CoredDensityExp"] class CoredDensityExp(LensProfileBase): - """ - this class contains functions concerning an exponential cored density profile, - namely + """This class contains functions concerning an exponential cored density profile, + namely. 
..math:: \\rho(r) = \\rho_0 \\exp(- (\\theta / \\theta_c)^2) - """ + _s = 0.000001 # numerical limit for minimal radius - param_names = ['kappa_0', 'theta_c', 'center_x', 'center_y'] - lower_limit_default = {'kappa_0': 0, 'theta_c': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'kappa_0': 10, 'theta_c': 100, 'center_x': 100, 'center_y': 100} + param_names = ["kappa_0", "theta_c", "center_x", "center_y"] + lower_limit_default = { + "kappa_0": 0, + "theta_c": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "kappa_0": 10, + "theta_c": 100, + "center_x": 100, + "center_y": 100, + } @staticmethod def rhotilde(kappa_0, theta_c): - """ - Computes the central density in angular units - :param kappa_0: central convergence of profile - :param theta_c: core radius (in arcsec) - :return: central density in 1/arcsec - """ + """Computes the central density in angular units :param kappa_0: central + convergence of profile :param theta_c: core radius (in arcsec) :return: central + density in 1/arcsec.""" return kappa_0 / (np.sqrt(np.pi) * theta_c) def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): @@ -43,28 +49,24 @@ def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_ ** 2 + y_ ** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) - Integral_factor = 0.5 * exp1((r/theta_c)**2) + np.log((r/theta_c)) + Integral_factor = 0.5 * exp1((r / theta_c) ** 2) + np.log((r / theta_c)) function = kappa_0 * theta_c**2 * Integral_factor return function @staticmethod def alpha_radial(r, kappa_0, theta_c): - """ - returns the radial part of the deflection angle - :param r: angular position (normally in units of arc seconds) - :param kappa_0: central convergence of profile - :param theta_c: core radius (in arcsec) - :return: radial deflection angle - """ + """Returns the radial part of the deflection angle :param r: angular position + (normally in units of arc seconds) :param kappa_0: central convergence of + profile :param theta_c: core radius (in arcsec) :return: radial deflection + angle.""" prefactor = kappa_0 * theta_c**2 / r - return prefactor * (1 - np.exp(- (r/theta_c)**2)) + return prefactor * (1 - np.exp(-((r / theta_c) ** 2))) def derivatives(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (lensing potential), which are - the deflection angles + """Returns df/dx and df/dy of the function (lensing potential), which are the + deflection angles. 
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -97,18 +99,17 @@ def hessian(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, 0.00000001) prefactor = kappa_0 * theta_c**2 - expFactor = np.exp(- (R/theta_c)**2) - factor1 = (1 - expFactor)/R**4 - factor2 = 2/(R**2 * theta_c**2) * expFactor + expFactor = np.exp(-((R / theta_c) ** 2)) + factor1 = (1 - expFactor) / R**4 + factor2 = 2 / (R**2 * theta_c**2) * expFactor f_xx = prefactor * (factor1 * (y_**2 - x_**2) + factor2 * x_**2) f_yy = prefactor * (factor1 * (x_**2 - y_**2) + factor2 * y_**2) - f_xy = prefactor * (- factor1 * 2 * x_ * y_ + factor2 * x_*y_) + f_xy = prefactor * (-factor1 * 2 * x_ * y_ + factor2 * x_ * y_) return f_xx, f_xy, f_xy, f_yy def density(self, R, kappa_0, theta_c): - """ - three dimensional density profile in angular units - (rho0_physical = rho0_angular Sigma_crit / D_lens) + """Three dimensional density profile in angular units (rho0_physical = + rho0_angular Sigma_crit / D_lens) :param R: projected angular position (normally in units of arc seconds) :param kappa_0: central convergence of profile @@ -116,12 +117,12 @@ def density(self, R, kappa_0, theta_c): :return: rho(R) density """ rhotilde = self.rhotilde(kappa_0, theta_c) - return rhotilde * np.exp(-(R/theta_c)**2) + return rhotilde * np.exp(-((R / theta_c) ** 2)) def density_lens(self, r, kappa_0, theta_c): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: angular position (normally in units of arc seconds) :param kappa_0: central convergence of profile @@ -132,21 +133,19 @@ def density_lens(self, r, kappa_0, theta_c): @staticmethod def kappa_r(R, kappa_0, theta_c): - """ - convergence of the cored density profile. This routine is also for testing + """Convergence of the cored density profile. This routine is also for testing. :param R: radius (angular scale) :param kappa_0: convergence in the core :param theta_c: core radius :return: convergence at r """ - expFactor = np.exp(- (R/theta_c)**2) + expFactor = np.exp(-((R / theta_c) ** 2)) return kappa_0 * expFactor def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): - """ - projected two dimensional ULDM profile (convergence * Sigma_crit), but given our - units convention for rho0, it is basically the convergence + """Projected two dimensional ULDM profile (convergence * Sigma_crit), but given + our units convention for rho0, it is basically the convergence. 
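As a quick consistency check of the statement above (with this units convention the projected profile is simply the convergence), one can integrate `density()` along the line of sight and compare with `kappa_r()`. An illustrative sketch, assuming lenstronomy and scipy are importable and using arbitrary parameter values:

```python
# Illustrative check: the line-of-sight integral of the 3D density reproduces kappa_r(R).
import numpy as np
from scipy.integrate import quad
from lenstronomy.LensModel.Profiles.cored_density_exp import CoredDensityExp

prof = CoredDensityExp()
kappa_0, theta_c, R = 0.1, 5.0, 3.0  # arbitrary example values
kappa_los = 2 * quad(
    lambda z: prof.density(np.sqrt(R**2 + z**2), kappa_0, theta_c), 0, np.inf
)[0]
print(np.isclose(kappa_los, prof.kappa_r(R, kappa_0, theta_c)))  # expected: True
```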
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -161,31 +160,23 @@ def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0): @staticmethod def mass_3d(R, kappa_0, theta_c): - """ - mass enclosed a 3d sphere or radius r - :param kappa_0: central convergence of profile - :param theta_c: core radius (in arcsec) - :param R: radius in arcseconds - :return: mass of soliton in angular units - """ - integral_factor = np.sqrt(np.pi) * erf(R/theta_c)/2 - R/theta_c * np.exp(-(R/theta_c)**2) + """Mass enclosed a 3d sphere or radius r :param kappa_0: central convergence of + profile :param theta_c: core radius (in arcsec) :param R: radius in arcseconds + :return: mass of soliton in angular units.""" + integral_factor = np.sqrt(np.pi) * erf(R / theta_c) / 2 - R / theta_c * np.exp( + -((R / theta_c) ** 2) + ) m_3d = 2 * np.sqrt(np.pi) * kappa_0 * theta_c**2 * integral_factor return m_3d def mass_3d_lens(self, r, kappa_0, theta_c): - """ - mass enclosed a 3d sphere or radius r - :param kappa_0: central convergence of profile - :param theta_c: core radius (in arcsec) - :return: mass - """ + """Mass enclosed a 3d sphere or radius r :param kappa_0: central convergence of + profile :param theta_c: core radius (in arcsec) :return: mass.""" m_3d = self.mass_3d(r, kappa_0, theta_c) return m_3d def mass_2d(self, R, kappa_0, theta_c): - """ - mass enclosed a 2d sphere of radius r - returns + """Mass enclosed a 2d sphere of radius r returns. .. math:: M_{2D} = 2 \\pi \\int_0^r dr' r' \\int dz \\rho(\\sqrt(r'^2 + z^2)) diff --git a/lenstronomy/LensModel/Profiles/cored_density_mst.py b/lenstronomy/LensModel/Profiles/cored_density_mst.py index 9a389a828..b6ddda7aa 100644 --- a/lenstronomy/LensModel/Profiles/cored_density_mst.py +++ b/lenstronomy/LensModel/Profiles/cored_density_mst.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.LensModel.Profiles.cored_density import CoredDensity @@ -7,43 +7,58 @@ from lenstronomy.LensModel.Profiles.uldm import Uldm from lenstronomy.LensModel.Profiles.convergence import Convergence -__all__ = ['CoredDensityMST'] +__all__ = ["CoredDensityMST"] class CoredDensityMST(LensProfileBase): + """Approximate mass-sheet transform of a density core. + + This routine takes the parameters of the density core and subtracts a mass-sheet + that approximates the cored profile in it's center to counter-act (in approximation) + this model. This allows for better sampling of the mass-sheet transformed quantities + that do not have strong covariances. The subtraction of the mass-sheet is done such + that the sampler returns the real central convergence of the original model (but be + careful, the output of quantities like the Einstein angle of the main deflector are + still the not-scaled one). Attention!!! The interpretation of the result is that the + mass sheet as 'CONVERGENCE' that is present needs to be subtracted in post- + processing. """ - approximate mass-sheet transform of a density core. This routine takes the parameters of the density core and - subtracts a mass-sheet that approximates the cored profile in it's center to counter-act (in approximation) this - model. This allows for better sampling of the mass-sheet transformed quantities that do not have strong covariances. 
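The sampled parameter is `lambda_approx`; as `function()`, `derivatives()` and `hessian()` further down show, it is converted internally to the convergence of the subtracted sheet, kappa_ext = (1 - lambda_approx) / lambda_approx. An illustrative sketch of the intended cancellation near the core (assuming lenstronomy is importable; the CORED_DENSITY_2 variant and all numbers are arbitrary choices):

```python
# Illustrative check: near the center the cored profile and the subtracted sheet
# nearly cancel, so the convergence of this correction profile is ~0 there.
from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST

mst_corr = CoredDensityMST(profile_type="CORED_DENSITY_2")
f_xx, f_xy, f_yx, f_yy = mst_corr.hessian(x=0.01, y=0.0, lambda_approx=0.9, r_core=5.0)
print(0.5 * (f_xx + f_yy))  # ~0: the sheet with kappa_ext = (1 - 0.9)/0.9 cancels the core
```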
- The subtraction of the mass-sheet is done such that the sampler returns the real central convergence of the original - model (but be careful, the output of quantities like the Einstein angle of the main deflector are still the - not-scaled one). - Attention!!! The interpretation of the result is that the mass sheet as 'CONVERGENCE' that is present needs to be - subtracted in post-processing. - """ - param_names = ['lambda_approx', 'r_core', 'center_x', 'center_y'] - lower_limit_default = {'lambda_approx': -1, 'r_core': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'lambda_approx': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100} - def __init__(self, profile_type='CORED_DENSITY'): - if profile_type == 'CORED_DENSITY': + param_names = ["lambda_approx", "r_core", "center_x", "center_y"] + lower_limit_default = { + "lambda_approx": -1, + "r_core": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "lambda_approx": 10, + "r_core": 100, + "center_x": 100, + "center_y": 100, + } + + def __init__(self, profile_type="CORED_DENSITY"): + if profile_type == "CORED_DENSITY": self._profile = CoredDensity() - elif profile_type == 'CORED_DENSITY_2': + elif profile_type == "CORED_DENSITY_2": self._profile = CoredDensity2() - elif profile_type == 'CORED_DENSITY_EXP': + elif profile_type == "CORED_DENSITY_EXP": self._profile = CoredDensityExp() # Due to parameters name conventions/positioning, right now only the free soliton with - # the default value of slope = 8 is supported - elif profile_type == 'CORED_DENSITY_ULDM': + # the default value of slope = 8 is supported + elif profile_type == "CORED_DENSITY_ULDM": self._profile = Uldm() else: - raise ValueError('profile_type %s not supported for CoredDensityMST instance.' % profile_type) + raise ValueError( + "profile_type %s not supported for CoredDensityMST instance." + % profile_type + ) self._convergence = Convergence() super(CoredDensityMST, self).__init__() def function(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): - """ - lensing potential of approximate mass-sheet correction + """Lensing potential of approximate mass-sheet correction. :param x: x-coordinate :param y: y-coordinate @@ -53,14 +68,15 @@ def function(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): :param center_y: y-center of the profile :return: lensing potential correction """ - kappa_ext = (1 - lambda_approx)/lambda_approx - f_cored_density = self._profile.function(x, y, kappa_ext, r_core, center_x, center_y) + kappa_ext = (1 - lambda_approx) / lambda_approx + f_cored_density = self._profile.function( + x, y, kappa_ext, r_core, center_x, center_y + ) f_ms = self._convergence.function(x, y, kappa_ext, center_x, center_y) return f_cored_density - f_ms def derivatives(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): - """ - deflection angles of approximate mass-sheet correction + """Deflection angles of approximate mass-sheet correction. 
:param x: x-coordinate :param y: y-coordinate @@ -70,14 +86,17 @@ def derivatives(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): :param center_y: y-center of the profile :return: alpha_x, alpha_y """ - kappa_ext = (1 - lambda_approx)/lambda_approx - f_x_cd, f_y_cd = self._profile.derivatives(x, y, kappa_ext, r_core, center_x, center_y) - f_x_ms, f_y_ms = self._convergence.derivatives(x, y, kappa_ext, center_x, center_y) + kappa_ext = (1 - lambda_approx) / lambda_approx + f_x_cd, f_y_cd = self._profile.derivatives( + x, y, kappa_ext, r_core, center_x, center_y + ) + f_x_ms, f_y_ms = self._convergence.derivatives( + x, y, kappa_ext, center_x, center_y + ) return f_x_cd - f_x_ms, f_y_cd - f_y_ms def hessian(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): - """ - Hessian terms of approximate mass-sheet correction + """Hessian terms of approximate mass-sheet correction. :param x: x-coordinate :param y: y-coordinate @@ -87,7 +106,16 @@ def hessian(self, x, y, lambda_approx, r_core, center_x=0, center_y=0): :param center_y: y-center of the profile :return: df/dxx, df/dxy, df/dyx, df/dyy """ - kappa_ext = (1 - lambda_approx)/lambda_approx - f_xx_cd, f_xy_cd, f_yx_cd, f_yy_cd = self._profile.hessian(x, y, kappa_ext, r_core, center_x, center_y) - f_xx_ms, f_xy_ms, f_yx_ms, f_yy_ms = self._convergence.hessian(x, y, kappa_ext, center_x, center_y) - return f_xx_cd - f_xx_ms, f_xy_cd - f_xy_ms, f_yx_cd - f_yx_ms, f_yy_cd - f_yy_ms + kappa_ext = (1 - lambda_approx) / lambda_approx + f_xx_cd, f_xy_cd, f_yx_cd, f_yy_cd = self._profile.hessian( + x, y, kappa_ext, r_core, center_x, center_y + ) + f_xx_ms, f_xy_ms, f_yx_ms, f_yy_ms = self._convergence.hessian( + x, y, kappa_ext, center_x, center_y + ) + return ( + f_xx_cd - f_xx_ms, + f_xy_cd - f_xy_ms, + f_yx_cd - f_yx_ms, + f_yy_cd - f_yy_ms, + ) diff --git a/lenstronomy/LensModel/Profiles/cored_steep_ellipsoid.py b/lenstronomy/LensModel/Profiles/cored_steep_ellipsoid.py index 7ecec92ef..ce3b55979 100644 --- a/lenstronomy/LensModel/Profiles/cored_steep_ellipsoid.py +++ b/lenstronomy/LensModel/Profiles/cored_steep_ellipsoid.py @@ -1,11 +1,17 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np from lenstronomy.Util import param_util from lenstronomy.Util import util -__all__ = ['CSE', 'CSEMajorAxis', 'CSEMajorAxisSet', 'CSEProductAvg', 'CSEProductAvgSet'] +__all__ = [ + "CSE", + "CSEMajorAxis", + "CSEMajorAxisSet", + "CSEProductAvg", + "CSEProductAvgSet", +] class CSE(LensProfileBase): @@ -25,17 +31,34 @@ class CSE(LensProfileBase): \\xi(x, y) = \\sqrt{x^2 + \\frac{y^2}{q^2}} """ - param_names = ['A', 's', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'A': -1000, 's': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'A': 1000, 's': 10000, 'e1': 0.5, 'e2': 0.5, 'center_x': -100, 'center_y': -100} - def __init__(self, axis='product_avg'): - if axis == 'major': + param_names = ["A", "s", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "A": -1000, + "s": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "A": 1000, + "s": 10000, + "e1": 0.5, + "e2": 0.5, + "center_x": -100, + "center_y": -100, + } + + def __init__(self, axis="product_avg"): + if axis == "major": self.major_axis_model = CSEMajorAxis() - elif axis == 'product_avg': + elif axis == "product_avg": self.major_axis_model = CSEProductAvg() else: - raise ValueError("axis 
must be set to 'major' or 'product_avg'. Input is %s ." % axis) + raise ValueError( + "axis must be set to 'major' or 'product_avg'. Input is %s ." % axis + ) super(CSE, self).__init__() def function(self, x, y, a, s, e1, e2, center_x, center_y): @@ -111,8 +134,8 @@ def hessian(self, x, y, a, s, e1, e2, center_x, center_y): f__xx, f__xy, __, f__yy = self.major_axis_model.hessian(x__, y__, a, s, q) # rotate back - kappa = 1. / 2 * (f__xx + f__yy) - gamma1__ = 1. / 2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_q) * gamma1__ - np.sin(2 * phi_q) * gamma2__ gamma2 = +np.sin(2 * phi_q) * gamma1__ + np.cos(2 * phi_q) * gamma2__ @@ -139,9 +162,23 @@ class CSEMajorAxis(LensProfileBase): \\xi(x, y) = \\sqrt{x^2 + \\frac{y^2}{q^2}} """ - param_names = ['A', 's', 'q', 'center_x', 'center_y'] - lower_limit_default = {'A': -1000, 's': 0, 'q': 0.001, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'A': 1000, 's': 10000, 'q': 0.99999, 'e2': 0.5, 'center_x': -100, 'center_y': -100} + + param_names = ["A", "s", "q", "center_x", "center_y"] + lower_limit_default = { + "A": -1000, + "s": 0, + "q": 0.001, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "A": 1000, + "s": 10000, + "q": 0.99999, + "e2": 0.5, + "center_x": -100, + "center_y": -100, + } def function(self, x, y, a, s, q): """ @@ -155,9 +192,9 @@ def function(self, x, y, a, s, q): """ # potential calculation - psi = np.sqrt(q**2*(s**2 + x**2) + y**2) - Phi = (psi + s)**2 + (1-q**2) * x**2 - phi = q/(2*s) * np.log(Phi) - q/s * np.log((1+q) * s) + psi = np.sqrt(q**2 * (s**2 + x**2) + y**2) + Phi = (psi + s) ** 2 + (1 - q**2) * x**2 + phi = q / (2 * s) * np.log(Phi) - q / s * np.log((1 + q) * s) return a * phi def derivatives(self, x, y, a, s, q): @@ -171,9 +208,9 @@ def derivatives(self, x, y, a, s, q): :return: deflection in x- and y-direction """ - psi = np.sqrt(q ** 2 * (s ** 2 + x ** 2) + y ** 2) - Phi = (psi + s) ** 2 + (1 - q ** 2) * x ** 2 - f_x = q * x * (psi + q**2*s) / (s * psi * Phi) + psi = np.sqrt(q**2 * (s**2 + x**2) + y**2) + Phi = (psi + s) ** 2 + (1 - q**2) * x**2 + f_x = q * x * (psi + q**2 * s) / (s * psi * Phi) f_y = q * y * (psi + s) / (s * psi * Phi) return a * f_x, a * f_y @@ -190,19 +227,42 @@ def hessian(self, x, y, a, s, q): """ # equations 21-23 in Oguri 2021 - psi = np.sqrt(q ** 2 * (s ** 2 + x ** 2) + y ** 2) - Phi = (psi + s) ** 2 + (1 - q ** 2) * x ** 2 - f_xx = q/(s * Phi) * (1 + q**2*s*(q**2 * s**2 + y**2)/psi**3 - 2*x**2*(psi + q**2*s)**2/(psi**2 * Phi)) - f_yy = q/(s * Phi) * (1 + q**2 * s * (s**2 + x**2)/psi**3 - 2*y**2*(psi + s)**2/(psi**2 * Phi)) - f_xy = - q * x*y / (s * Phi) * (q**2 * s / psi**3 + 2 * (psi + q**2*s) * (psi + s) / (psi**2 * Phi)) + psi = np.sqrt(q**2 * (s**2 + x**2) + y**2) + Phi = (psi + s) ** 2 + (1 - q**2) * x**2 + f_xx = ( + q + / (s * Phi) + * ( + 1 + + q**2 * s * (q**2 * s**2 + y**2) / psi**3 + - 2 * x**2 * (psi + q**2 * s) ** 2 / (psi**2 * Phi) + ) + ) + f_yy = ( + q + / (s * Phi) + * ( + 1 + + q**2 * s * (s**2 + x**2) / psi**3 + - 2 * y**2 * (psi + s) ** 2 / (psi**2 * Phi) + ) + ) + f_xy = ( + -q + * x + * y + / (s * Phi) + * ( + q**2 * s / psi**3 + + 2 * (psi + q**2 * s) * (psi + s) / (psi**2 * Phi) + ) + ) return a * f_xx, a * f_xy, a * f_xy, a * f_yy class CSEMajorAxisSet(LensProfileBase): - """ - a set of CSE profiles along a joint center and axis - """ + """A set of CSE profiles along a joint center and axis.""" def __init__(self): self.major_axis_model = 
CSEMajorAxis() @@ -260,9 +320,8 @@ def hessian(self, x, y, a_list, s_list, q): class CSEProductAvg(LensProfileBase): - """ - Cored steep ellipsoid (CSE) evaluated at the product-averaged radius sqrt(ab), - such that mass is not changed when increasing ellipticity + """Cored steep ellipsoid (CSE) evaluated at the product-averaged radius sqrt(ab), + such that mass is not changed when increasing ellipticity. Same as CSEMajorAxis but evaluated at r=sqrt(q)*r_original @@ -276,13 +335,24 @@ class CSEProductAvg(LensProfileBase): .. math:: \\xi(x, y) = \\sqrt{qx^2 + \\frac{y^2}{q}} - - - """ - param_names = ['A', 's', 'q', 'center_x', 'center_y'] - lower_limit_default = {'A': -1000, 's': 0, 'q': 0.001, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'A': 1000, 's': 10000, 'q': 0.99999, 'e2': 0.5, 'center_x': -100, 'center_y': -100} + + param_names = ["A", "s", "q", "center_x", "center_y"] + lower_limit_default = { + "A": -1000, + "s": 0, + "q": 0.001, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "A": 1000, + "s": 10000, + "q": 0.99999, + "e2": 0.5, + "center_x": -100, + "center_y": -100, + } def __init__(self): super(CSEProductAvg, self).__init__() @@ -290,8 +360,10 @@ def __init__(self): @staticmethod def _convert2prodavg(x, y, a, s, q): - """ - converts coordinates and re-normalizes major-axis parameterization to instead be wrt. product-averaged + """Converts coordinates and re-normalizes major-axis parameterization to instead + be wrt. + + product-averaged """ a = a / q x = x * np.sqrt(q) @@ -340,9 +412,7 @@ def hessian(self, x, y, a, s, q): class CSEProductAvgSet(LensProfileBase): - """ - a set of CSE profiles along a joint center and axis - """ + """A set of CSE profiles along a joint center and axis.""" def __init__(self): self.major_axis_model = CSEProductAvg() diff --git a/lenstronomy/LensModel/Profiles/curved_arc_const.py b/lenstronomy/LensModel/Profiles/curved_arc_const.py index dbfefaa77..1ceb84bb9 100644 --- a/lenstronomy/LensModel/Profiles/curved_arc_const.py +++ b/lenstronomy/LensModel/Profiles/curved_arc_const.py @@ -3,13 +3,12 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util import util -__all__ = ['CurvedArcConstMST', 'CurvedArcConst'] +__all__ = ["CurvedArcConstMST", "CurvedArcConst"] class CurvedArcConstMST(LensProfileBase): - """ - lens model that describes a section of a highly magnified deflector region. - The parameterization is chosen to describe local observables efficient. + """Lens model that describes a section of a highly magnified deflector region. The + parameterization is chosen to describe local observables efficient. Observables are: - curvature radius (basically bending relative to the center of the profile) @@ -22,18 +21,49 @@ class CurvedArcConstMST(LensProfileBase): - Should work with other perturbative models without breaking its meaning (say when adding additional shear terms) - Must best reflect the observables in lensing - minimal covariances between the parameters, intuitive parameterization. 
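As the `derivatives()` and `hessian()` below make explicit, this mass-sheet composition reduces to three derived quantities that are handed to the two sub-profiles (a CurvedArcConst arc with unity radial stretch plus a constant-convergence sheet). A purely illustrative summary with arbitrary numbers:

```python
# Illustrative only: the derived quantities CurvedArcConstMST passes to its sub-profiles.
tangential_stretch, radial_stretch = 10.0, 1.2  # arbitrary example values

lambda_mst = 1.0 / radial_stretch                    # rescaling of the arc deflection
kappa_ext = 1 - lambda_mst                           # convergence of the added sheet
curve_stretch = tangential_stretch / radial_stretch  # stretch handed to CurvedArcConst
print(lambda_mst, kappa_ext, curve_stretch)
```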
- """ - param_names = ['tangential_stretch', 'radial_stretch', 'curvature', 'direction', 'center_x', 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'radial_stretch': -5, 'curvature': 0.000001, 'direction': -np.pi, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'radial_stretch': 5, 'curvature': 100, 'direction': np.pi, 'center_x': 100, 'center_y': 100} + + param_names = [ + "tangential_stretch", + "radial_stretch", + "curvature", + "direction", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "radial_stretch": -5, + "curvature": 0.000001, + "direction": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "radial_stretch": 5, + "curvature": 100, + "direction": np.pi, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._mst = Convergence() self._curve = CurvedArcConst() super(CurvedArcConstMST, self).__init__() - def function(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def function( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ ATTENTION: there may not be a global lensing potential! @@ -47,9 +77,21 @@ def function(self, x, y, tangential_stretch, radial_stretch, curvature, directio :param center_y: center of source in image plane :return: """ - raise NotImplemented('lensing potential for regularly curved arc is not implemented') - - def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + raise NotImplemented( + "lensing potential for regularly curved arc is not implemented" + ) + + def derivatives( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -62,17 +104,31 @@ def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direc :param center_y: center of source in image plane :return: """ - lambda_mst = 1. / radial_stretch + lambda_mst = 1.0 / radial_stretch kappa_ext = 1 - lambda_mst curve_stretch = tangential_stretch / radial_stretch - f_x_curve, f_y_curve = self._curve.derivatives(x, y, curve_stretch, curvature, direction, center_x, center_y) - f_x_mst, f_y_mst = self._mst.derivatives(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) + f_x_curve, f_y_curve = self._curve.derivatives( + x, y, curve_stretch, curvature, direction, center_x, center_y + ) + f_x_mst, f_y_mst = self._mst.derivatives( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) f_x = lambda_mst * f_x_curve + f_x_mst f_y = lambda_mst * f_y_curve + f_y_mst return f_x, f_y - def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def hessian( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -85,11 +141,15 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction :param center_y: center of source in image plane :return: """ - lambda_mst = 1. 
/ radial_stretch + lambda_mst = 1.0 / radial_stretch kappa_ext = 1 - lambda_mst curve_stretch = tangential_stretch / radial_stretch - f_xx_c, f_xy_c, f_yx_c, f_yy_c = self._curve.hessian(x, y, curve_stretch, curvature, direction, center_x, center_y) - f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) + f_xx_c, f_xy_c, f_yx_c, f_yy_c = self._curve.hessian( + x, y, curve_stretch, curvature, direction, center_x, center_y + ) + f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) f_xx = lambda_mst * f_xx_c + f_xx_mst f_xy = lambda_mst * f_xy_c + f_xy_mst f_yx = lambda_mst * f_yx_c + f_yx_mst @@ -98,17 +158,34 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction class CurvedArcConst(LensProfileBase): - """ - curved arc lensing with orientation of curvature perpendicular to the x-axis with unity radial stretch - - """ - param_names = ['tangential_stretch', 'curvature', 'direction', 'center_x', 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'curvature': 0.000001, 'direction': -np.pi, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'curvature': 100, 'direction': np.pi, - 'center_x': 100, 'center_y': 100} - - def function(self, x, y, tangential_stretch, curvature, direction, center_x, center_y): + """Curved arc lensing with orientation of curvature perpendicular to the x-axis with + unity radial stretch.""" + + param_names = [ + "tangential_stretch", + "curvature", + "direction", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "curvature": 0.000001, + "direction": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "curvature": 100, + "direction": np.pi, + "center_x": 100, + "center_y": 100, + } + + def function( + self, x, y, tangential_stretch, curvature, direction, center_x, center_y + ): """ ATTENTION: there may not be a global lensing potential! 
@@ -121,9 +198,13 @@ def function(self, x, y, tangential_stretch, curvature, direction, center_x, cen :param center_y: center of source in image plane :return: """ - raise NotImplemented('lensing potential for regularly curved arc is not implemented') + raise NotImplemented( + "lensing potential for regularly curved arc is not implemented" + ) - def derivatives(self, x, y, tangential_stretch, curvature, direction, center_x, center_y): + def derivatives( + self, x, y, tangential_stretch, curvature, direction, center_x, center_y + ): """ :param x: @@ -139,7 +220,7 @@ def derivatives(self, x, y, tangential_stretch, curvature, direction, center_x, r = 1 / curvature # deflection angle to allow for tangential stretch # (ratio of source position around zero point relative to radius is tangential stretch) - alpha = r * (1/tangential_stretch + 1) + alpha = r * (1 / tangential_stretch + 1) # shift x_ = x - center_x @@ -166,7 +247,9 @@ def derivatives(self, x, y, tangential_stretch, curvature, direction, center_x, f_x, f_y = util.rotate(f__x, f__y, -direction) return f_x, f_y - def hessian(self, x, y, tangential_stretch, curvature, direction, center_x, center_y): + def hessian( + self, x, y, tangential_stretch, curvature, direction, center_x, center_y + ): """ :param x: @@ -178,16 +261,21 @@ def hessian(self, x, y, tangential_stretch, curvature, direction, center_x, cent :param center_y: center of source in image plane :return: """ - alpha_ra, alpha_dec = self.derivatives(x, y, tangential_stretch, curvature, direction, center_x, center_y) + alpha_ra, alpha_dec = self.derivatives( + x, y, tangential_stretch, curvature, direction, center_x, center_y + ) diff = 0.0000001 - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, tangential_stretch, curvature, direction, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, tangential_stretch, curvature, direction, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, tangential_stretch, curvature, direction, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, tangential_stretch, curvature, direction, center_x, center_y + ) f_xx = (alpha_ra_dx - alpha_ra) / diff f_xy = (alpha_ra_dy - alpha_ra) / diff f_yx = (alpha_dec_dx - alpha_dec) / diff f_yy = (alpha_dec_dy - alpha_dec) / diff - """ #TODO make rotational invariances of double derivates with curl r = 1 / curvature @@ -233,7 +321,7 @@ def _deflection(y, r, tangential_stretch): :return: deflections f_x, f_y """ - x_r = np.sqrt(r ** 2 - y ** 2) + x_r = np.sqrt(r**2 - y**2) f_x = x_r - r # move y-coordinate circle length / tangential stretch up from x-axis phi = np.arcsin(y / r) diff --git a/lenstronomy/LensModel/Profiles/curved_arc_sis_mst.py b/lenstronomy/LensModel/Profiles/curved_arc_sis_mst.py index 23a8b9bc4..12fc6344f 100644 --- a/lenstronomy/LensModel/Profiles/curved_arc_sis_mst.py +++ b/lenstronomy/LensModel/Profiles/curved_arc_sis_mst.py @@ -3,13 +3,12 @@ from lenstronomy.LensModel.Profiles.convergence import Convergence from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CurvedArcSISMST'] +__all__ = ["CurvedArcSISMST"] class CurvedArcSISMST(LensProfileBase): - """ - lens model that describes a section of a highly magnified deflector region. - The parameterization is chosen to describe local observables efficient. + """Lens model that describes a section of a highly magnified deflector region. The + parameterization is chosen to describe local observables efficient. 
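The `stretch2sis_mst()` / `sis_mst2stretch()` pair defined further down in this class converts between the arc parameterization and an equivalent SIS plus mass sheet. A minimal round-trip sketch (assuming lenstronomy is importable; the input values are arbitrary):

```python
# Illustrative round-trip check: sis_mst2stretch() should undo stretch2sis_mst().
import numpy as np
from lenstronomy.LensModel.Profiles.curved_arc_sis_mst import CurvedArcSISMST

arc = CurvedArcSISMST()
kwargs = dict(tangential_stretch=8.0, radial_stretch=1.1, curvature=0.4,
              direction=-0.5, center_x=0.2, center_y=0.3)
theta_E, kappa_ext, cx_sis, cy_sis = arc.stretch2sis_mst(**kwargs)
back = arc.sis_mst2stretch(theta_E, kappa_ext, cx_sis, cy_sis,
                           kwargs["center_x"], kwargs["center_y"])
print(np.allclose(back, (8.0, 1.1, 0.4, -0.5)))  # expected: True
```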
Observables are: - curvature radius (basically bending relative to the center of the profile) @@ -22,11 +21,32 @@ class CurvedArcSISMST(LensProfileBase): - Should work with other perturbative models without breaking its meaning (say when adding additional shear terms) - Must best reflect the observables in lensing - minimal covariances between the parameters, intuitive parameterization. - """ - param_names = ['tangential_stretch', 'radial_stretch', 'curvature', 'direction', 'center_x', 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'radial_stretch': -5, 'curvature': 0.000001, 'direction': -np.pi, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'radial_stretch': 5, 'curvature': 100, 'direction': np.pi, 'center_x': 100, 'center_y': 100} + + param_names = [ + "tangential_stretch", + "radial_stretch", + "curvature", + "direction", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "radial_stretch": -5, + "curvature": 0.000001, + "direction": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "radial_stretch": 5, + "curvature": 100, + "direction": np.pi, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._sis = SIS() @@ -34,7 +54,9 @@ def __init__(self): super(CurvedArcSISMST, self).__init__() @staticmethod - def stretch2sis_mst(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ): """ :param tangential_stretch: float, stretch of intrinsic source in tangential direction @@ -45,18 +67,21 @@ def stretch2sis_mst(tangential_stretch, radial_stretch, curvature, direction, ce :param center_y: center of source in image plane :return: parameters in terms of a spherical SIS + MST resulting in the same observables """ - center_x_sis, center_y_sis = center_deflector(curvature, direction, center_x, center_y) - r_curvature = 1. / curvature - lambda_mst = 1./radial_stretch + center_x_sis, center_y_sis = center_deflector( + curvature, direction, center_x, center_y + ) + r_curvature = 1.0 / curvature + lambda_mst = 1.0 / radial_stretch kappa_ext = 1 - lambda_mst - theta_E = r_curvature * (1. - radial_stretch / tangential_stretch) + theta_E = r_curvature * (1.0 - radial_stretch / tangential_stretch) return theta_E, kappa_ext, center_x_sis, center_y_sis @staticmethod - def sis_mst2stretch(theta_E, kappa_ext, center_x_sis, center_y_sis, center_x, center_y): - """ - turn Singular power-law lens model into stretch parameterization at position (center_x, center_y) - This is the inverse function of stretch2spp() + def sis_mst2stretch( + theta_E, kappa_ext, center_x_sis, center_y_sis, center_x, center_y + ): + """Turn Singular power-law lens model into stretch parameterization at position + (center_x, center_y) This is the inverse function of stretch2spp() :param theta_E: Einstein radius of SIS profile :param kappa_ext: external convergence (MST factor 1 - kappa_ext) @@ -67,14 +92,26 @@ def sis_mst2stretch(theta_E, kappa_ext, center_x_sis, center_y_sis, center_x, ce :return: tangential_stretch, radial_stretch, curvature, direction :return: """ - r_curvature = np.sqrt((center_x_sis - center_x) ** 2 + (center_y_sis - center_y) ** 2) + r_curvature = np.sqrt( + (center_x_sis - center_x) ** 2 + (center_y_sis - center_y) ** 2 + ) direction = np.arctan2(center_y - center_y_sis, center_x - center_x_sis) - radial_stretch = 1. 
/ (1 - kappa_ext) - tangential_stretch = 1 / (1 - (theta_E/r_curvature)) * radial_stretch - curvature = 1./r_curvature + radial_stretch = 1.0 / (1 - kappa_ext) + tangential_stretch = 1 / (1 - (theta_E / r_curvature)) * radial_stretch + curvature = 1.0 / r_curvature return tangential_stretch, radial_stretch, curvature, direction - def function(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def function( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ ATTENTION: there may not be a global lensing potential! @@ -88,15 +125,31 @@ def function(self, x, y, tangential_stretch, radial_stretch, curvature, directio :param center_y: center of source in image plane :return: """ - lambda_mst = 1. / radial_stretch - theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) - f_sis = self._sis.function(x, y, theta_E, center_x_sis, center_y_sis) # - self._sis.function(center_x, center_y, theta_E, center_x_sis, center_y_sis) - alpha_x, alpha_y = self._sis.derivatives(center_x, center_y, theta_E, center_x_sis, center_y_sis) + lambda_mst = 1.0 / radial_stretch + theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_sis = self._sis.function( + x, y, theta_E, center_x_sis, center_y_sis + ) # - self._sis.function(center_x, center_y, theta_E, center_x_sis, center_y_sis) + alpha_x, alpha_y = self._sis.derivatives( + center_x, center_y, theta_E, center_x_sis, center_y_sis + ) f_sis_0 = alpha_x * (x - center_x) + alpha_y * (y - center_y) f_mst = self._mst.function(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) return lambda_mst * (f_sis - f_sis_0) + f_mst - def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def derivatives( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -109,18 +162,34 @@ def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direc :param center_y: center of source in image plane :return: """ - lambda_mst = 1. 
/ radial_stretch - theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) - f_x_sis, f_y_sis = self._sis.derivatives(x, y, theta_E, center_x_sis, center_y_sis) - f_x0, f_y0 = self._sis.derivatives(center_x, center_y, theta_E, center_x_sis, center_y_sis) - f_x_mst, f_y_mst = self._mst.derivatives(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) + lambda_mst = 1.0 / radial_stretch + theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_x_sis, f_y_sis = self._sis.derivatives( + x, y, theta_E, center_x_sis, center_y_sis + ) + f_x0, f_y0 = self._sis.derivatives( + center_x, center_y, theta_E, center_x_sis, center_y_sis + ) + f_x_mst, f_y_mst = self._mst.derivatives( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) f_x = lambda_mst * (f_x_sis - f_x0) + f_x_mst f_y = lambda_mst * (f_y_sis - f_y0) + f_y_mst return f_x, f_y - def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def hessian( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -133,13 +202,22 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction :param center_y: center of source in image plane :return: """ - lambda_mst = 1. / radial_stretch - theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = self._sis.hessian(x, y, theta_E, center_x_sis, center_y_sis) - f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) - return lambda_mst * f_xx_sis + f_xx_mst, lambda_mst * f_xy_sis + f_xy_mst, lambda_mst * f_yx_sis + f_yx_mst, lambda_mst * f_yy_sis + f_yy_mst + lambda_mst = 1.0 / radial_stretch + theta_E, kappa_ext, center_x_sis, center_y_sis = self.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = self._sis.hessian( + x, y, theta_E, center_x_sis, center_y_sis + ) + f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) + return ( + lambda_mst * f_xx_sis + f_xx_mst, + lambda_mst * f_xy_sis + f_xy_mst, + lambda_mst * f_yx_sis + f_yx_mst, + lambda_mst * f_yy_sis + f_yy_mst, + ) def center_deflector(curvature, direction, center_x, center_y): diff --git a/lenstronomy/LensModel/Profiles/curved_arc_spp.py b/lenstronomy/LensModel/Profiles/curved_arc_spp.py index 6b0d7073c..e495300da 100644 --- a/lenstronomy/LensModel/Profiles/curved_arc_spp.py +++ b/lenstronomy/LensModel/Profiles/curved_arc_spp.py @@ -2,13 +2,12 @@ from lenstronomy.LensModel.Profiles.spp import SPP from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CurvedArcSPP', 'center_deflector'] +__all__ = ["CurvedArcSPP", "center_deflector"] class CurvedArcSPP(LensProfileBase): - """ - lens model that describes a section of a highly magnified deflector region. - The parameterization is chosen to describe local observables efficient. + """Lens model that describes a section of a highly magnified deflector region. The + parameterization is chosen to describe local observables efficient. 
Observables are: - curvature radius (basically bending relative to the center of the profile) @@ -21,18 +20,41 @@ class CurvedArcSPP(LensProfileBase): - Should work with other perturbative models without breaking its meaning (say when adding additional shear terms) - Must best reflect the observables in lensing - minimal covariances between the parameters, intuitive parameterization. - """ - param_names = ['tangential_stretch', 'radial_stretch', 'curvature', 'direction', 'center_x', 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'radial_stretch': -5, 'curvature': 0.000001, 'direction': -np.pi, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'radial_stretch': 5, 'curvature': 100, 'direction': np.pi, 'center_x': 100, 'center_y': 100} + + param_names = [ + "tangential_stretch", + "radial_stretch", + "curvature", + "direction", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "radial_stretch": -5, + "curvature": 0.000001, + "direction": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "radial_stretch": 5, + "curvature": 100, + "direction": np.pi, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._spp = SPP() super(CurvedArcSPP, self).__init__() @staticmethod - def stretch2spp(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ): """ :param tangential_stretch: float, stretch of intrinsic source in tangential direction @@ -43,17 +65,18 @@ def stretch2spp(tangential_stretch, radial_stretch, curvature, direction, center :param center_y: center of source in image plane :return: parameters in terms of a spherical power-law profile resulting in the same observables """ - center_x_spp, center_y_spp = center_deflector(curvature, direction, center_x, center_y) - r_curvature = 1. 
/ curvature - gamma = (1./radial_stretch - 1) / (1 - 1./tangential_stretch) + 2 - theta_E = abs(1 - 1./tangential_stretch)**(1./(gamma - 1)) * r_curvature + center_x_spp, center_y_spp = center_deflector( + curvature, direction, center_x, center_y + ) + r_curvature = 1.0 / curvature + gamma = (1.0 / radial_stretch - 1) / (1 - 1.0 / tangential_stretch) + 2 + theta_E = abs(1 - 1.0 / tangential_stretch) ** (1.0 / (gamma - 1)) * r_curvature return theta_E, gamma, center_x_spp, center_y_spp @staticmethod def spp2stretch(theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y): - """ - turn Singular power-law lens model into stretch parameterization at position (center_x, center_y) - This is the inverse function of stretch2spp() + """Turn Singular power-law lens model into stretch parameterization at position + (center_x, center_y) This is the inverse function of stretch2spp() :param theta_E: Einstein radius of SPP model :param gamma: power-law slope @@ -63,14 +86,26 @@ def spp2stretch(theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y): :param center_y: center of curved model definition :return: tangential_stretch, radial_stretch, curvature, direction """ - r_curvature = np.sqrt((center_x_spp - center_x)**2 + (center_y_spp - center_y)**2) + r_curvature = np.sqrt( + (center_x_spp - center_x) ** 2 + (center_y_spp - center_y) ** 2 + ) direction = np.arctan2(center_y - center_y_spp, center_x - center_x_spp) - tangential_stretch = 1 / (1 - (theta_E/r_curvature) ** (gamma - 1)) - radial_stretch = 1 / (1 + (gamma - 2) * (theta_E/r_curvature) ** (gamma - 1)) - curvature = 1./r_curvature + tangential_stretch = 1 / (1 - (theta_E / r_curvature) ** (gamma - 1)) + radial_stretch = 1 / (1 + (gamma - 2) * (theta_E / r_curvature) ** (gamma - 1)) + curvature = 1.0 / r_curvature return tangential_stretch, radial_stretch, curvature, direction - def function(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def function( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ ATTENTION: there may not be a global lensing potential! 
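`spp2stretch()` is documented as the inverse of `stretch2spp()`; a round-trip sketch of the two mappings (illustrative only, assuming lenstronomy is importable, with arbitrary input values):

```python
# Illustrative round-trip check: spp2stretch() should undo stretch2spp().
import numpy as np
from lenstronomy.LensModel.Profiles.curved_arc_spp import CurvedArcSPP

arc = CurvedArcSPP()
kwargs = dict(tangential_stretch=5.0, radial_stretch=1.2, curvature=0.5,
              direction=0.3, center_x=0.1, center_y=-0.2)
theta_E, gamma, cx_spp, cy_spp = arc.stretch2spp(**kwargs)
back = arc.spp2stretch(theta_E, gamma, cx_spp, cy_spp,
                       kwargs["center_x"], kwargs["center_y"])
print(np.allclose(back, (5.0, 1.2, 0.5, 0.3)))  # expected: True
```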
@@ -84,13 +119,27 @@ def function(self, x, y, tangential_stretch, radial_stretch, curvature, directio :param center_y: center of source in image plane :return: """ - theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) + theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) f_ = self._spp.function(x, y, theta_E, gamma, center_x_spp, center_y_spp) - alpha_x, alpha_y = self._spp.derivatives(center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp) + alpha_x, alpha_y = self._spp.derivatives( + center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp + ) f_0 = alpha_x * (x - center_x) + alpha_y * (y - center_y) return f_ - f_0 - def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def derivatives( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -103,14 +152,28 @@ def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direc :param center_y: center of source in image plane :return: """ - theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) - f_x, f_y = self._spp.derivatives(x, y, theta_E, gamma, center_x_spp, center_y_spp) - f_x0, f_y0 = self._spp.derivatives(center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp) + theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_x, f_y = self._spp.derivatives( + x, y, theta_E, gamma, center_x_spp, center_y_spp + ) + f_x0, f_y0 = self._spp.derivatives( + center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp + ) return f_x - f_x0, f_y - f_y0 - def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y): + def hessian( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ): """ :param x: @@ -123,9 +186,9 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction :param center_y: center of source in image plane :return: """ - theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) + theta_E, gamma, center_x_spp, center_y_spp = self.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) return self._spp.hessian(x, y, theta_E, gamma, center_x_spp, center_y_spp) diff --git a/lenstronomy/LensModel/Profiles/curved_arc_spt.py b/lenstronomy/LensModel/Profiles/curved_arc_spt.py index 78b1a8569..b4e91c267 100644 --- a/lenstronomy/LensModel/Profiles/curved_arc_spt.py +++ b/lenstronomy/LensModel/Profiles/curved_arc_spt.py @@ -3,30 +3,66 @@ from lenstronomy.LensModel.Profiles.curved_arc_sis_mst import CurvedArcSISMST from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CurvedArcSPT'] +__all__ = ["CurvedArcSPT"] class CurvedArcSPT(LensProfileBase): - """ - Curved arc model based on SIS+MST with an additional non-linear shear distortions applied on the source coordinates - around the center. - This profile is effectively a Source Position Transform of a curved arc and a shear distortion. 
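The `derivatives()` below implements this as a two-step lens equation: the curved-arc deflection first maps image to source coordinates, and a reduced shear then distorts those source coordinates around the center. A compact illustrative sketch of that composition (the function and argument names here are chosen for illustration only):

```python
# Illustrative sketch mirroring CurvedArcSPT.derivatives(): deflection of a curved
# arc followed by a reduced-shear source-position transform around the center.
def spt_deflection(x, y, arc_alpha, shear, gamma1, gamma2, center_x, center_y):
    """arc_alpha(x, y) -> (f_x, f_y) of the curved arc; shear is a ShearReduced-like
    profile (illustrative argument names)."""
    f_x_c, f_y_c = arc_alpha(x, y)
    beta_x, beta_y = x - f_x_c, y - f_y_c              # map to the source plane
    f_x_b, f_y_b = shear.derivatives(beta_x, beta_y, gamma1, gamma2,
                                     ra_0=center_x, dec_0=center_y)
    beta_x_, beta_y_ = beta_x - f_x_b, beta_y - f_y_b  # distort source coordinates
    return x - beta_x_, y - beta_y_                    # total deflection: beta = theta - alpha
```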
+ """Curved arc model based on SIS+MST with an additional non-linear shear distortions + applied on the source coordinates around the center. + This profile is effectively a Source Position Transform of a curved arc and a shear + distortion. """ - param_names = ['tangential_stretch', 'radial_stretch', 'curvature', 'direction', 'gamma1', 'gamma2', 'center_x', - 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'radial_stretch': -5, 'curvature': 0.000001, 'direction': -np.pi, - 'gamma1': -0.5, 'gamma2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'radial_stretch': 5, 'curvature': 100, 'direction': np.pi, - 'gamma1': 0.5, 'gamma2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = [ + "tangential_stretch", + "radial_stretch", + "curvature", + "direction", + "gamma1", + "gamma2", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "radial_stretch": -5, + "curvature": 0.000001, + "direction": -np.pi, + "gamma1": -0.5, + "gamma2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "radial_stretch": 5, + "curvature": 100, + "direction": np.pi, + "gamma1": 0.5, + "gamma2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._curve = CurvedArcSISMST() self._distort = ShearReduced() super(CurvedArcSPT, self).__init__() - def function(self, x, y, tangential_stretch, radial_stretch, curvature, direction, gamma1, gamma2, center_x, - center_y): + def function( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ): """ ATTENTION: there may not be a global lensing potential! @@ -42,10 +78,23 @@ def function(self, x, y, tangential_stretch, radial_stretch, curvature, directio :param center_y: center of source in image plane :return: """ - raise NotImplemented('lensing potential for regularly curved arc is not implemented') + raise NotImplemented( + "lensing potential for regularly curved arc is not implemented" + ) - def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direction, gamma1, gamma2, center_x, - center_y): + def derivatives( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ): """ :param x: @@ -61,20 +110,41 @@ def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, direc :return: """ # computed regular curved arc deflection - f_x_c, f_y_c = self._curve.derivatives(x, y, tangential_stretch, radial_stretch, curvature, direction, - center_x, center_y) + f_x_c, f_y_c = self._curve.derivatives( + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ) # map to source plane coordinate system beta_x, beta_y = x - f_x_c, y - f_y_c # distort source plane coordinate system around (center_x, center_y) - f_x_b, f_y_b = self._distort.derivatives(beta_x, beta_y, gamma1, gamma2, ra_0=center_x, dec_0=center_y) + f_x_b, f_y_b = self._distort.derivatives( + beta_x, beta_y, gamma1, gamma2, ra_0=center_x, dec_0=center_y + ) beta_x_, beta_y_ = beta_x - f_x_b, beta_y - f_y_b # compute total deflection between initial coordinate and final source coordinate to match lens equation # beta = theta - alpha f_x, f_y = x - beta_x_, y - beta_y_ return f_x, f_y - def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction, gamma1, gamma2, center_x, - center_y): + def hessian( + 
self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ): """ :param x: @@ -89,13 +159,43 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, direction :param center_y: center of source in image plane :return: """ - alpha_ra, alpha_dec = self.derivatives(x, y, tangential_stretch, radial_stretch, curvature, direction, gamma1, - gamma2, center_x, center_y) + alpha_ra, alpha_dec = self.derivatives( + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ) diff = 0.0000001 - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, tangential_stretch, radial_stretch, curvature, - direction, gamma1, gamma2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, tangential_stretch, radial_stretch, curvature, - direction, gamma1, gamma2, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, + y + diff, + tangential_stretch, + radial_stretch, + curvature, + direction, + gamma1, + gamma2, + center_x, + center_y, + ) f_xx = (alpha_ra_dx - alpha_ra) / diff f_xy = (alpha_ra_dy - alpha_ra) / diff diff --git a/lenstronomy/LensModel/Profiles/curved_arc_tan_diff.py b/lenstronomy/LensModel/Profiles/curved_arc_tan_diff.py index 626abf2ac..dc7bca323 100644 --- a/lenstronomy/LensModel/Profiles/curved_arc_tan_diff.py +++ b/lenstronomy/LensModel/Profiles/curved_arc_tan_diff.py @@ -4,12 +4,12 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util import param_util -__all__ = ['CurvedArcTanDiff'] +__all__ = ["CurvedArcTanDiff"] class CurvedArcTanDiff(LensProfileBase): - """ - Curved arc model with an additional non-zero tangential stretch differential in tangential direction component + """Curved arc model with an additional non-zero tangential stretch differential in + tangential direction component. Observables are: - curvature radius (basically bending relative to the center of the profile) @@ -22,11 +22,35 @@ class CurvedArcTanDiff(LensProfileBase): - Should work with other perturbative models without breaking its meaning (say when adding additional shear terms) - Must best reflect the observables in lensing - minimal covariances between the parameters, intuitive parameterization. 
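`stretch2sie_mst()` below rests on two analytic ingredients: the tangential eigenvalue gradient dlambda_tan/dr = tangential_stretch / r_curvature * (1 - tangential_stretch / radial_stretch) (Birrer 2021), and an ellipticity-dependent rescaling of the Einstein radius, theta_E_sie = theta_E * sqrt(1 + q^2) / sqrt(2 q). A purely numerical illustration with arbitrary values (the axis ratio q is an example value here, not derived from dtan_dtan):

```python
# Illustrative numbers only for the two analytic relations used by stretch2sie_mst().
import numpy as np

tangential_stretch, radial_stretch, curvature = 10.0, 1.2, 0.5  # arbitrary values
r_curvature = 1.0 / curvature
dlambda_tan_dr = (tangential_stretch / r_curvature
                  * (1 - tangential_stretch / radial_stretch))
q = 0.8  # example axis ratio of the equivalent SIE (hypothetical value)
theta_E_rescale = np.sqrt(1 + q**2) / np.sqrt(2 * q)  # theta_E_sie = theta_E * this factor
print(dlambda_tan_dr, theta_E_rescale)
```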
- """ - param_names = ['tangential_stretch', 'radial_stretch', 'curvature', 'dtan_dtan', 'direction', 'center_x', 'center_y'] - lower_limit_default = {'tangential_stretch': -100, 'radial_stretch': -5, 'curvature': 0.000001, 'dtan_dtan': -10, 'direction': -np.pi, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'tangential_stretch': 100, 'radial_stretch': 5, 'curvature': 100, 'dtan_dtan': 10, 'direction': np.pi, 'center_x': 100, 'center_y': 100} + + param_names = [ + "tangential_stretch", + "radial_stretch", + "curvature", + "dtan_dtan", + "direction", + "center_x", + "center_y", + ] + lower_limit_default = { + "tangential_stretch": -100, + "radial_stretch": -5, + "curvature": 0.000001, + "dtan_dtan": -10, + "direction": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "tangential_stretch": 100, + "radial_stretch": 5, + "curvature": 100, + "dtan_dtan": 10, + "direction": np.pi, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._sie = SIE(NIE=True) @@ -34,7 +58,15 @@ def __init__(self): super(CurvedArcTanDiff, self).__init__() @staticmethod - def stretch2sie_mst(tangential_stretch, radial_stretch, curvature, dtan_dtan, direction, center_x, center_y): + def stretch2sie_mst( + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ): """ :param tangential_stretch: float, stretch of intrinsic source in tangential direction @@ -46,13 +78,17 @@ def stretch2sie_mst(tangential_stretch, radial_stretch, curvature, dtan_dtan, di :param center_y: center of source in image plane :return: parameters in terms of a spherical SIS + MST resulting in the same observables """ - center_x_sis, center_y_sis = center_deflector(curvature, direction, center_x, center_y) - r_curvature = 1. / curvature - lambda_mst = 1./radial_stretch + center_x_sis, center_y_sis = center_deflector( + curvature, direction, center_x, center_y + ) + r_curvature = 1.0 / curvature + lambda_mst = 1.0 / radial_stretch kappa_ext = 1 - lambda_mst - theta_E = r_curvature * (1. - radial_stretch / tangential_stretch) + theta_E = r_curvature * (1.0 - radial_stretch / tangential_stretch) # analytic relation (see Birrer 2021) - dlambda_tan_dr = tangential_stretch / r_curvature * (1 - tangential_stretch / radial_stretch) + dlambda_tan_dr = ( + tangential_stretch / r_curvature * (1 - tangential_stretch / radial_stretch) + ) # translate tangential eigenvalue gradient in lens ellipticity dtan_dtan_ = dtan_dtan * tangential_stretch @@ -68,11 +104,22 @@ def stretch2sie_mst(tangential_stretch, radial_stretch, curvature, dtan_dtan, di e1_sie, e2_sie = param_util.phi_q2_ellipticity(phi, q) # ellipticity adopted Einstein radius to match local tangential and radial stretch - factor = np.sqrt(1 + q ** 2) / np.sqrt(2 * q) + factor = np.sqrt(1 + q**2) / np.sqrt(2 * q) theta_E_sie = theta_E * factor return theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis - def function(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_dtan, direction, center_x, center_y): + def function( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ): """ ATTENTION: there may not be a global lensing potential! @@ -87,15 +134,45 @@ def function(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_dta :param center_y: center of source in image plane :return: """ - lambda_mst = 1. 
/ radial_stretch - theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis = self.stretch2sie_mst(tangential_stretch, radial_stretch, curvature, dtan_dtan, direction, center_x, center_y) - f_sis = self._sie.function(x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) # - self._sis.function(center_x, center_y, theta_E, center_x_sis, center_y_sis) - alpha_x, alpha_y = self._sie.derivatives(center_x, center_y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) + lambda_mst = 1.0 / radial_stretch + ( + theta_E_sie, + e1_sie, + e2_sie, + kappa_ext, + center_x_sis, + center_y_sis, + ) = self.stretch2sie_mst( + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ) + f_sis = self._sie.function( + x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis + ) # - self._sis.function(center_x, center_y, theta_E, center_x_sis, center_y_sis) + alpha_x, alpha_y = self._sie.derivatives( + center_x, center_y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis + ) f_sis_0 = alpha_x * (x - center_x) + alpha_y * (y - center_y) f_mst = self._mst.function(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) return lambda_mst * (f_sis - f_sis_0) + f_mst - def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_dtan, direction, center_x, center_y): + def derivatives( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ): """ :param x: @@ -109,18 +186,48 @@ def derivatives(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_ :param center_y: center of source in image plane :return: """ - lambda_mst = 1. / radial_stretch - theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis = self.stretch2sie_mst(tangential_stretch, - radial_stretch, curvature, dtan_dtan, - direction, center_x, center_y) - f_x_sis, f_y_sis = self._sie.derivatives(x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) - f_x0, f_y0 = self._sie.derivatives(center_x, center_y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) - f_x_mst, f_y_mst = self._mst.derivatives(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) + lambda_mst = 1.0 / radial_stretch + ( + theta_E_sie, + e1_sie, + e2_sie, + kappa_ext, + center_x_sis, + center_y_sis, + ) = self.stretch2sie_mst( + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ) + f_x_sis, f_y_sis = self._sie.derivatives( + x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis + ) + f_x0, f_y0 = self._sie.derivatives( + center_x, center_y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis + ) + f_x_mst, f_y_mst = self._mst.derivatives( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) f_x = lambda_mst * (f_x_sis - f_x0) + f_x_mst f_y = lambda_mst * (f_y_sis - f_y0) + f_y_mst return f_x, f_y - def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_dtan, direction, center_x, center_y): + def hessian( + self, + x, + y, + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ): """ :param x: @@ -134,13 +241,35 @@ def hessian(self, x, y, tangential_stretch, radial_stretch, curvature, dtan_dtan :param center_y: center of source in image plane :return: """ - lambda_mst = 1. 
/ radial_stretch - theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis = self.stretch2sie_mst(tangential_stretch, - radial_stretch, curvature, dtan_dtan, - direction, center_x, center_y) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = self._sie.hessian(x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) - f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) - return lambda_mst * f_xx_sis + f_xx_mst, lambda_mst * f_xy_sis + f_xy_mst, lambda_mst * f_yx_sis + f_yx_mst, lambda_mst * f_yy_sis + f_yy_mst + lambda_mst = 1.0 / radial_stretch + ( + theta_E_sie, + e1_sie, + e2_sie, + kappa_ext, + center_x_sis, + center_y_sis, + ) = self.stretch2sie_mst( + tangential_stretch, + radial_stretch, + curvature, + dtan_dtan, + direction, + center_x, + center_y, + ) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = self._sie.hessian( + x, y, theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis + ) + f_xx_mst, f_xy_mst, f_yx_mst, f_yy_mst = self._mst.hessian( + x, y, kappa_ext, ra_0=center_x, dec_0=center_y + ) + return ( + lambda_mst * f_xx_sis + f_xx_mst, + lambda_mst * f_xy_sis + f_xy_mst, + lambda_mst * f_yx_sis + f_yx_mst, + lambda_mst * f_yy_sis + f_yy_mst, + ) def center_deflector(curvature, direction, center_x, center_y): diff --git a/lenstronomy/LensModel/Profiles/dipole.py b/lenstronomy/LensModel/Profiles/dipole.py index 439a63dd0..3fadf256a 100644 --- a/lenstronomy/LensModel/Profiles/dipole.py +++ b/lenstronomy/LensModel/Profiles/dipole.py @@ -1,19 +1,23 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Dipole', 'DipoleUtil'] +__all__ = ["Dipole", "DipoleUtil"] class Dipole(LensProfileBase): - """ - class for dipole response of two massive bodies (experimental) - """ - param_names = ['com_x', 'com_y', 'phi_dipole', 'coupling'] - lower_limit_default = {'com_x': -100, 'com_y': -100, 'phi_dipole': -10, 'coupling': -10} - upper_limit_default = {'com_x': 100, 'com_y': 100, 'phi_dipole': 10, 'coupling': 10} + """Class for dipole response of two massive bodies (experimental)""" + + param_names = ["com_x", "com_y", "phi_dipole", "coupling"] + lower_limit_default = { + "com_x": -100, + "com_y": -100, + "phi_dipole": -10, + "coupling": -10, + } + upper_limit_default = {"com_x": 100, "com_y": 100, "phi_dipole": 10, "coupling": 10} def function(self, x, y, com_x, com_y, phi_dipole, coupling): # coordinate shift @@ -23,7 +27,7 @@ def function(self, x, y, com_x, com_y, phi_dipole, coupling): # rotation angle sin_phi = np.sin(phi_dipole) cos_phi = np.cos(phi_dipole) - x_ = cos_phi*x_shift + sin_phi*y_shift + x_ = cos_phi * x_shift + sin_phi * y_shift # y_ = -sin_phi*x_shift + cos_phi*y_shift # r = np.sqrt(x_**2 + y_**2) @@ -33,7 +37,6 @@ def function(self, x, y, com_x, com_y, phi_dipole, coupling): return f_ def derivatives(self, x, y, com_x, com_y, phi_dipole, coupling): - # coordinate shift x_shift = x - com_x y_shift = y - com_y @@ -41,18 +44,17 @@ def derivatives(self, x, y, com_x, com_y, phi_dipole, coupling): # rotation angle sin_phi = np.sin(phi_dipole) cos_phi = np.cos(phi_dipole) - x_ = cos_phi*x_shift + sin_phi*y_shift - y_ = -sin_phi*x_shift + cos_phi*y_shift + x_ = cos_phi * x_shift + sin_phi * y_shift + y_ = -sin_phi * x_shift + cos_phi * y_shift - f_x_prim = coupling * x_/np.sqrt(x_**2 + y_**2) + f_x_prim = coupling * x_ / np.sqrt(x_**2 + y_**2) f_y_prim = np.zeros_like(x_) # rotate back - f_x = 
cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, com_x, com_y, phi_dipole, coupling): - # coordinate shift x_shift = x - com_x y_shift = y - com_y @@ -60,20 +62,26 @@ def hessian(self, x, y, com_x, com_y, phi_dipole, coupling): # rotation angle sin_phi = np.sin(phi_dipole) cos_phi = np.cos(phi_dipole) - x_ = cos_phi*x_shift + sin_phi*y_shift - y_ = -sin_phi*x_shift + cos_phi*y_shift + x_ = cos_phi * x_shift + sin_phi * y_shift + y_ = -sin_phi * x_shift + cos_phi * y_shift r = np.sqrt(x_**2 + y_**2) - f_xx_prim = coupling*y_**2/r**3 + f_xx_prim = coupling * y_**2 / r**3 f_xy_prim = -coupling * x_ * y_ / r**3 f_yy_prim = np.zeros_like(x_) - kappa = 1./2 * (f_xx_prim + f_yy_prim) - gamma1_value = 1./2 * (f_xx_prim - f_yy_prim) + kappa = 1.0 / 2 * (f_xx_prim + f_yy_prim) + gamma1_value = 1.0 / 2 * (f_xx_prim - f_yy_prim) gamma2_value = f_xy_prim # rotate back - gamma1 = np.cos(2*phi_dipole)*gamma1_value-np.sin(2*phi_dipole)*gamma2_value - gamma2 = +np.sin(2*phi_dipole)*gamma1_value+np.cos(2*phi_dipole)*gamma2_value + gamma1 = ( + np.cos(2 * phi_dipole) * gamma1_value + - np.sin(2 * phi_dipole) * gamma2_value + ) + gamma2 = ( + +np.sin(2 * phi_dipole) * gamma1_value + + np.cos(2 * phi_dipole) * gamma2_value + ) f_xx = kappa + gamma1 f_yy = kappa - gamma1 @@ -82,24 +90,22 @@ def hessian(self, x, y, com_x, com_y, phi_dipole, coupling): class DipoleUtil(object): - """ - pre-calculation of dipole properties - """ + """Pre-calculation of dipole properties.""" @staticmethod def com(center1_x, center1_y, center2_x, center2_y, Fm): """ :return: center of mass """ - com_x = (Fm * center1_x + center2_x)/(Fm + 1.) - com_y = (Fm * center1_y + center2_y)/(Fm + 1.) + com_x = (Fm * center1_x + center2_x) / (Fm + 1.0) + com_y = (Fm * center1_y + center2_y) / (Fm + 1.0) return com_x, com_y @staticmethod def mass_ratio(theta_E, theta_E_sub): - """ - computes mass ration of the two clumps with given Einstein radius and power law slope (clump1/sub-clump) - :param theta_E: + """Computes mass ration of the two clumps with given Einstein radius and power + law slope (clump1/sub-clump) :param theta_E: + :param theta_E_sub: :return: """ @@ -107,9 +113,6 @@ def mass_ratio(theta_E, theta_E_sub): @staticmethod def angle(center1_x, center1_y, center2_x, center2_y): - """ - compute the rotation angle of the dipole - :return: - """ + """Compute the rotation angle of the dipole :return:""" phi_G = np.arctan2(center2_y - center1_y, center2_x - center1_x) return phi_G diff --git a/lenstronomy/LensModel/Profiles/elliptical_density_slice.py b/lenstronomy/LensModel/Profiles/elliptical_density_slice.py index ba9d39e48..c2e3afedb 100644 --- a/lenstronomy/LensModel/Profiles/elliptical_density_slice.py +++ b/lenstronomy/LensModel/Profiles/elliptical_density_slice.py @@ -5,10 +5,10 @@ from lenstronomy.Util import param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['ElliSLICE'] +__all__ = ["ElliSLICE"] -class ElliSLICE (LensProfileBase): +class ElliSLICE(LensProfileBase): """ This class computes the lensing quantities for an elliptical slice of constant density. 
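The dipole hessian above computes the convergence and shear in the dipole-aligned frame and then rotates the shear back; because shear is a spin-2 quantity, its components rotate with twice the position angle. A small self-contained consistency check of that identity (all values hypothetical):

import numpy as np

def rotate_shear(gamma1, gamma2, phi):
    # spin-2 rotation: rotating the frame by phi rotates (gamma1, gamma2) by 2*phi
    g1 = np.cos(2 * phi) * gamma1 - np.sin(2 * phi) * gamma2
    g2 = np.sin(2 * phi) * gamma1 + np.cos(2 * phi) * gamma2
    return g1, g2

phi = 0.3
kappa, g1p, g2p = 0.4, 0.1, -0.05  # convergence and shear in the aligned frame
g1, g2 = rotate_shear(g1p, g2p, phi)
H_rotated = np.array([[kappa + g1, g2], [g2, kappa - g1]])

# same result from rotating the Hessian matrix directly: H = R H' R^T
R = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
H_aligned = np.array([[kappa + g1p, g2p], [g2p, kappa - g1p]])
assert np.allclose(H_rotated, R @ H_aligned @ R.T)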
Based on Schramm 1994 https://ui.adsabs.harvard.edu/abs/1994A%26A...284...44S/abstract @@ -37,13 +37,25 @@ class ElliSLICE (LensProfileBase): y_c = y - center_y """ - param_names = ['a', 'b', 'psi', 'sigma_0', 'center_x', 'center_y'] - lower_limit_default = {'a': 0., 'b': 0., 'psi': -90./180.*np.pi, 'center_x': -100., 'center_y': -100.} - upper_limit_default = {'a': 100., 'b': 100., 'psi': 90. / 180. * np.pi, 'center_x': 100., 'center_y': 100.} - def function(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): - """ - lensing potential + param_names = ["a", "b", "psi", "sigma_0", "center_x", "center_y"] + lower_limit_default = { + "a": 0.0, + "b": 0.0, + "psi": -90.0 / 180.0 * np.pi, + "center_x": -100.0, + "center_y": -100.0, + } + upper_limit_default = { + "a": 100.0, + "b": 100.0, + "psi": 90.0 / 180.0 * np.pi, + "center_x": 100.0, + "center_y": 100.0, + } + + def function(self, x, y, a, b, psi, sigma_0, center_x=0.0, center_y=0.0): + """Lensing potential. :param a: float, semi-major axis, must be positive :param b: float, semi-minor axis, must be positive @@ -51,28 +63,39 @@ def function(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): :param sigma_0: float, surface mass density, must be positive :param center_x: float, center on the x axis :param center_y: float, center on the y axis - """ - kwargs_slice = {'center_x': center_x, 'center_y': center_y, 'a': a, 'b': b, 'psi': psi, 'sigma_0': sigma_0} + kwargs_slice = { + "center_x": center_x, + "center_y": center_y, + "a": a, + "b": b, + "psi": psi, + "sigma_0": sigma_0, + } x_ = x - center_x y_ = y - center_y - x_rot = x_*np.cos(psi) + y_*np.sin(psi) - y_rot = -x_*np.sin(psi) + y_*np.cos(psi) + x_rot = x_ * np.cos(psi) + y_ * np.sin(psi) + y_rot = -x_ * np.sin(psi) + y_ * np.cos(psi) try: len(x_) except: - if (x_rot ** 2 / a ** 2) + (y_rot ** 2 / b ** 2) <= 1: + if (x_rot**2 / a**2) + (y_rot**2 / b**2) <= 1: return self.pot_in(x_, y_, kwargs_slice) else: return self.pot_ext(x_, y_, kwargs_slice) else: - f = np.array([self.pot_in(x_[i], y_[i], kwargs_slice) if (x_rot[i] ** 2 / a ** 2) + (y_rot[i] ** 2 / b ** 2) <= 1 - else self.pot_ext(x_[i], y_[i], kwargs_slice) for i in range(len(x_))]) + f = np.array( + [ + self.pot_in(x_[i], y_[i], kwargs_slice) + if (x_rot[i] ** 2 / a**2) + (y_rot[i] ** 2 / b**2) <= 1 + else self.pot_ext(x_[i], y_[i], kwargs_slice) + for i in range(len(x_)) + ] + ) return f - def derivatives(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): - """ - lensing deflection angle + def derivatives(self, x, y, a, b, psi, sigma_0, center_x=0.0, center_y=0.0): + """Lensing deflection angle. 
:param a: float, semi-major axis, must be positive :param b: float, semi-minor axis, must be positive @@ -80,9 +103,15 @@ def derivatives(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): :param sigma_0: float, surface mass density, must be positive :param center_x: float, center on the x axis :param center_y: float, center on the y axis - """ - kwargs_slice = {'center_x': center_x, 'center_y': center_y, 'a': a, 'b': b, 'psi': psi, 'sigma_0': sigma_0} + kwargs_slice = { + "center_x": center_x, + "center_y": center_y, + "a": a, + "b": b, + "psi": psi, + "sigma_0": sigma_0, + } x_ = x - center_x y_ = y - center_y x_rot = x_ * np.cos(psi) + y_ * np.sin(psi) @@ -90,18 +119,23 @@ def derivatives(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): try: len(x_) except: - if (x_rot ** 2 / a ** 2) + (y_rot ** 2 / b ** 2) <= 1: + if (x_rot**2 / a**2) + (y_rot**2 / b**2) <= 1: return self.alpha_in(x_, y_, kwargs_slice) else: return self.alpha_ext(x_, y_, kwargs_slice) else: - defl = np.array([self.alpha_in(x_[i], y_[i], kwargs_slice) if (x_rot[i] ** 2 / a ** 2) + (y_rot[i] ** 2 / b ** 2) <= 1 - else self.alpha_ext(x_[i], y_[i], kwargs_slice) for i in range(len(x_))]) + defl = np.array( + [ + self.alpha_in(x_[i], y_[i], kwargs_slice) + if (x_rot[i] ** 2 / a**2) + (y_rot[i] ** 2 / b**2) <= 1 + else self.alpha_ext(x_[i], y_[i], kwargs_slice) + for i in range(len(x_)) + ] + ) return defl[:, 0], defl[:, 1] - def hessian(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): - """ - lensing second derivatives + def hessian(self, x, y, a, b, psi, sigma_0, center_x=0.0, center_y=0.0): + """Lensing second derivatives. :param a: float, semi-major axis, must be positive :param b: float, semi-minor axis, must be positive @@ -109,13 +143,18 @@ def hessian(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): :param sigma_0: float, surface mass density, must be positive :param center_x: float, center on the x axis :param center_y: float, center on the y axis - """ diff = 0.000000001 - alpha_ra, alpha_dec = self.derivatives(x, y, a, b, psi, sigma_0, center_x, center_y) - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, a, b, psi, sigma_0, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, a, b, psi, sigma_0, center_x, center_y) + alpha_ra, alpha_dec = self.derivatives( + x, y, a, b, psi, sigma_0, center_x, center_y + ) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, a, b, psi, sigma_0, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, a, b, psi, sigma_0, center_x, center_y + ) f_xx = (alpha_ra_dx - alpha_ra) / diff f_xy = (alpha_ra_dy - alpha_ra) / diff @@ -125,11 +164,9 @@ def hessian(self, x, y, a, b, psi, sigma_0, center_x=0., center_y=0.): @staticmethod def sign(z): - """ - sign function + """Sign function. :param z: complex - """ x = z.real y = z.imag @@ -139,36 +176,36 @@ def sign(z): return -1 def alpha_in(self, x, y, kwargs_slice): - """ - deflection angle for (x,y) inside the elliptical slice - - :param kwargs_slice: dict, dictionary with the slice definition (a,b,psi,sigma_0) + """Deflection angle for (x,y) inside the elliptical slice. 
+ :param kwargs_slice: dict, dictionary with the slice definition + (a,b,psi,sigma_0) """ z = complex(x, y) zb = z.conjugate() - psi = kwargs_slice['psi'] - e = (kwargs_slice['a'] - kwargs_slice['b']) / (kwargs_slice['a'] + kwargs_slice['b']) - sig_0 = kwargs_slice['sigma_0'] + psi = kwargs_slice["psi"] + e = (kwargs_slice["a"] - kwargs_slice["b"]) / ( + kwargs_slice["a"] + kwargs_slice["b"] + ) + sig_0 = kwargs_slice["sigma_0"] e2ipsi = c.exp(2j * psi) I_in = (z - e * zb * e2ipsi) * sig_0 return I_in.real, I_in.imag def alpha_ext(self, x, y, kwargs_slice): - """ - deflection angle for (x,y) outside the elliptical slice - - :param kwargs_slice: dict, dictionary with the slice definition (a,b,psi,sigma_0) + """Deflection angle for (x,y) outside the elliptical slice. + :param kwargs_slice: dict, dictionary with the slice definition + (a,b,psi,sigma_0) """ z = complex(x, y) r, phi = param_util.cart2polar(x, y) zb = z.conjugate() - psi = kwargs_slice['psi'] - a = kwargs_slice['a'] - b = kwargs_slice['b'] - f2 = a ** 2 - b ** 2 - sig_0 = kwargs_slice['sigma_0'] + psi = kwargs_slice["psi"] + a = kwargs_slice["a"] + b = kwargs_slice["b"] + f2 = a**2 - b**2 + sig_0 = kwargs_slice["sigma_0"] median_op = False # when (x,y) is on one of the ellipse axis, there might be an issue when calculating the square root of # zb ** 2 * e2ipsi - f2. When the argument has an imaginary part ==0, having 0. or -0. may return different @@ -176,28 +213,70 @@ def alpha_ext(self, x, y, kwargs_slice): # away from this position, perpendicularly to the axis ; another one is at -delta perpendicularly away from # x,y). We calculate the function for each point and take the median. This avoids any singularity for points # along the axis but it slows down the function. - if np.abs(np.sin(phi - psi)) <= 10 ** -10 \ - or np.abs(np.sin(phi - psi - np.pi / 2.)) <= 10 ** -10: # very close to one of the ellipse axis + if ( + np.abs(np.sin(phi - psi)) <= 10**-10 + or np.abs(np.sin(phi - psi - np.pi / 2.0)) <= 10**-10 + ): # very close to one of the ellipse axis median_op = True e2ipsi = c.exp(2j * psi) eipsi = c.exp(1j * psi) if median_op is True: - eps = 10 ** -10 + eps = 10**-10 z_minus_eps = complex(r * np.cos(phi - eps), r * np.sin(phi - eps)) zb_minus_eps = z_minus_eps.conjugate() z_plus_eps = complex(r * np.cos(phi + eps), r * np.sin(phi + eps)) zb_plus_eps = z_plus_eps.conjugate() - I_out_minus = 2 * a * b / f2 * (zb_minus_eps * e2ipsi - eipsi * self.sign(zb_minus_eps * eipsi) - * c.sqrt(zb_minus_eps ** 2 * e2ipsi - f2)) * sig_0 - I_out_plus = 2 * a * b / f2 * (zb_plus_eps * e2ipsi - eipsi * self.sign(zb_plus_eps * eipsi) - * c.sqrt(zb_plus_eps ** 2 * e2ipsi - f2)) * sig_0 - I_out_mid = 2 * a * b / f2 * (zb * e2ipsi - eipsi * self.sign(zb * eipsi) - * c.sqrt(zb ** 2 * e2ipsi - f2)) * sig_0 + I_out_minus = ( + 2 + * a + * b + / f2 + * ( + zb_minus_eps * e2ipsi + - eipsi + * self.sign(zb_minus_eps * eipsi) + * c.sqrt(zb_minus_eps**2 * e2ipsi - f2) + ) + * sig_0 + ) + I_out_plus = ( + 2 + * a + * b + / f2 + * ( + zb_plus_eps * e2ipsi + - eipsi + * self.sign(zb_plus_eps * eipsi) + * c.sqrt(zb_plus_eps**2 * e2ipsi - f2) + ) + * sig_0 + ) + I_out_mid = ( + 2 + * a + * b + / f2 + * ( + zb * e2ipsi + - eipsi * self.sign(zb * eipsi) * c.sqrt(zb**2 * e2ipsi - f2) + ) + * sig_0 + ) I_out_real = np.median([I_out_minus.real, I_out_plus.real, I_out_mid.real]) I_out_imag = np.median([I_out_minus.imag, I_out_plus.imag, I_out_mid.imag]) else: - I_out = 2 * a * b / f2 * ( - zb * e2ipsi - eipsi * self.sign(zb * eipsi) * c.sqrt(zb ** 2 * e2ipsi - 
f2)) * sig_0 + I_out = ( + 2 + * a + * b + / f2 + * ( + zb * e2ipsi + - eipsi * self.sign(zb * eipsi) * c.sqrt(zb**2 * e2ipsi - f2) + ) + * sig_0 + ) I_out_real = I_out.real I_out_imag = I_out.imag @@ -211,36 +290,40 @@ def alpha_ext(self, x, y, kwargs_slice): @staticmethod def pot_in(x, y, kwargs_slice): - """ - lensing potential for (x,y) inside the elliptical slice - - :param kwargs_slice: dict, dictionary with the slice definition (a,b,psi,sigma_0) + """Lensing potential for (x,y) inside the elliptical slice. + :param kwargs_slice: dict, dictionary with the slice definition + (a,b,psi,sigma_0) """ - psi = kwargs_slice['psi'] - a = kwargs_slice['a'] - b = kwargs_slice['b'] - sig_0 = kwargs_slice['sigma_0'] + psi = kwargs_slice["psi"] + a = kwargs_slice["a"] + b = kwargs_slice["b"] + sig_0 = kwargs_slice["sigma_0"] e = (a - b) / (a + b) - rE = (a + b) / 2. - pot_in = 0.5 * ((1 - e) * (x * np.cos(psi) + y * np.sin(psi)) ** 2 + (1 + e) * ( - y * np.cos(psi) - x * np.sin(psi)) ** 2) * sig_0 - cst = sig_0 * rE ** 2 * (1 - e ** 2) * np.log(rE) + rE = (a + b) / 2.0 + pot_in = ( + 0.5 + * ( + (1 - e) * (x * np.cos(psi) + y * np.sin(psi)) ** 2 + + (1 + e) * (y * np.cos(psi) - x * np.sin(psi)) ** 2 + ) + * sig_0 + ) + cst = sig_0 * rE**2 * (1 - e**2) * np.log(rE) return pot_in + cst def pot_ext(self, x, y, kwargs_slice): - """ - lensing potential for (x,y) outside the elliptical slice - - :param kwargs_slice: dict, dictionary with the slice definition (a,b,psi,sigma_0) + """Lensing potential for (x,y) outside the elliptical slice. + :param kwargs_slice: dict, dictionary with the slice definition + (a,b,psi,sigma_0) """ z = complex(x, y) # zb = z.conjugate() - psi = kwargs_slice['psi'] - a = kwargs_slice['a'] - b = kwargs_slice['b'] - sig_0 = kwargs_slice['sigma_0'] + psi = kwargs_slice["psi"] + a = kwargs_slice["a"] + b = kwargs_slice["b"] + sig_0 = kwargs_slice["sigma_0"] r, phi = param_util.cart2polar(x, y) median_op = False # when (x,y) is on one of the ellipse axis, there might be an issue when calculating the square root of @@ -249,34 +332,96 @@ def pot_ext(self, x, y, kwargs_slice): # away from this position, perpendicularly to the axis ; another one is at -delta perpendicularly away from # x,y). We calculate the function for each point and take the median. This avoids any singularity for points # along the axis but it slows down the function. - if np.abs(np.sin(phi - psi)) <= 10 ** -10 \ - or np.abs(np.sin(phi - psi - np.pi / 2.)) <= 10 ** -10: # very close to one of the ellipse axis + if ( + np.abs(np.sin(phi - psi)) <= 10**-10 + or np.abs(np.sin(phi - psi - np.pi / 2.0)) <= 10**-10 + ): # very close to one of the ellipse axis median_op = True e = (a - b) / (a + b) - f2 = a ** 2 - b ** 2 + f2 = a**2 - b**2 emipsi = c.exp(-1j * psi) em2ipsi = c.exp(-2j * psi) if median_op is True: - eps = 10 ** -10 + eps = 10**-10 z_minus_eps = complex(r * np.cos(phi - eps), r * np.sin(phi - eps)) z_plus_eps = complex(r * np.cos(phi + eps), r * np.sin(phi + eps)) - pot_ext_minus = (1 - e ** 2) / (4 * e) * (f2 * c.log( - (self.sign(z_minus_eps * emipsi) * z_minus_eps * emipsi + c.sqrt(z_minus_eps ** 2 * em2ipsi - f2)) / 2.) - - self.sign(z_minus_eps * emipsi) * z_minus_eps * emipsi * c.sqrt( - z_minus_eps ** 2 * em2ipsi - f2) + z_minus_eps ** 2 * em2ipsi) * sig_0 - pot_ext_plus = (1 - e ** 2) / (4 * e) * (f2 * c.log( - (self.sign(z_plus_eps * emipsi) * z_plus_eps * emipsi + c.sqrt(z_plus_eps ** 2 * em2ipsi - f2)) / 2.) 
- - self.sign(z_plus_eps * emipsi) * z_plus_eps * emipsi * c.sqrt( - z_plus_eps ** 2 * em2ipsi - f2) + z_plus_eps ** 2 * em2ipsi) * sig_0 - pot_ext_mid = (1 - e ** 2) / (4 * e) * ( - f2 * c.log((self.sign(z * emipsi) * z * emipsi + c.sqrt(z ** 2 * em2ipsi - f2)) / 2.) - - self.sign(z * emipsi) * z * emipsi * c.sqrt(z ** 2 * em2ipsi - f2) + z ** 2 * em2ipsi) * sig_0 - pot_ext = np.median([pot_ext_minus.real, pot_ext_plus.real, pot_ext_mid.real]) + pot_ext_minus = ( + (1 - e**2) + / (4 * e) + * ( + f2 + * c.log( + ( + self.sign(z_minus_eps * emipsi) * z_minus_eps * emipsi + + c.sqrt(z_minus_eps**2 * em2ipsi - f2) + ) + / 2.0 + ) + - self.sign(z_minus_eps * emipsi) + * z_minus_eps + * emipsi + * c.sqrt(z_minus_eps**2 * em2ipsi - f2) + + z_minus_eps**2 * em2ipsi + ) + * sig_0 + ) + pot_ext_plus = ( + (1 - e**2) + / (4 * e) + * ( + f2 + * c.log( + ( + self.sign(z_plus_eps * emipsi) * z_plus_eps * emipsi + + c.sqrt(z_plus_eps**2 * em2ipsi - f2) + ) + / 2.0 + ) + - self.sign(z_plus_eps * emipsi) + * z_plus_eps + * emipsi + * c.sqrt(z_plus_eps**2 * em2ipsi - f2) + + z_plus_eps**2 * em2ipsi + ) + * sig_0 + ) + pot_ext_mid = ( + (1 - e**2) + / (4 * e) + * ( + f2 + * c.log( + ( + self.sign(z * emipsi) * z * emipsi + + c.sqrt(z**2 * em2ipsi - f2) + ) + / 2.0 + ) + - self.sign(z * emipsi) * z * emipsi * c.sqrt(z**2 * em2ipsi - f2) + + z**2 * em2ipsi + ) + * sig_0 + ) + pot_ext = np.median( + [pot_ext_minus.real, pot_ext_plus.real, pot_ext_mid.real] + ) else: - pot_ext = ((1 - e ** 2) / (4 * e) * ( - f2 * c.log((self.sign(z * emipsi) * z * emipsi + c.sqrt(z ** 2 * em2ipsi - f2)) / 2.) - - self.sign(z * emipsi) * z * emipsi * - c.sqrt(z ** 2 * em2ipsi - f2) + z ** 2 * em2ipsi) * sig_0).real + pot_ext = ( + (1 - e**2) + / (4 * e) + * ( + f2 + * c.log( + ( + self.sign(z * emipsi) * z * emipsi + + c.sqrt(z**2 * em2ipsi - f2) + ) + / 2.0 + ) + - self.sign(z * emipsi) * z * emipsi * c.sqrt(z**2 * em2ipsi - f2) + + z**2 * em2ipsi + ) + * sig_0 + ).real return pot_ext - diff --git a/lenstronomy/LensModel/Profiles/epl.py b/lenstronomy/LensModel/Profiles/epl.py index d6ba158c5..a62acc35e 100644 --- a/lenstronomy/LensModel/Profiles/epl.py +++ b/lenstronomy/LensModel/Profiles/epl.py @@ -1,4 +1,4 @@ -__author__ = 'ntessore' +__author__ = "ntessore" import numpy as np import lenstronomy.Util.util as util @@ -8,12 +8,11 @@ from scipy.special import hyp2f1 -__all__ = ['EPL', 'EPLMajorAxis'] +__all__ = ["EPL", "EPLMajorAxis"] class EPL(LensProfileBase): - """ - Elliptical Power Law mass profile + """Elliptical Power Law mass profile. .. math:: \\kappa(x, y) = \\frac{3-\\gamma}{2} \\left(\\frac{\\theta_{E}}{\\sqrt{q x^2 + y^2/q}} \\right)^{\\gamma-1} @@ -46,9 +45,24 @@ class EPL(LensProfileBase): scheme. An alternative implementation of the same model using a fortran code FASTELL is implemented as 'PEMD' profile. 
""" - param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "gamma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 1.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 2.5, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.epl_major_axis = EPLMajorAxis() @@ -56,14 +70,13 @@ def __init__(self): super(EPL, self).__init__() def param_conv(self, theta_E, gamma, e1, e2): - """ - converts parameters as defined in this class to the parameters used in the EPLMajorAxis() class + """Converts parameters as defined in this class to the parameters used in the + EPLMajorAxis() class. :param theta_E: Einstein radius as defined in the profile class :param gamma: negative power-law slope :param e1: eccentricity modulus :param e2: eccentricity modulus - :return: b, t, q, phi_G """ if self._static is True: @@ -72,9 +85,8 @@ def param_conv(self, theta_E, gamma, e1, e2): @staticmethod def _param_conv(theta_E, gamma, e1, e2): - """ - convert parameters from :math:`R = \\sqrt{q x^2 + y^2/q}` to - :math:`R = \\sqrt{q^2 x^2 + y^2}` + """Convert parameters from :math:`R = \\sqrt{q x^2 + y^2/q}` to :math:`R = + \\sqrt{q^2 x^2 + y^2}` :param gamma: power law slope :param theta_E: Einstein radius @@ -99,7 +111,12 @@ def set_static(self, theta_E, gamma, e1, e2, center_x=0, center_y=0): :return: self variables set """ self._static = True - self._b_static, self._t_static, self._q_static, self._phi_G_static = self._param_conv(theta_E, gamma, e1, e2) + ( + self._b_static, + self._t_static, + self._q_static, + self._phi_G_static, + ) = self._param_conv(theta_E, gamma, e1, e2) def set_dynamic(self): """ @@ -107,13 +124,13 @@ def set_dynamic(self): :return: """ self._static = False - if hasattr(self, '_b_static'): + if hasattr(self, "_b_static"): del self._b_static - if hasattr(self, '_t_static'): + if hasattr(self, "_t_static"): del self._t_static - if hasattr(self, '_phi_G_static'): + if hasattr(self, "_phi_G_static"): del self._phi_G_static - if hasattr(self, '_q_static'): + if hasattr(self, "_q_static"): del self._q_static def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): @@ -188,8 +205,8 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): # evaluate f__xx, f__xy, f__yx, f__yy = self.epl_major_axis.hessian(x__, y__, b, t, q) # rotate back - kappa = 1./2 * (f__xx + f__yy) - gamma1__ = 1./2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_G) * gamma1__ - np.sin(2 * phi_G) * gamma2__ gamma2 = +np.sin(2 * phi_G) * gamma1__ + np.cos(2 * phi_G) * gamma2__ @@ -199,21 +216,16 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def mass_3d_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the spherical power-law mass enclosed (with SPP routine) - :param r: radius within the mass is computed - :param theta_E: Einstein radius - :param gamma: power-law slope - :param e1: eccentricity component (not used) - :param e2: eccentricity component (not used) - :return: mass enclosed a 3D radius r - """ + 
"""Computes the spherical power-law mass enclosed (with SPP routine) :param r: + radius within the mass is computed :param theta_E: Einstein radius :param gamma: + power-law slope :param e1: eccentricity component (not used) :param e2: + eccentricity component (not used) :return: mass enclosed a 3D radius r.""" return self.spp.mass_3d_lens(r, theta_E, gamma) def density_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: radius within the mass is computed :param theta_E: Einstein radius @@ -226,9 +238,7 @@ def density_lens(self, r, theta_E, gamma, e1=None, e2=None): class EPLMajorAxis(LensProfileBase): - """ - This class contains the function and the derivatives of the - elliptical power law. + """This class contains the function and the derivatives of the elliptical power law. .. math:: \\kappa = (2-t)/2 * \\left[\\frac{b}{\\sqrt{q^2 x^2 + y^2}}\\right]^t @@ -238,15 +248,14 @@ class EPLMajorAxis(LensProfileBase): Tessore & Metcalf (2015), https://arxiv.org/abs/1507.01819 """ - param_names = ['b', 't', 'q', 'center_x', 'center_y'] - def __init__(self): + param_names = ["b", "t", "q", "center_x", "center_y"] + def __init__(self): super(EPLMajorAxis, self).__init__() def function(self, x, y, b, t, q): - """ - returns the lensing potential + """Returns the lensing potential. :param x: x-coordinate in image plane relative to center (major axis) :param y: y-coordinate in image plane relative to center (minor axis) @@ -259,13 +268,12 @@ def function(self, x, y, b, t, q): alpha_x, alpha_y = self.derivatives(x, y, b, t, q) # deflection potential, eq. (15) - psi = (x*alpha_x + y*alpha_y)/(2 - t) + psi = (x * alpha_x + y * alpha_y) / (2 - t) return psi def derivatives(self, x, y, b, t, q): - """ - returns the deflection angles + """Returns the deflection angles. :param x: x-coordinate in image plane relative to center (major axis) :param y: y-coordinate in image plane relative to center (minor axis) @@ -276,26 +284,25 @@ def derivatives(self, x, y, b, t, q): """ # elliptical radius, eq. (5) Z = np.empty(np.shape(x), dtype=complex) - Z.real = q*x + Z.real = q * x Z.imag = y R = np.abs(Z) R = np.maximum(R, 0.000000001) # angular dependency with extra factor of R, eq. (23) - R_omega = Z*hyp2f1(1, t/2, 2-t/2, -(1-q)/(1+q)*(Z/Z.conj())) + R_omega = Z * hyp2f1(1, t / 2, 2 - t / 2, -(1 - q) / (1 + q) * (Z / Z.conj())) # deflection, eq. (22) - alpha = 2/(1+q)*(b/R)**t*R_omega + alpha = 2 / (1 + q) * (b / R) ** t * R_omega # return real and imaginary part - alpha_real = np.nan_to_num(alpha.real, posinf=10**10, neginf=-10**10) - alpha_imag = np.nan_to_num(alpha.imag, posinf=10**10, neginf=-10**10) + alpha_real = np.nan_to_num(alpha.real, posinf=10**10, neginf=-(10**10)) + alpha_imag = np.nan_to_num(alpha.imag, posinf=10**10, neginf=-(10**10)) return alpha_real, alpha_imag def hessian(self, x, y, b, t, q): - """ - Hessian matrix of the lensing potential + """Hessian matrix of the lensing potential. 
:param x: x-coordinate in image plane relative to center (major axis) :param y: y-coordinate in image plane relative to center (minor axis) @@ -304,25 +311,25 @@ def hessian(self, x, y, b, t, q): :param q: axis ratio :return: f_xx, f_yy, f_xy """ - R = np.hypot(q*x, y) + R = np.hypot(q * x, y) R = np.maximum(R, 0.00000001) r = np.hypot(x, y) - cos, sin = x/r, y/r - cos2, sin2 = cos*cos*2 - 1, sin*cos*2 + cos, sin = x / r, y / r + cos2, sin2 = cos * cos * 2 - 1, sin * cos * 2 # convergence, eq. (2) - kappa = (2 - t)/2*(b/R)**t - kappa = np.nan_to_num(kappa, posinf=10**10, neginf=-10**10) + kappa = (2 - t) / 2 * (b / R) ** t + kappa = np.nan_to_num(kappa, posinf=10**10, neginf=-(10**10)) # deflection via method alpha_x, alpha_y = self.derivatives(x, y, b, t, q) # shear, eq. (17), corrected version from arXiv/corrigendum - gamma_1 = (1-t)*(alpha_x*cos - alpha_y*sin)/r - kappa*cos2 - gamma_2 = (1-t)*(alpha_y*cos + alpha_x*sin)/r - kappa*sin2 - gamma_1 = np.nan_to_num(gamma_1, posinf=10**10, neginf=-10**10) - gamma_2 = np.nan_to_num(gamma_2, posinf=10**10, neginf=-10**10) + gamma_1 = (1 - t) * (alpha_x * cos - alpha_y * sin) / r - kappa * cos2 + gamma_2 = (1 - t) * (alpha_y * cos + alpha_x * sin) / r - kappa * sin2 + gamma_1 = np.nan_to_num(gamma_1, posinf=10**10, neginf=-(10**10)) + gamma_2 = np.nan_to_num(gamma_2, posinf=10**10, neginf=-(10**10)) # second derivatives from convergence and shear f_xx = kappa + gamma_1 diff --git a/lenstronomy/LensModel/Profiles/epl_boxydisky.py b/lenstronomy/LensModel/Profiles/epl_boxydisky.py index a9af8f88d..a25c662cc 100644 --- a/lenstronomy/LensModel/Profiles/epl_boxydisky.py +++ b/lenstronomy/LensModel/Profiles/epl_boxydisky.py @@ -1,4 +1,4 @@ -__author__ = 'Maverick-Oh' +__author__ = "Maverick-Oh" import numpy as np import lenstronomy.Util.util as util @@ -7,12 +7,12 @@ from lenstronomy.LensModel.Profiles.epl import EPL from lenstronomy.LensModel.Profiles.multipole import Multipole -__all__ = ['EPL_BOXYDISKY'] +__all__ = ["EPL_BOXYDISKY"] + class EPL_BOXYDISKY(LensProfileBase): - """" - EPL (Elliptical Power Law) mass profile combined with Multipole with m=4, so that it's either purely boxy or - disky with EPL's axis and Multipole's axis aligned. + """ " EPL (Elliptical Power Law) mass profile combined with Multipole with m=4, so + that it's either purely boxy or disky with EPL's axis and Multipole's axis aligned. Reference to the implementation: https://ui.adsabs.harvard.edu/abs/2022A%26A...659A.127V/abstract @@ -26,11 +26,26 @@ class EPL_BOXYDISKY(LensProfileBase): :param center_y: center of distortion :param a_m: multipole strength. 
The profile becomes disky when a_m>0 and boxy when a_m<0 """ - param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y', 'a_m'] - lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100,\ - 'a_m': -0.1} - upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100,\ - 'a_m': +0.1} + + param_names = ["theta_E", "gamma", "e1", "e2", "center_x", "center_y", "a_m"] + lower_limit_default = { + "theta_E": 0, + "gamma": 1.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + "a_m": -0.1, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 2.5, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + "a_m": +0.1, + } def __init__(self): self._epl = EPL() @@ -42,8 +57,21 @@ def param_split(self, theta_E, gamma, e1, e2, a_m, center_x=0, center_y=0): # this function converts a given parameter set of EPL_BOXYDISKY into two parameter sets; one for EPL and the # other for Multipole with m=4. phi, _ = param_util.ellipticity2phi_q(e1, e2) - kwargs_epl = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': center_x, 'center_y': center_y} - kwargs_multipole = {'m': self._m, 'a_m': a_m, 'phi_m': phi, 'center_x': center_x, 'center_y': center_y} + kwargs_epl = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": center_x, + "center_y": center_y, + } + kwargs_multipole = { + "m": self._m, + "a_m": a_m, + "phi_m": phi, + "center_x": center_x, + "center_y": center_y, + } return kwargs_epl, kwargs_multipole @@ -61,8 +89,9 @@ def function(self, x, y, theta_E, gamma, e1, e2, a_m, center_x=0, center_y=0): :param center_y: profile center :return: lensing potential """ - kwargs_epl, kwargs_multipole = self.param_split(theta_E, gamma, e1, e2, a_m, center_x=center_x, - center_y=center_y) + kwargs_epl, kwargs_multipole = self.param_split( + theta_E, gamma, e1, e2, a_m, center_x=center_x, center_y=center_y + ) f_epl = self._epl.function(x, y, **kwargs_epl) f_multipole = self._multipole.function(x, y, **kwargs_multipole) return f_epl + f_multipole @@ -81,9 +110,13 @@ def derivatives(self, x, y, theta_E, gamma, e1, e2, a_m, center_x=0, center_y=0) :param center_y: profile center :return: alpha_x, alpha_y """ - kwargs_epl, kwargs_multipole = self.param_split(theta_E, gamma, e1, e2, a_m, center_x=center_x, center_y=center_y) + kwargs_epl, kwargs_multipole = self.param_split( + theta_E, gamma, e1, e2, a_m, center_x=center_x, center_y=center_y + ) f_x_epl, f_y_epl = self._epl.derivatives(x, y, **kwargs_epl) - f_x_multipole, f_y_multipole = self._multipole.derivatives(x, y, **kwargs_multipole) + f_x_multipole, f_y_multipole = self._multipole.derivatives( + x, y, **kwargs_multipole + ) f_x = f_x_epl + f_x_multipole f_y = f_y_epl + f_y_multipole return f_x, f_y @@ -102,9 +135,16 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, a_m, center_x=0, center_y=0): :param center_y: profile center :return: f_xx, f_xy, f_yx, f_yy """ - kwargs_epl, kwargs_multipole = self.param_split(theta_E, gamma, e1, e2, a_m, center_x=center_x, center_y=center_y) + kwargs_epl, kwargs_multipole = self.param_split( + theta_E, gamma, e1, e2, a_m, center_x=center_x, center_y=center_y + ) f_xx_epl, f_xy_epl, f_yx_epl, f_yy_epl = self._epl.hessian(x, y, **kwargs_epl) - f_xx_multipole, f_xy_multipole, f_yx_multipole, f_yy_multipole = self._multipole.hessian(x, y, **kwargs_multipole) + ( + f_xx_multipole, + f_xy_multipole, + f_yx_multipole, + f_yy_multipole, + ) = 
self._multipole.hessian(x, y, **kwargs_multipole) f_xx = f_xx_epl + f_xx_multipole f_xy = f_xy_epl + f_xy_multipole f_yx = f_yx_epl + f_yx_multipole diff --git a/lenstronomy/LensModel/Profiles/epl_numba.py b/lenstronomy/LensModel/Profiles/epl_numba.py index 0167acfcb..c350201c1 100644 --- a/lenstronomy/LensModel/Profiles/epl_numba.py +++ b/lenstronomy/LensModel/Profiles/epl_numba.py @@ -1,15 +1,15 @@ -__author__ = 'ewoudwempe' +__author__ = "ewoudwempe" import numpy as np import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util.numba_util import jit, nan_to_num -__all__ = ['EPL_numba'] +__all__ = ["EPL_numba"] class EPL_numba(LensProfileBase): - """" + """ " Elliptical Power Law mass profile - computation accelerated with numba .. math:: @@ -43,16 +43,31 @@ class EPL_numba(LensProfileBase): A (slower) implementation of the same model using hyperbolic functions without the iterative calculation is accessible as 'EPL' not requiring numba. """ - param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "gamma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 1.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 2.5, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): super(EPL_numba).__init__() @staticmethod @jit() - def function(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): + def function(x, y, theta_E, gamma, e1, e2, center_x=0.0, center_y=0.0): """ :param x: x-coordinate (angle) @@ -65,13 +80,15 @@ def function(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): :param center_y: y-position of lens center :return: lensing potential """ - z, b, t, q, ang = param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y) + z, b, t, q, ang = param_transform( + x, y, theta_E, gamma, e1, e2, center_x, center_y + ) alph = alpha(z.real, z.imag, b, q, t) - return 1/(2-t)*(z.real*alph.real+z.imag*alph.imag) + return 1 / (2 - t) * (z.real * alph.real + z.imag * alph.imag) @staticmethod @jit() - def derivatives(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): + def derivatives(x, y, theta_E, gamma, e1, e2, center_x=0.0, center_y=0.0): """ :param x: x-coordinate (angle) @@ -84,13 +101,15 @@ def derivatives(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): :param center_y: y-position of lens center :return: deflection angles alpha_x, alpha_y """ - z, b, t, q, ang = param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y) - alph = alpha(z.real, z.imag, b, q, t) * np.exp(1j*ang) + z, b, t, q, ang = param_transform( + x, y, theta_E, gamma, e1, e2, center_x, center_y + ) + alph = alpha(z.real, z.imag, b, q, t) * np.exp(1j * ang) return alph.real, alph.imag @staticmethod @jit() - def hessian(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): + def hessian(x, y, theta_E, gamma, e1, e2, center_x=0.0, center_y=0.0): """ :param x: x-coordinate (angle) @@ -103,48 +122,55 @@ def hessian(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): :param center_y: y-position of lens center :return: Hessian components f_xx, f_yy, f_xy """ - z, b, t, q, ang_ell = 
param_transform(x, y, theta_E, gamma, e1, e2, center_x, center_y) + z, b, t, q, ang_ell = param_transform( + x, y, theta_E, gamma, e1, e2, center_x, center_y + ) ang = np.angle(z) # r = np.abs(z) - zz_ell = z.real*q+1j*z.imag + zz_ell = z.real * q + 1j * z.imag R = np.abs(zz_ell) phi = np.angle(zz_ell) # u = np.minimum(nan_to_num((b/R)**t),1e100) - u = np.fmin((b/R)**t, 1e10) # I remove all factors of (b/R)**t to only have to remove nans once. - # The np.fmin is a regularisation near R=0, to avoid overflows - # in the magnification calculations - kappa = (2-t)/2 - Roverr = np.sqrt(np.cos(ang)**2*q**2+np.sin(ang)**2) + u = np.fmin( + (b / R) ** t, 1e10 + ) # I remove all factors of (b/R)**t to only have to remove nans once. + # The np.fmin is a regularisation near R=0, to avoid overflows + # in the magnification calculations + kappa = (2 - t) / 2 + Roverr = np.sqrt(np.cos(ang) ** 2 * q**2 + np.sin(ang) ** 2) Omega = omega(phi, t, q) - alph = (2*b)/(1+q)/b*Omega - gamma_shear = -np.exp(2j*(ang+ang_ell))*kappa + (1-t)*np.exp(1j*(ang+2*ang_ell)) * alph*Roverr - - f_xx = (kappa + gamma_shear.real)*u - f_yy = (kappa - gamma_shear.real)*u - f_xy = gamma_shear.imag*u + alph = (2 * b) / (1 + q) / b * Omega + gamma_shear = ( + -np.exp(2j * (ang + ang_ell)) * kappa + + (1 - t) * np.exp(1j * (ang + 2 * ang_ell)) * alph * Roverr + ) + + f_xx = (kappa + gamma_shear.real) * u + f_yy = (kappa - gamma_shear.real) * u + f_xy = gamma_shear.imag * u # Fix the nans if x=y=0 is filled in return f_xx, f_xy, f_xy, f_yy @jit() -def param_transform(x, y, theta_E, gamma, e1, e2, center_x=0., center_y=0.): - """Converts the parameters from lenstronomy definitions (as defined in PEMD) to the definitions of Tessore+ (2015)""" - t = gamma-1 +def param_transform(x, y, theta_E, gamma, e1, e2, center_x=0.0, center_y=0.0): + """Converts the parameters from lenstronomy definitions (as defined in PEMD) to the + definitions of Tessore+ (2015)""" + t = gamma - 1 phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y ang = phi_G - z = np.exp(-1j*phi_G) * (x_shift + y_shift*1j) - return z, theta_E*np.sqrt(q), t, q, ang + z = np.exp(-1j * phi_G) * (x_shift + y_shift * 1j) + return z, theta_E * np.sqrt(q), t, q, ang @jit() def alpha(x, y, b, q, t, Omega=None): - """ - Calculates the complex deflection + """Calculates the complex deflection. :param x: x-coordinate (angle) :param y: y-coordinate (angle) @@ -154,25 +180,29 @@ def alpha(x, y, b, q, t, Omega=None): :param Omega: If given, use this Omega (to avoid recalculations) :return: complex deflection angle """ - zz = x*q + 1j*y + zz = x * q + 1j * y R = np.abs(zz) phi = np.angle(zz) if Omega is None: Omega = omega(phi, t, q) # Omega = omega(phi, t, q) - alph = (2*b)/(1+q)*nan_to_num((b/R)**t*R/b)*Omega + alph = (2 * b) / (1 + q) * nan_to_num((b / R) ** t * R / b) * Omega return alph -@jit(fastmath=True) # Because of the reduction nature of this, relaxing commutativity actually matters a lot (4x speedup). +@jit( + fastmath=True +) # Because of the reduction nature of this, relaxing commutativity actually matters a lot (4x speedup). def omega(phi, t, q, niter_max=200, tol=1e-16): - f = (1-q)/(1+q) + f = (1 - q) / (1 + q) omegas = np.zeros_like(phi, dtype=np.complex128) - niter = min(niter_max, int(np.log(tol)/np.log(f))+2) # The absolute value of each summand is always less than f, hence this limit for the number of iterations. 
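The helper being reformatted here builds the hypergeometric series term by term: with f = (1 - q)/(1 + q), each term picks up a factor (2n - 2 + t)/(2n + 2 - t) * (-f * exp(2j * phi)), whose modulus is below f for t < 2, so the tail is bounded by a geometric series; that is where the niter bound above comes from. A small scalar sketch cross-checking the truncated series against the closed hypergeometric form used in EPLMajorAxis.derivatives (parameter values are arbitrary):

import numpy as np
from scipy.special import hyp2f1

def omega_series(phi, t, q, niter_max=200, tol=1e-16):
    # scalar version of the truncated series for omega(phi)
    f = (1 - q) / (1 + q)
    niter = min(niter_max, int(np.log(tol) / np.log(f)) + 2)
    Omega = np.exp(1j * phi)
    fact = -f * np.exp(2j * phi)
    total = 0j
    for n in range(1, niter):
        total += Omega
        Omega *= (2 * n - (2 - t)) / (2 * n + (2 - t)) * fact
    return total + Omega

phi, t, q = 0.4, 1.1, 0.7
f = (1 - q) / (1 + q)
# closed form: omega(phi) = exp(1j*phi) * 2F1(1, t/2; 2 - t/2; -f * exp(2j*phi))
ref = np.exp(1j * phi) * hyp2f1(1, t / 2, 2 - t / 2, -f * np.exp(2j * phi))
print(abs(omega_series(phi, t, q) - ref))  # agrees to machine precision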
- Omega = 1*np.exp(1j*phi) - fact = -f*np.exp(2j*phi) + niter = min( + niter_max, int(np.log(tol) / np.log(f)) + 2 + ) # The absolute value of each summand is always less than f, hence this limit for the number of iterations. + Omega = 1 * np.exp(1j * phi) + fact = -f * np.exp(2j * phi) for n in range(1, niter): omegas += Omega - Omega *= (2*n-(2-t))/(2*n+(2-t)) * fact + Omega *= (2 * n - (2 - t)) / (2 * n + (2 - t)) * fact omegas += Omega return omegas diff --git a/lenstronomy/LensModel/Profiles/flexion.py b/lenstronomy/LensModel/Profiles/flexion.py index 61967f6af..4354832fa 100644 --- a/lenstronomy/LensModel/Profiles/flexion.py +++ b/lenstronomy/LensModel/Profiles/flexion.py @@ -1,33 +1,55 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Flexion'] +__all__ = ["Flexion"] class Flexion(LensProfileBase): - """ - class for flexion - """ - param_names = ['g1', 'g2', 'g3', 'g4', 'ra_0', 'dec_0'] - lower_limit_default = {'g1': -0.1, 'g2': -0.1, 'g3': -0.1, 'g4': -0.1, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'g1': 0.1, 'g2': 0.1, 'g3': 0.1, 'g4': 0.1, 'ra_0': 100, 'dec_0': 100} + """Class for flexion.""" + + param_names = ["g1", "g2", "g3", "g4", "ra_0", "dec_0"] + lower_limit_default = { + "g1": -0.1, + "g2": -0.1, + "g3": -0.1, + "g4": -0.1, + "ra_0": -100, + "dec_0": -100, + } + upper_limit_default = { + "g1": 0.1, + "g2": 0.1, + "g3": 0.1, + "g4": 0.1, + "ra_0": 100, + "dec_0": 100, + } def function(self, x, y, g1, g2, g3, g4, ra_0=0, dec_0=0): x_ = x - ra_0 y_ = y - dec_0 - f_ = 1./6 * (g1 * x_**3 + 3*g2 * x_**2 * y_ + 3*g3 * x_ * y_**2 + g4 * y_**3) + f_ = ( + 1.0 + / 6 + * ( + g1 * x_**3 + + 3 * g2 * x_**2 * y_ + + 3 * g3 * x_ * y_**2 + + g4 * y_**3 + ) + ) return f_ def derivatives(self, x, y, g1, g2, g3, g4, ra_0=0, dec_0=0): x_ = x - ra_0 y_ = y - dec_0 - f_x = 1./2.*g1*x_**2 + g2*x_*y_ + 1./2.*g3*y_**2 - f_y = 1./2.*g2*x_**2 + g3*x_*y_ + 1./2.*g4*y_**2 + f_x = 1.0 / 2.0 * g1 * x_**2 + g2 * x_ * y_ + 1.0 / 2.0 * g3 * y_**2 + f_y = 1.0 / 2.0 * g2 * x_**2 + g3 * x_ * y_ + 1.0 / 2.0 * g4 * y_**2 return f_x, f_y def hessian(self, x, y, g1, g2, g3, g4, ra_0=0, dec_0=0): x_ = x - ra_0 y_ = y - dec_0 - f_xx = g1*x_ + g2*y_ - f_yy = g3*x_ + g4*y_ - f_xy = g2*x_ + g3*y_ + f_xx = g1 * x_ + g2 * y_ + f_yy = g3 * x_ + g4 * y_ + f_xy = g2 * x_ + g3 * y_ return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/flexionfg.py b/lenstronomy/LensModel/Profiles/flexionfg.py index c4813e636..b8eeb34bc 100644 --- a/lenstronomy/LensModel/Profiles/flexionfg.py +++ b/lenstronomy/LensModel/Profiles/flexionfg.py @@ -1,7 +1,7 @@ from lenstronomy.LensModel.Profiles.flexion import Flexion from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Flexionfg'] +__all__ = ["Flexionfg"] class Flexionfg(LensProfileBase): @@ -9,17 +9,31 @@ class Flexionfg(LensProfileBase): Flexion consist of basis F flexion and G flexion (F1,F2,G1,G2), see formulas 2.54, 2.55 in Massimo Meneghetti 2017 - "Introduction to Gravitational Lensing". 
""" - param_names = ['F1', 'F2', 'G1', 'G2', 'ra_0', 'dec_0'] - lower_limit_default = {'F1': -0.1, 'F2': -0.1, 'G1': -0.1, 'G2': -0.1, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'F1': 0.1, 'F2': 0.1, 'G1': 0.1, 'G2': 0.1, 'ra_0': 100, 'dec_0': 100} + + param_names = ["F1", "F2", "G1", "G2", "ra_0", "dec_0"] + lower_limit_default = { + "F1": -0.1, + "F2": -0.1, + "G1": -0.1, + "G2": -0.1, + "ra_0": -100, + "dec_0": -100, + } + upper_limit_default = { + "F1": 0.1, + "F2": 0.1, + "G1": 0.1, + "G2": 0.1, + "ra_0": 100, + "dec_0": 100, + } def __init__(self): self.flexion_cart = Flexion() super(Flexionfg, self).__init__() def function(self, x, y, F1, F2, G1, G2, ra_0=0, dec_0=0): - """ - lensing potential + """Lensing potential. :param x: x-coordinate :param y: y-coordinate @@ -35,49 +49,31 @@ def function(self, x, y, F1, F2, G1, G2, ra_0=0, dec_0=0): return self.flexion_cart.function(x, y, _g1, _g2, _g3, _g4, ra_0, dec_0) def derivatives(self, x, y, F1, F2, G1, G2, ra_0=0, dec_0=0): - """ - deflection angle - :param x: x-coordinate - :param y: y-coordinate - :param F1: F1 flexion, derivative of kappa in x direction - :param F2: F2 flexion, derivative of kappa in y direction - :param G1: G1 flexion - :param G2: G2 flexion - :param ra_0: center x-coordinate - :param dec_0: center x-coordinate - :return: deflection angle - """ + """Deflection angle :param x: x-coordinate :param y: y-coordinate :param F1: F1 + flexion, derivative of kappa in x direction :param F2: F2 flexion, derivative of + kappa in y direction :param G1: G1 flexion :param G2: G2 flexion :param ra_0: + center x-coordinate :param dec_0: center x-coordinate :return: deflection + angle.""" _g1, _g2, _g3, _g4 = self.transform_fg(F1, F2, G1, G2) return self.flexion_cart.derivatives(x, y, _g1, _g2, _g3, _g4, ra_0, dec_0) def hessian(self, x, y, F1, F2, G1, G2, ra_0=0, dec_0=0): - """ - Hessian matrix - :param x: x-coordinate - :param y: y-coordinate - :param F1: F1 flexion, derivative of kappa in x direction - :param F2: F2 flexion, derivative of kappa in y direction - :param G1: G1 flexion - :param G2: G2 flexion - :param ra_0: center x-coordinate - :param dec_0: center y-coordinate - :return: second order derivatives f_xx, f_yy, f_xy - """ + """Hessian matrix :param x: x-coordinate :param y: y-coordinate :param F1: F1 + flexion, derivative of kappa in x direction :param F2: F2 flexion, derivative of + kappa in y direction :param G1: G1 flexion :param G2: G2 flexion :param ra_0: + center x-coordinate :param dec_0: center y-coordinate :return: second order + derivatives f_xx, f_yy, f_xy.""" _g1, _g2, _g3, _g4 = self.transform_fg(F1, F2, G1, G2) return self.flexion_cart.hessian(x, y, _g1, _g2, _g3, _g4, ra_0, dec_0) @staticmethod def transform_fg(F1, F2, G1, G2): - """ - basis transform from (F1,F2,G1,G2) to (g1,g2,g3,g4) - :param F1: F1 flexion, derivative of kappa in x direction - :param F2: F2 flexion, derivative of kappa in y direction - :param G1: G1 flexion - :param G2: G2 flexion - :return: g1,g2,g3,g4 (phi_xxx, phi_xxy, phi_xyy, phi_yyy) - """ - g1 = (3*F1 + G1) * 0.5 - g2 = (3*F2 + G2) * 0.5 + """Basis transform from (F1,F2,G1,G2) to (g1,g2,g3,g4) :param F1: F1 flexion, + derivative of kappa in x direction :param F2: F2 flexion, derivative of kappa in + y direction :param G1: G1 flexion :param G2: G2 flexion :return: g1,g2,g3,g4 + (phi_xxx, phi_xxy, phi_xyy, phi_yyy)""" + g1 = (3 * F1 + G1) * 0.5 + g2 = (3 * F2 + G2) * 0.5 g3 = (F1 - G1) * 0.5 g4 = (F2 - G2) * 0.5 return g1, g2, g3, g4 diff --git 
a/lenstronomy/LensModel/Profiles/gauss_decomposition.py b/lenstronomy/LensModel/Profiles/gauss_decomposition.py index eb44ae6a7..eb5fb985e 100644 --- a/lenstronomy/LensModel/Profiles/gauss_decomposition.py +++ b/lenstronomy/LensModel/Profiles/gauss_decomposition.py @@ -1,10 +1,8 @@ # -*- coding: utf-8 -*- -""" -This module contains the class to compute lensing properties of any -elliptical profile using Shajib (2019)'s Gauss decomposition. -""" +"""This module contains the class to compute lensing properties of any elliptical +profile using Shajib (2019)'s Gauss decomposition.""" -__author__ = 'ajshajib' +__author__ = "ajshajib" import numpy as np import abc @@ -15,22 +13,34 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() -_SQRT_2PI = np.sqrt(2*np.pi) +_SQRT_2PI = np.sqrt(2 * np.pi) @export class GaussianEllipseKappaSet(LensProfileBase): - """ - This class computes the lensing properties of a set of concentric - elliptical Gaussian convergences. - """ - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5, - 'center_x': 100, 'center_y': 100} + """This class computes the lensing properties of a set of concentric elliptical + Gaussian convergences.""" + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "sigma": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5): """ @@ -41,14 +51,13 @@ def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5): :type min_ellipticity: ``float`` """ self.gaussian_ellipse_kappa = GaussianEllipseKappa( - use_scipy_wofz=use_scipy_wofz, - min_ellipticity=min_ellipticity) + use_scipy_wofz=use_scipy_wofz, min_ellipticity=min_ellipticity + ) super(GaussianEllipseKappaSet, self).__init__() def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the potential function for a set of concentric elliptical - Gaussian convergence profiles. + """Compute the potential function for a set of concentric elliptical Gaussian + convergence profiles. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -72,19 +81,15 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): function = np.zeros_like(x, dtype=float) for i in range(len(amp)): - function += self.gaussian_ellipse_kappa.function(x, y, - amp[i], - sigma[i], e1, - e2, - center_x, - center_y) + function += self.gaussian_ellipse_kappa.function( + x, y, amp[i], sigma[i], e1, e2, center_x, center_y + ) return function def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the derivatives of function angles :math:`\\partial - f/\\partial x`, :math:`\\partial f/\\partial y` at :math:`x,\\ y` for a - set of concentric elliptic Gaussian convergence profiles. + """Compute the derivatives of function angles :math:`\\partial f/\\partial x`, + :math:`\\partial f/\\partial y` at :math:`x,\\ y` for a set of concentric + elliptic Gaussian convergence profiles. 
:param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -109,19 +114,25 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): f_y = np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_x_i, f_y_i = self.gaussian_ellipse_kappa.derivatives(x, y, amp=amp[i], sigma=sigma[i], e1=e1, e2=e2, - center_x=center_x, center_y=center_y) + f_x_i, f_y_i = self.gaussian_ellipse_kappa.derivatives( + x, + y, + amp=amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) f_x += f_x_i f_y += f_y_i return f_x, f_y def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute Hessian matrix of function :math:`\\partial^2f/\\partial x^2`, - :math:`\\partial^2 f/\\partial y^2`, :math:`\\partial^2 f/\\partial - x\\partial y` for a set of concentric elliptic Gaussian convergence - profiles. + """Compute Hessian matrix of function :math:`\\partial^2f/\\partial x^2`, + :math:`\\partial^2 f/\\partial y^2`, :math:`\\partial^2 f/\\partial x\\partial + y` for a set of concentric elliptic Gaussian convergence profiles. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -149,8 +160,16 @@ def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): f_xy = np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_ellipse_kappa.hessian(x, y, amp=amp[i], sigma=sigma[i], e1=e1, - e2=e2, center_x=center_x, center_y=center_y) + f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_ellipse_kappa.hessian( + x, + y, + amp=amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) f_xx += f_xx_i f_yy += f_yy_i f_xy += f_xy_i @@ -158,10 +177,8 @@ def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the density of a set of concentric elliptical Gaussian - convergence profiles :math:`\\sum A/(2\\pi \\sigma^2) \\exp(-( - x^2+y^2/q^2)/2\\sigma^2)`. + """Compute the density of a set of concentric elliptical Gaussian convergence + profiles :math:`\\sum A/(2\\pi \\sigma^2) \\exp(-( x^2+y^2/q^2)/2\\sigma^2)`. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -185,23 +202,36 @@ def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): density_2d = np.zeros_like(x, dtype=float) for i in range(len(amp)): - density_2d += self.gaussian_ellipse_kappa.density_2d(x, y, amp=amp[i], sigma=sigma[i], e1=e1, e2=e2, - center_x=center_x, center_y=center_y) + density_2d += self.gaussian_ellipse_kappa.density_2d( + x, + y, + amp=amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) return density_2d @export class GaussDecompositionAbstract(metaclass=abc.ABCMeta): - """ - This abstract class sets up a template for computing lensing properties of - an elliptical convergence through Shajib (2019)'s Gauss decomposition. - """ - def __init__(self, n_sigma=15, sigma_start_mult=0.02, sigma_end_mult=15., - precision=10, use_scipy_wofz=True, min_ellipticity=1e-5): - """ - Set up settings for the Gaussian decomposition. For more details about - the decomposition parameters, see Shajib (2019). 
+ """This abstract class sets up a template for computing lensing properties of an + elliptical convergence through Shajib (2019)'s Gauss decomposition.""" + + def __init__( + self, + n_sigma=15, + sigma_start_mult=0.02, + sigma_end_mult=15.0, + precision=10, + use_scipy_wofz=True, + min_ellipticity=1e-5, + ): + """Set up settings for the Gaussian decomposition. For more details about the + decomposition parameters, see Shajib (2019). :param n_sigma: Number of Gaussian components :type n_sigma: ``int`` @@ -217,8 +247,8 @@ def __init__(self, n_sigma=15, sigma_start_mult=0.02, sigma_end_mult=15., :type min_ellipticity: ``float`` """ self.gaussian_set = GaussianEllipseKappaSet( - use_scipy_wofz=use_scipy_wofz, - min_ellipticity=min_ellipticity) + use_scipy_wofz=use_scipy_wofz, min_ellipticity=min_ellipticity + ) self.n_sigma = n_sigma self.sigma_start_mult = sigma_start_mult @@ -229,36 +259,39 @@ def __init__(self, n_sigma=15, sigma_start_mult=0.02, sigma_end_mult=15., # nodes and weights based on Fourier-Euler method # for details Abate & Whitt (2006) kes = np.arange(2 * p + 1) - self.betas = np.sqrt(2 * p * np.log(10) / 3. + 2. * 1j * np.pi * kes) + self.betas = np.sqrt(2 * p * np.log(10) / 3.0 + 2.0 * 1j * np.pi * kes) epsilons = np.zeros(2 * p + 1) epsilons[0] = 0.5 - epsilons[1:p + 1] = 1. - epsilons[-1] = 1 / 2. ** p + epsilons[1 : p + 1] = 1.0 + epsilons[-1] = 1 / 2.0**p for k in range(1, p): - epsilons[2 * p - k] = epsilons[2 * p - k + 1] + 1 / 2. ** p * comb( - p, k) + epsilons[2 * p - k] = epsilons[2 * p - k + 1] + 1 / 2.0**p * comb(p, k) - self.etas = (-1.) ** kes * epsilons * 10. ** (p / 3.) * 2. * _SQRT_2PI + self.etas = (-1.0) ** kes * epsilons * 10.0 ** (p / 3.0) * 2.0 * _SQRT_2PI def gauss_decompose(self, **kwargs): - r""" - Compute the amplitudes and sigmas of Gaussian components using the - integral transform with Gaussian kernel from Shajib (2019). The - returned values are in the convention of eq. (2.13). + r"""Compute the amplitudes and sigmas of Gaussian components using the integral + transform with Gaussian kernel from Shajib (2019). The returned values are in + the convention of eq. (2.13). :param kwargs: Keyword arguments to send to ``func`` :return: Amplitudes and standard deviations of the Gaussian components :rtype: tuple ``(numpy.array, numpy.array)`` """ - sigma_start = self.sigma_start_mult*self.get_scale(**kwargs) - sigma_end = self.sigma_end_mult*self.get_scale(**kwargs) + sigma_start = self.sigma_start_mult * self.get_scale(**kwargs) + sigma_end = self.sigma_end_mult * self.get_scale(**kwargs) sigmas = np.logspace(np.log10(sigma_start), np.log10(sigma_end), self.n_sigma) - f_sigmas = np.sum(self.etas * self.get_kappa_1d(sigmas[:, np.newaxis]*self.betas[np.newaxis, :], **kwargs).real, - axis=1) + f_sigmas = np.sum( + self.etas + * self.get_kappa_1d( + sigmas[:, np.newaxis] * self.betas[np.newaxis, :], **kwargs + ).real, + axis=1, + ) del_log_sigma = np.abs(np.diff(np.log(sigmas)).mean()) @@ -272,9 +305,8 @@ def gauss_decompose(self, **kwargs): @abc.abstractmethod def get_scale(self, **kwargs): - """ - Abstract method to identify the keyword argument for the scale size - among the profile parameters of the child class' convergence profile. + """Abstract method to identify the keyword argument for the scale size among the + profile parameters of the child class' convergence profile. 
:param kwargs: Keyword arguments :return: Scale size @@ -283,20 +315,16 @@ def get_scale(self, **kwargs): @abc.abstractmethod def get_kappa_1d(self, y, **kwargs): - r""" - Abstract method to compute the spherical Sersic profile at y. - The concrete method has to defined by the child class. + r"""Abstract method to compute the spherical Sersic profile at y. The concrete + method has to defined by the child class. :param y: y coordinate :type y: ``float`` or ``numpy.array`` :param kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile """ - def function(self, x, y, e1=0., e2=0., center_x=0., - center_y=0., **kwargs): - r""" - Compute the deflection potential of a Gauss-decomposed - elliptic convergence. + def function(self, x, y, e1=0.0, e2=0.0, center_x=0.0, center_y=0.0, **kwargs): + r"""Compute the deflection potential of a Gauss-decomposed elliptic convergence. :param x: x coordinate :type x: ``float`` @@ -317,16 +345,16 @@ def function(self, x, y, e1=0., e2=0., center_x=0., amps, sigmas = self.gauss_decompose(**kwargs) # converting the amplitude convention A -> A/(2*pi*sigma^2) - amps *= 2.*np.pi * sigmas * sigmas + amps *= 2.0 * np.pi * sigmas * sigmas - return self.gaussian_set.function(x, y, amps, sigmas, e1, e2, center_x, center_y) + return self.gaussian_set.function( + x, y, amps, sigmas, e1, e2, center_x, center_y + ) - def derivatives(self, x, y, e1=0., e2=0., center_x=0., - center_y=0., **kwargs): - r""" - Compute the derivatives of the deflection potential :math:`\partial - f/\partial x`, :math:`\partial f/\partial y` for a Gauss-decomposed - elliptic convergence. + def derivatives(self, x, y, e1=0.0, e2=0.0, center_x=0.0, center_y=0.0, **kwargs): + r"""Compute the derivatives of the deflection potential :math:`\partial + f/\partial x`, :math:`\partial f/\partial y` for a Gauss-decomposed elliptic + convergence. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -347,17 +375,16 @@ def derivatives(self, x, y, e1=0., e2=0., center_x=0., amps, sigmas = self.gauss_decompose(**kwargs) # converting the amplitude convention A -> A/(2*pi*sigma^2) - amps *= 2. * np.pi * sigmas * sigmas + amps *= 2.0 * np.pi * sigmas * sigmas - return self.gaussian_set.derivatives(x, y, amps, sigmas, e1, e2, center_x, center_y) + return self.gaussian_set.derivatives( + x, y, amps, sigmas, e1, e2, center_x, center_y + ) - def hessian(self, x, y, e1=0., e2=0., center_x=0., - center_y=0., **kwargs): - r""" - Compute the Hessian of the deflection potential - :math:`\partial^2f/\partial x^2`, :math:`\partial^2 f/ \partial - y^2`, :math:`\partial^2 f/\partial x\partial y` of a Gauss-decomposed - elliptic Sersic convergence. + def hessian(self, x, y, e1=0.0, e2=0.0, center_x=0.0, center_y=0.0, **kwargs): + r"""Compute the Hessian of the deflection potential :math:`\partial^2f/\partial + x^2`, :math:`\partial^2 f/ \partial y^2`, :math:`\partial^2 f/\partial x\partial + y` of a Gauss-decomposed elliptic Sersic convergence. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -378,14 +405,12 @@ def hessian(self, x, y, e1=0., e2=0., center_x=0., amps, sigmas = self.gauss_decompose(**kwargs) # converting the amplitude convention A -> A/(2*pi*sigma^2) - amps *= 2. 
* np.pi * sigmas * sigmas + amps *= 2.0 * np.pi * sigmas * sigmas return self.gaussian_set.hessian(x, y, amps, sigmas, e1, e2, center_x, center_y) - def density_2d(self, x, y, e1=0., e2=0., center_x=0., - center_y=0., **kwargs): - r""" - Compute the convergence profile for Gauss-decomposed elliptic Sersic profile. + def density_2d(self, x, y, e1=0.0, e2=0.0, center_x=0.0, center_y=0.0, **kwargs): + r"""Compute the convergence profile for Gauss-decomposed elliptic Sersic profile. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -406,29 +431,40 @@ def density_2d(self, x, y, e1=0., e2=0., center_x=0., amps, sigmas = self.gauss_decompose(**kwargs) # converting the amplitude convention A -> A/(2*pi*sigma^2) - amps *= 2. * np.pi * sigmas * sigmas + amps *= 2.0 * np.pi * sigmas * sigmas - return self.gaussian_set.density_2d(x, y, amps, sigmas, e1, e2, center_x, center_y) + return self.gaussian_set.density_2d( + x, y, amps, sigmas, e1, e2, center_x, center_y + ) @export class SersicEllipseGaussDec(GaussDecompositionAbstract): - """ - This class computes the lensing properties of an elliptical Sersic - profile using the Shajib (2019)'s Gauss decomposition method. - """ - param_names = ['k_eff', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x', - 'center_y'] - lower_limit_default = {'k_eff': 0., 'R_sersic': 0., 'n_sersic': 0.5, - 'e1': -0.5, 'e2': -0.5, 'center_x': -100., - 'center_y': -100.} - upper_limit_default = {'k_eff': 100., 'R_sersic': 100., 'n_sersic': 8., - 'e1': 0.5, 'e2': 0.5, 'center_x': 100., - 'center_y': 100.} + """This class computes the lensing properties of an elliptical Sersic profile using + the Shajib (2019)'s Gauss decomposition method.""" + + param_names = ["k_eff", "R_sersic", "n_sersic", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "k_eff": 0.0, + "R_sersic": 0.0, + "n_sersic": 0.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100.0, + "center_y": -100.0, + } + upper_limit_default = { + "k_eff": 100.0, + "R_sersic": 100.0, + "n_sersic": 8.0, + "e1": 0.5, + "e2": 0.5, + "center_x": 100.0, + "center_y": 100.0, + } def get_kappa_1d(self, y, **kwargs): - r""" - Compute the spherical Sersic profile at y. + r"""Compute the spherical Sersic profile at y. :param y: y coordinate :type y: ``float`` @@ -445,17 +481,16 @@ def get_kappa_1d(self, y, **kwargs): :return: Sersic function at y :rtype: ``type(y)`` """ - n_sersic = kwargs['n_sersic'] - R_sersic = kwargs['R_sersic'] - k_eff = kwargs['k_eff'] + n_sersic = kwargs["n_sersic"] + R_sersic = kwargs["R_sersic"] + k_eff = kwargs["k_eff"] bn = SersicUtil.b_n(n_sersic) - return k_eff * np.exp(-bn * (y / R_sersic) ** (1. / n_sersic) + bn) + return k_eff * np.exp(-bn * (y / R_sersic) ** (1.0 / n_sersic) + bn) def get_scale(self, **kwargs): - """ - Identify the scale size from the keyword arguments. + """Identify the scale size from the keyword arguments. :param kwargs: Keyword arguments @@ -470,26 +505,43 @@ def get_scale(self, **kwargs): :return: Sersic radius :rtype: ``float`` """ - return kwargs['R_sersic'] + return kwargs["R_sersic"] @export class NFWEllipseGaussDec(GaussDecompositionAbstract): - """ - This class computes the lensing properties of an elliptical, projected NFW - profile using Shajib (2019)'s Gauss decomposition method. 
- """ - param_names = ['Rs', 'alpha_Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'e1': -0.5, 'e2': -0.5, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'e1': 0.5, 'e2': 0.5, - 'center_x': 100, 'center_y': 100} - - def __init__(self, n_sigma=15, sigma_start_mult=0.005, sigma_end_mult=50., - precision=10, use_scipy_wofz=True, min_ellipticity=1e-5): - """ - Set up settings for the Gaussian decomposition. For more details about - the decomposition parameters, see Shajib (2019). + """This class computes the lensing properties of an elliptical, projected NFW + profile using Shajib (2019)'s Gauss decomposition method.""" + + param_names = ["Rs", "alpha_Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + + def __init__( + self, + n_sigma=15, + sigma_start_mult=0.005, + sigma_end_mult=50.0, + precision=10, + use_scipy_wofz=True, + min_ellipticity=1e-5, + ): + """Set up settings for the Gaussian decomposition. For more details about the + decomposition parameters, see Shajib (2019). :param n_sigma: Number of Gaussian components :type n_sigma: ``int`` @@ -504,16 +556,17 @@ def __init__(self, n_sigma=15, sigma_start_mult=0.005, sigma_end_mult=50., :param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used. :type min_ellipticity: ``float`` """ - super(NFWEllipseGaussDec, self).__init__(n_sigma=n_sigma, - sigma_start_mult=sigma_start_mult, - sigma_end_mult=sigma_end_mult, - precision=precision, - use_scipy_wofz=use_scipy_wofz, - min_ellipticity=min_ellipticity) + super(NFWEllipseGaussDec, self).__init__( + n_sigma=n_sigma, + sigma_start_mult=sigma_start_mult, + sigma_end_mult=sigma_end_mult, + precision=precision, + use_scipy_wofz=use_scipy_wofz, + min_ellipticity=min_ellipticity, + ) def get_kappa_1d(self, y, **kwargs): - r""" - Compute the spherical projected NFW profile at y. + r"""Compute the spherical projected NFW profile at y. :param y: y coordinate :type y: ``float`` @@ -528,37 +581,34 @@ def get_kappa_1d(self, y, **kwargs): :return: projected NFW profile at y :rtype: ``type(y)`` """ - R_s = kwargs['Rs'] - alpha_Rs = kwargs['alpha_Rs'] + R_s = kwargs["Rs"] + alpha_Rs = kwargs["alpha_Rs"] kappa_s = alpha_Rs / (4 * R_s * (1 - 0.30102999566)) # log2 = 0.30102999566 - x = y/R_s + x = y / R_s f = np.empty(shape=x.shape, dtype=x.dtype) - range1 = (x > 1.) + range1 = x > 1.0 if np.any(range1): s = x[range1] - f[range1] = (1 - np.arccos(1 / s) / np.sqrt(s * s - 1)) / ( - s * s - 1) + f[range1] = (1 - np.arccos(1 / s) / np.sqrt(s * s - 1)) / (s * s - 1) - range2 = (x < 1.) + range2 = x < 1.0 if np.any(range2): s = x[range2] - f[range2] = (1 - np.arccosh(1 / s) / np.sqrt(1 - s * s)) / ( - s * s - 1) + f[range2] = (1 - np.arccosh(1 / s) / np.sqrt(1 - s * s)) / (s * s - 1) range3 = np.logical_and(np.logical_not(range1), np.logical_not(range2)) if np.any(range3): - f[range3] = 1. / 3. + f[range3] = 1.0 / 3.0 return 2 * kappa_s * f def get_scale(self, **kwargs): - """ - Identify the scale size from the keyword arguments. + """Identify the scale size from the keyword arguments. 
:param kwargs: Keyword arguments @@ -571,52 +621,67 @@ def get_scale(self, **kwargs): :return: NFW scale radius :rtype: ``float`` """ - return kwargs['Rs'] + return kwargs["Rs"] @export class GaussDecompositionAbstract3D(GaussDecompositionAbstract): - """ - This abstract class sets up a template for computing lensing properties of - a convergence from 3D spherical profile through Shajib (2019)'s Gauss - decomposition. - """ + """This abstract class sets up a template for computing lensing properties of a + convergence from 3D spherical profile through Shajib (2019)'s Gauss + decomposition.""" + def gauss_decompose(self, **kwargs): - r""" - Compute the amplitudes and sigmas of Gaussian components using the - integral transform with Gaussian kernel from Shajib (2019). The - returned values are in the convention of eq. (2.13). + r"""Compute the amplitudes and sigmas of Gaussian components using the integral + transform with Gaussian kernel from Shajib (2019). The returned values are in + the convention of eq. (2.13). :param kwargs: Keyword arguments to send to ``func`` :return: Amplitudes and standard deviations of the Gaussian components :rtype: tuple ``(numpy.array, numpy.array)`` """ - f_sigmas, sigmas = super(GaussDecompositionAbstract3D, - self).gauss_decompose(**kwargs) + f_sigmas, sigmas = super(GaussDecompositionAbstract3D, self).gauss_decompose( + **kwargs + ) return f_sigmas * sigmas * _SQRT_2PI, sigmas @export class CTNFWGaussDec(GaussDecompositionAbstract3D): - """ - This class computes the lensing properties of an projection from a - spherical cored-truncated NFW profile using Shajib (2019)'s Gauss - decomposition method. - """ - param_names = ['r_s', 'r_core', 'r_trunc', 'a', 'rho_s', 'center_x' - 'center_y'] - lower_limit_default = {'r_s': 0, 'r_core': 0, 'r_trunc': 0, 'a': 0., - 'rho_s': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'r_s': 100, 'r_core': 100, 'r_trunc': 100, 'a': 10., - 'rho_s': 1000, 'center_x': 100, 'center_y': 100} - - def __init__(self, n_sigma=15, sigma_start_mult=0.01, sigma_end_mult=20., - precision=10, use_scipy_wofz=True): - """ - Set up settings for the Gaussian decomposition. For more details about - the decomposition parameters, see Shajib (2019). + """This class computes the lensing properties of an projection from a spherical + cored-truncated NFW profile using Shajib (2019)'s Gauss decomposition method.""" + + param_names = ["r_s", "r_core", "r_trunc", "a", "rho_s", "center_x" "center_y"] + lower_limit_default = { + "r_s": 0, + "r_core": 0, + "r_trunc": 0, + "a": 0.0, + "rho_s": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "r_s": 100, + "r_core": 100, + "r_trunc": 100, + "a": 10.0, + "rho_s": 1000, + "center_x": 100, + "center_y": 100, + } + + def __init__( + self, + n_sigma=15, + sigma_start_mult=0.01, + sigma_end_mult=20.0, + precision=10, + use_scipy_wofz=True, + ): + """Set up settings for the Gaussian decomposition. For more details about the + decomposition parameters, see Shajib (2019). :param n_sigma: Number of Gaussian components :type n_sigma: ``int`` @@ -629,13 +694,16 @@ def __init__(self, n_sigma=15, sigma_start_mult=0.01, sigma_end_mult=20., :param use_scipy_wofz: To be passed to ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed. 
:type use_scipy_wofz: ``bool`` """ - super(CTNFWGaussDec, self).__init__(n_sigma=n_sigma, sigma_start_mult=sigma_start_mult, - sigma_end_mult=sigma_end_mult, precision=precision, - use_scipy_wofz=use_scipy_wofz) + super(CTNFWGaussDec, self).__init__( + n_sigma=n_sigma, + sigma_start_mult=sigma_start_mult, + sigma_end_mult=sigma_end_mult, + precision=precision, + use_scipy_wofz=use_scipy_wofz, + ) def get_kappa_1d(self, y, **kwargs): - r""" - Compute the spherical cored-truncated NFW profile at y. + r"""Compute the spherical cored-truncated NFW profile at y. :param y: y coordinate :type y: ``float`` @@ -656,23 +724,26 @@ def get_kappa_1d(self, y, **kwargs): :return: projected NFW profile at y :rtype: ``type(y)`` """ - r_s = kwargs['r_s'] - r_trunc = kwargs['r_trunc'] - r_core = kwargs['r_core'] - rho_s = kwargs['rho_s'] - a = kwargs['a'] + r_s = kwargs["r_s"] + r_trunc = kwargs["r_trunc"] + r_core = kwargs["r_core"] + rho_s = kwargs["rho_s"] + a = kwargs["a"] - beta = r_core/r_s - tau = r_trunc/r_s + beta = r_core / r_s + tau = r_trunc / r_s - x = y/r_s + x = y / r_s - return rho_s * (tau*tau / (tau*tau + x*x)) / (x**a + beta**a)**( - 1./a) / (1. + x)**2 + return ( + rho_s + * (tau * tau / (tau * tau + x * x)) + / (x**a + beta**a) ** (1.0 / a) + / (1.0 + x) ** 2 + ) def get_scale(self, **kwargs): - """ - Identify the scale size from the keyword arguments. + """Identify the scale size from the keyword arguments. :param kwargs: Keyword arguments @@ -691,4 +762,4 @@ def get_scale(self, **kwargs): :return: NFW scale radius :rtype: ``float`` """ - return kwargs['r_s'] + return kwargs["r_s"] diff --git a/lenstronomy/LensModel/Profiles/gaussian_ellipse_kappa.py b/lenstronomy/LensModel/Profiles/gaussian_ellipse_kappa.py index cef4c3b9f..779ec2339 100644 --- a/lenstronomy/LensModel/Profiles/gaussian_ellipse_kappa.py +++ b/lenstronomy/LensModel/Profiles/gaussian_ellipse_kappa.py @@ -1,11 +1,9 @@ # -*- coding: utf-8 -*- -""" -This module defines ``class GaussianEllipseKappa`` to compute the lensing -properties of an elliptical Gaussian profile with ellipticity in the -convergence using the formulae from Shajib (2019). -""" +"""This module defines ``class GaussianEllipseKappa`` to compute the lensing properties +of an elliptical Gaussian profile with ellipticity in the convergence using the formulae +from Shajib (2019).""" -__author__ = 'ajshajib' +__author__ = "ajshajib" import numpy as np from scipy.special import wofz @@ -15,26 +13,37 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['GaussianEllipseKappa'] +__all__ = ["GaussianEllipseKappa"] class GaussianEllipseKappa(LensProfileBase): - """ - This class contains functions to evaluate the derivative and hessian matrix - of the deflection potential for an elliptical Gaussian convergence. + """This class contains functions to evaluate the derivative and hessian matrix of + the deflection potential for an elliptical Gaussian convergence. The formulae are from Shajib (2019). 
""" - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5, - 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "sigma": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5): - """ - Setup which method to use the Faddeeva function and the - ellipticity limit for spherical approximation. + """Setup which method to use the Faddeeva function and the ellipticity limit for + spherical approximation. :param use_scipy_wofz: If ``True``, use ``scipy.special.wofz``. :type use_scipy_wofz: ``bool`` @@ -51,8 +60,7 @@ def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5): super(GaussianEllipseKappa, self).__init__() def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the potential function for elliptical Gaussian convergence. + """Compute the potential function for elliptical Gaussian convergence. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -76,8 +84,7 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): phi_g, q = param_util.ellipticity2phi_q(e1, e2) if q > 1 - self.min_ellipticity: - return self.spherical.function(x, y, amp, sigma, center_x, - center_y) + return self.spherical.function(x, y, amp, sigma, center_x, center_y) # adjusting amplitude to make the notation compatible with the # formulae given in Shajib (2019). @@ -94,8 +101,8 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): x_ = cos_phi * x_shift + sin_phi * y_shift y_ = -sin_phi * x_shift + cos_phi * y_shift - _b = 1. / 2. / sigma_ ** 2 - _p = np.sqrt(_b * q ** 2 / (1. - q ** 2)) + _b = 1.0 / 2.0 / sigma_**2 + _p = np.sqrt(_b * q**2 / (1.0 - q**2)) if isinstance(x_, int) or isinstance(x_, float): return self._num_integral(x_, y_, amp_, sigma_, _p, q) @@ -114,18 +121,30 @@ def _num_integral(self, x_, y_, amp_, sigma_, _p, q): :param q: :return: """ + def pot_real_line_integrand(_x): sig_func_re, sig_func_im = self.sigma_function(_p * _x, 0, q) - alpha_x_ = amp_*sigma_ * self.sgn(_x) * np.sqrt(2*np.pi / ( - 1. - q ** 2)) * sig_func_re + alpha_x_ = ( + amp_ + * sigma_ + * self.sgn(_x) + * np.sqrt(2 * np.pi / (1.0 - q**2)) + * sig_func_re + ) return alpha_x_ def pot_imag_line_integrand(_y): sig_func_re, sig_func_im = self.sigma_function(_p * x_, _p * _y, q) - alpha_y_ = -amp_*sigma_ * self.sgn(x_ + 1j*_y) * np.sqrt(2*np.pi / (1. - q ** 2)) * sig_func_im + alpha_y_ = ( + -amp_ + * sigma_ + * self.sgn(x_ + 1j * _y) + * np.sqrt(2 * np.pi / (1.0 - q**2)) + * sig_func_im + ) return alpha_y_ @@ -134,9 +153,8 @@ def pot_imag_line_integrand(_y): return pot_on_real_line + pot_on_imag_parallel def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the derivatives of function angles :math:`\\partial - f/\\partial x`, :math:`\\partial f/\\partial y` at :math:`x,\\ y`. + """Compute the derivatives of function angles :math:`\\partial f/\\partial x`, + :math:`\\partial f/\\partial y` at :math:`x,\\ y`. 
:param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -162,8 +180,7 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): phi_g, q = param_util.ellipticity2phi_q(e1, e2) if q > 1 - self.min_ellipticity: - return self.spherical.derivatives(x, y, amp, sigma, center_x, - center_y) + return self.spherical.derivatives(x, y, amp, sigma, center_x, center_y) # adjusting amplitude to make the notation compatible with the # formulae given in Shajib (2019). @@ -181,14 +198,24 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): x_ = cos_phi * x_shift + sin_phi * y_shift y_ = -sin_phi * x_shift + cos_phi * y_shift - _p = q / sigma_ / np.sqrt(2 * (1. - q**2)) + _p = q / sigma_ / np.sqrt(2 * (1.0 - q**2)) sig_func_re, sig_func_im = self.sigma_function(_p * x_, _p * y_, q) - alpha_x_ = amp_ * sigma_ * self.sgn(x_ + 1j*y_) * np.sqrt(2*np.pi/( - 1.-q**2)) * sig_func_re - alpha_y_ = - amp_ * sigma_ * self.sgn(x_ + 1j*y_) * np.sqrt( - 2 * np.pi / (1. - q ** 2)) * sig_func_im + alpha_x_ = ( + amp_ + * sigma_ + * self.sgn(x_ + 1j * y_) + * np.sqrt(2 * np.pi / (1.0 - q**2)) + * sig_func_re + ) + alpha_y_ = ( + -amp_ + * sigma_ + * self.sgn(x_ + 1j * y_) + * np.sqrt(2 * np.pi / (1.0 - q**2)) + * sig_func_im + ) # rotate back to the original frame f_x = alpha_x_ * cos_phi - alpha_y_ * sin_phi @@ -197,10 +224,8 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute Hessian matrix of function :math:`\\partial^2f/\\partial x^2`, - :math:`\\partial^2 f/\\partial y^2`, :math:`\\partial^2/\\partial - x\\partial y`. + """Compute Hessian matrix of function :math:`\\partial^2f/\\partial x^2`, + :math:`\\partial^2 f/\\partial y^2`, :math:`\\partial^2/\\partial x\\partial y`. :param x: x coordinate :type x: ``float`` or ``numpy.array`` @@ -243,14 +268,27 @@ def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): x_ = cos_phi * x_shift + sin_phi * y_shift y_ = -sin_phi * x_shift + cos_phi * y_shift - _p = q / sigma_ / np.sqrt(2 * (1. 
- q**2)) + _p = q / sigma_ / np.sqrt(2 * (1.0 - q**2)) sig_func_re, sig_func_im = self.sigma_function(_p * x_, _p * y_, q) kappa = amp_ * np.exp(-(q**2 * x_**2 + y_**2) / 2 / sigma_**2) - shear = - 1/(1-q*q) * ((1+q**2)*kappa - 2*q*amp_ + np.sqrt( - 2*np.pi) * q*q * amp_ * (x_ - 1j*y_) / sigma_ / np.sqrt(1-q*q) * ( - sig_func_re - 1j*sig_func_im)) + shear = ( + -1 + / (1 - q * q) + * ( + (1 + q**2) * kappa + - 2 * q * amp_ + + np.sqrt(2 * np.pi) + * q + * q + * amp_ + * (x_ - 1j * y_) + / sigma_ + / np.sqrt(1 - q * q) + * (sig_func_re - 1j * sig_func_im) + ) + ) # in rotated frame f_xx_ = kappa + shear.real @@ -258,16 +296,21 @@ def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): f_xy_ = shear.imag # rotate back to the original frame - f_xx = f_xx_ * cos_phi**2 + f_yy_ * sin_phi**2 - 2 * sin_phi * cos_phi * f_xy_ - f_yy = f_xx_ * sin_phi**2 + f_yy_ * cos_phi**2 + 2 * sin_phi * cos_phi * f_xy_ - f_xy = sin_phi * cos_phi * (f_xx_ - f_yy_) + (cos_phi**2 - sin_phi**2) * f_xy_ + f_xx = ( + f_xx_ * cos_phi**2 + f_yy_ * sin_phi**2 - 2 * sin_phi * cos_phi * f_xy_ + ) + f_yy = ( + f_xx_ * sin_phi**2 + f_yy_ * cos_phi**2 + 2 * sin_phi * cos_phi * f_xy_ + ) + f_xy = ( + sin_phi * cos_phi * (f_xx_ - f_yy_) + (cos_phi**2 - sin_phi**2) * f_xy_ + ) return f_xx, f_xy, f_xy, f_yy def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - Compute the density of elliptical Gaussian :math:`A/(2 \\pi - \\sigma^2) \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`. + """Compute the density of elliptical Gaussian :math:`A/(2 \\pi \\sigma^2) + \\exp(-(x^2+y^2/q^2)/2\\sigma^2)`. :param x: x coordinate. :type x: ``float`` or ``numpy.array`` @@ -288,21 +331,23 @@ def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): :return: Density :math:`\\kappa` for elliptical Gaussian convergence. :rtype: ``float``, or ``numpy.array`` with shape = ``x.shape``. """ - f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, amp, sigma, e1, e2, center_x, center_y) + f_xx, f_xy, f_yx, f_yy = self.hessian( + x, y, amp, sigma, e1, e2, center_x, center_y + ) return (f_xx + f_yy) / 2 @staticmethod def sgn(z): - """ - Compute the sign function :math:`\\mathrm{sgn}(z)` factor for - deflection as sugggested by Bray (1984). For current implementation, returning 1 is sufficient. + """Compute the sign function :math:`\\mathrm{sgn}(z)` factor for deflection as + sugggested by Bray (1984). For current implementation, returning 1 is + sufficient. :param z: Complex variable :math:`z = x + \\mathrm{i}y` :type z: ``complex`` :return: :math:`\\mathrm{sgn}(z)` :rtype: ``float`` """ - return 1. + return 1.0 # np.sqrt(z*z)/z #np.sign(z.real*z.imag) # return np.sign(z.real) # if z.real != 0: @@ -312,9 +357,8 @@ def sgn(z): # return np.where(z.real == 0, np.sign(z.real), np.sign(z.imag)) def sigma_function(self, x, y, q): - r""" - Compute the function :math:`\varsigma (z; q)` from equation (4.12) - of Shajib (2019). + r"""Compute the function :math:`\varsigma (z; q)` from equation (4.12) of Shajib + (2019). 
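The class also provides w_f_approx, an approximation of the Faddeeva function following Zaghloul (2017) that is used when use_scipy_wofz is set to False; a quick, illustrative spot-check against scipy.special.wofz::

    import numpy as np
    from scipy.special import wofz
    from lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import GaussianEllipseKappa

    # sample points spread over several of the |z|^2 regions used by the approximation
    z = np.array([0.3 + 0.4j, 2.0 + 0.1j, 5.0 + 5.0j, 20.0 + 1.0j])
    w_approx = GaussianEllipseKappa.w_f_approx(z)
    print(np.max(np.abs(w_approx - wofz(z))))  # expected to be small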
:param x: Real part of complex variable, :math:`x = \mathrm{Re}(z)` :type x: ``float`` or ``numpy.array`` @@ -337,14 +381,13 @@ def sigma_function(self, x, y, q): exp_factor = np.exp(-x * x * (1 - q * q) - y_ * y_ * (1 / q / q - 1)) sigma_func_real = w.imag - exp_factor * wq.imag - sigma_func_imag = (- w.real + exp_factor * wq.real) * y_sign + sigma_func_imag = (-w.real + exp_factor * wq.real) * y_sign return sigma_func_real, sigma_func_imag @staticmethod def w_f_approx(z): - """ - Compute the Faddeeva function :math:`w_{\\mathrm F}(z)` using the + """Compute the Faddeeva function :math:`w_{\\mathrm F}(z)` using the approximation given in Zaghloul (2017). :param z: complex number @@ -357,39 +400,39 @@ def w_f_approx(z): wz = np.empty_like(z) - z_imag2 = z.imag ** 2 - abs_z2 = z.real ** 2 + z_imag2 + z_imag2 = z.imag**2 + abs_z2 = z.real**2 + z_imag2 - reg1 = (abs_z2 >= 38000.) + reg1 = abs_z2 >= 38000.0 if np.any(reg1): wz[reg1] = i_sqrt_pi / z[reg1] - reg2 = (256. <= abs_z2) & (abs_z2 < 38000.) + reg2 = (256.0 <= abs_z2) & (abs_z2 < 38000.0) if np.any(reg2): t = z[reg2] wz[reg2] = i_sqrt_pi * t / (t * t - 0.5) - reg3 = (62. <= abs_z2) & (abs_z2 < 256.) + reg3 = (62.0 <= abs_z2) & (abs_z2 < 256.0) if np.any(reg3): t = z[reg3] wz[reg3] = (i_sqrt_pi / t) * (1 + 0.5 / (t * t - 1.5)) - reg4 = (30. <= abs_z2) & (abs_z2 < 62.) & (z_imag2 >= 1e-13) + reg4 = (30.0 <= abs_z2) & (abs_z2 < 62.0) & (z_imag2 >= 1e-13) if np.any(reg4): t = z[reg4] tt = t * t - wz[reg4] = (i_sqrt_pi * t) * (tt - 2.5) / (tt * (tt - 3.) + 0.75) + wz[reg4] = (i_sqrt_pi * t) * (tt - 2.5) / (tt * (tt - 3.0) + 0.75) - reg5 = (62. > abs_z2) & np.logical_not(reg4) & (abs_z2 > 2.5) & ( - z_imag2 < 0.072) + reg5 = ( + (62.0 > abs_z2) & np.logical_not(reg4) & (abs_z2 > 2.5) & (z_imag2 < 0.072) + ) if np.any(reg5): t = z[reg5] u = -t * t f1 = sqrt_pi f2 = 1 s1 = [1.320522, 35.7668, 219.031, 1540.787, 3321.99, 36183.31] - s2 = [1.841439, 61.57037, 364.2191, 2186.181, 9022.228, 24322.84, - 32066.6] + s2 = [1.841439, 61.57037, 364.2191, 2186.181, 9022.228, 24322.84, 32066.6] for s in s1: f1 = s - f1 * u @@ -400,14 +443,20 @@ def w_f_approx(z): reg6 = (30.0 > abs_z2) & np.logical_not(reg5) if np.any(reg6): - t3 = - 1j * z[reg6] + t3 = -1j * z[reg6] f1 = sqrt_pi f2 = 1 - s1 = [5.9126262, 30.180142, 93.15558, 181.92853, 214.38239, - 122.60793] - s2 = [10.479857, 53.992907, 170.35400, 348.70392, 457.33448, - 352.73063, 122.60793] + s1 = [5.9126262, 30.180142, 93.15558, 181.92853, 214.38239, 122.60793] + s2 = [ + 10.479857, + 53.992907, + 170.35400, + 348.70392, + 457.33448, + 352.73063, + 122.60793, + ] for s in s1: f1 = f1 * t3 + s diff --git a/lenstronomy/LensModel/Profiles/gaussian_ellipse_potential.py b/lenstronomy/LensModel/Profiles/gaussian_ellipse_potential.py index c8cc11c9a..8b42424ab 100644 --- a/lenstronomy/LensModel/Profiles/gaussian_ellipse_potential.py +++ b/lenstronomy/LensModel/Profiles/gaussian_ellipse_potential.py @@ -1,25 +1,38 @@ -__author__ = 'sibirrer' -#this file contains a class to make a gaussian +__author__ = "sibirrer" +# this file contains a class to make a gaussian import numpy as np from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['GaussianEllipsePotential'] +__all__ = ["GaussianEllipsePotential"] class GaussianEllipsePotential(LensProfileBase): - """ - this class contains functions to evaluate a Gaussian function and calculates its derivative and hessian 
matrix - with ellipticity in the convergence + """This class contains functions to evaluate a Gaussian function and calculates its + derivative and hessian matrix with ellipticity in the convergence. the calculation follows Glenn van de Ven et al. 2009 - """ - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "sigma": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.spherical = GaussianKappa() @@ -27,9 +40,7 @@ def __init__(self): super(GaussianEllipsePotential, self).__init__() def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - returns Gaussian - """ + """Returns Gaussian.""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x @@ -43,9 +54,7 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): return f_ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y @@ -63,13 +72,18 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - alpha_ra, alpha_dec = self.derivatives(x, y, amp, sigma, e1, e2, center_x, center_y) + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + alpha_ra, alpha_dec = self.derivatives( + x, y, amp, sigma, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, amp, sigma, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, amp, sigma, e1, e2, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, amp, sigma, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, amp, sigma, e1, e2, center_x, center_y + ) f_xx = (alpha_ra_dx - alpha_ra) / diff f_xy = (alpha_ra_dy - alpha_ra) / diff diff --git a/lenstronomy/LensModel/Profiles/gaussian_kappa.py b/lenstronomy/LensModel/Profiles/gaussian_kappa.py index 67f054ae4..37150cf16 100644 --- a/lenstronomy/LensModel/Profiles/gaussian_kappa.py +++ b/lenstronomy/LensModel/Profiles/gaussian_kappa.py @@ -1,5 +1,5 @@ -__author__ = 'sibirrer' -#this file contains a class to make a gaussian +__author__ = "sibirrer" +# this file contains a class to make a gaussian import numpy as np import scipy.special @@ -7,16 +7,16 @@ from lenstronomy.LensModel.Profiles.gaussian_potential import Gaussian from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['GaussianKappa'] +__all__ = ["GaussianKappa"] class GaussianKappa(LensProfileBase): - """ - this class contains functions to evaluate a Gaussian function and calculates its derivative and hessian matrix - """ - param_names = ['amp', 'sigma', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 
'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'center_x': 100, 'center_y': 100} + """This class contains functions to evaluate a Gaussian function and calculates its + derivative and hessian matrix.""" + + param_names = ["amp", "sigma", "center_x", "center_y"] + lower_limit_default = {"amp": 0, "sigma": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"amp": 100, "sigma": 100, "center_x": 100, "center_y": 100} def __init__(self): self.gaussian = Gaussian() @@ -24,14 +24,12 @@ def __init__(self): super(LensProfileBase, self).__init__() def function(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - returns Gaussian - """ + """Returns Gaussian.""" x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) sigma_x, sigma_y = sigma, sigma - c = 1. / (2 * sigma_x * sigma_y) + c = 1.0 / (2 * sigma_x * sigma_y) if isinstance(x_, int) or isinstance(x_, float): num_int = self._num_integral(r, c) else: @@ -41,24 +39,18 @@ def function(self, x, y, amp, sigma, center_x=0, center_y=0): num_int = np.array(num_int) amp_density = self._amp2d_to_3d(amp, sigma_x, sigma_y) amp2d = amp_density / (np.sqrt(np.pi) * np.sqrt(sigma_x * sigma_y * 2)) - amp2d *= 2 * 1. / (2 * c) + amp2d *= 2 * 1.0 / (2 * c) return num_int * amp2d @staticmethod def _num_integral(r, c): - """ - numerical integral (1-e^{-c*x^2})/x dx [0..r] - :param r: radius - :param c: 1/2sigma^2 - :return: - """ - out = integrate.quad(lambda x: (1-np.exp(-c*x**2))/x, 0, r) + """Numerical integral (1-e^{-c*x^2})/x dx [0..r] :param r: radius :param c: + 1/2sigma^2 :return:""" + out = integrate.quad(lambda x: (1 - np.exp(-c * x**2)) / x, 0, r) return out[0] def derivatives(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" x_ = x - center_x y_ = y - center_y R = np.sqrt(x_**2 + y_**2) @@ -71,9 +63,8 @@ def derivatives(self, x, y, amp, sigma, center_x=0, center_y=0): return alpha / R * x_, alpha / R * y_ def hessian(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) @@ -85,9 +76,9 @@ def hessian(self, x, y, amp, sigma, center_x=0, center_y=0): d_alpha_dr = -self.d_alpha_dr(r, amp, sigma_x, sigma_y) alpha = self.alpha_abs(r, amp, sigma) - f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r - f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r - f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r + f_xx = -(d_alpha_dr / r + alpha / r**2) * x_**2 / r + alpha / r + f_yy = -(d_alpha_dr / r + alpha / r**2) * y_**2 / r + alpha / r + f_xy = -(d_alpha_dr / r + alpha / r**2) * x_ * y_ / r return f_xx, f_xy, f_xy, f_yy def density(self, r, amp, sigma): @@ -126,8 +117,8 @@ def mass_2d(self, R, amp, sigma): """ sigma_x, sigma_y = sigma, sigma amp2d = amp / (np.sqrt(np.pi) * np.sqrt(sigma_x * sigma_y * 2)) - c = 1./(2 * sigma_x * sigma_y) - return amp2d * 2 * np.pi * 1./(2*c) * (1. 
- np.exp(-c * R**2)) + c = 1.0 / (2 * sigma_x * sigma_y) + return amp2d * 2 * np.pi * 1.0 / (2 * c) * (1.0 - np.exp(-c * R**2)) def mass_2d_lens(self, R, amp, sigma): """ @@ -142,9 +133,8 @@ def mass_2d_lens(self, R, amp, sigma): return self.mass_2d(R, amp_density, sigma) def alpha_abs(self, R, amp, sigma): - """ - absolute value of the deflection - :param R: + """Absolute value of the deflection :param R: + :param amp: :param sigma: :return: @@ -163,9 +153,11 @@ def d_alpha_dr(self, R, amp, sigma_x, sigma_y): :param sigma_y: :return: """ - c = 1. / (2 * sigma_x * sigma_y) - A = self._amp2d_to_3d(amp, sigma_x, sigma_y) * np.sqrt(2/np.pi*sigma_x*sigma_y) - return 1./R**2 * (-1 + (1 + 2*c*R**2) * np.exp(-c*R**2)) * A + c = 1.0 / (2 * sigma_x * sigma_y) + A = self._amp2d_to_3d(amp, sigma_x, sigma_y) * np.sqrt( + 2 / np.pi * sigma_x * sigma_y + ) + return 1.0 / R**2 * (-1 + (1 + 2 * c * R**2) * np.exp(-c * R**2)) * A def mass_3d(self, R, amp, sigma): """ @@ -177,9 +169,16 @@ def mass_3d(self, R, amp, sigma): """ sigma_x, sigma_y = sigma, sigma A = amp / (2 * np.pi * sigma_x * sigma_y) - c = 1. / (2 * sigma_x * sigma_y) - result = 1. / (2*c) * (-R * np.exp(-c*R**2) + scipy.special.erf(np.sqrt(c) * R) * np.sqrt(np.pi/(4 * c))) - return result*A * 4 * np.pi + c = 1.0 / (2 * sigma_x * sigma_y) + result = ( + 1.0 + / (2 * c) + * ( + -R * np.exp(-c * R**2) + + scipy.special.erf(np.sqrt(c) * R) * np.sqrt(np.pi / (4 * c)) + ) + ) + return result * A * 4 * np.pi def mass_3d_lens(self, R, amp, sigma): """ @@ -195,9 +194,8 @@ def mass_3d_lens(self, R, amp, sigma): @staticmethod def _amp3d_to_2d(amp, sigma_x, sigma_y): - """ - converts 3d density into 2d density parameter - :param amp: + """Converts 3d density into 2d density parameter :param amp: + :param sigma_x: :param sigma_y: :return: @@ -206,9 +204,8 @@ def _amp3d_to_2d(amp, sigma_x, sigma_y): @staticmethod def _amp2d_to_3d(amp, sigma_x, sigma_y): - """ - converts 3d density into 2d density parameter - :param amp: + """Converts 3d density into 2d density parameter :param amp: + :param sigma_x: :param sigma_y: :return: diff --git a/lenstronomy/LensModel/Profiles/gaussian_potential.py b/lenstronomy/LensModel/Profiles/gaussian_potential.py index 7ecc8b1d4..6e8986370 100644 --- a/lenstronomy/LensModel/Profiles/gaussian_potential.py +++ b/lenstronomy/LensModel/Profiles/gaussian_potential.py @@ -1,43 +1,38 @@ -__author__ = 'sibirrer' -#this file contains a class to make a gaussian +__author__ = "sibirrer" +# this file contains a class to make a gaussian import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Gaussian'] +__all__ = ["Gaussian"] class Gaussian(LensProfileBase): - """ - this class contains functions to evaluate a Gaussian function and calculates its derivative and hessian matrix - """ - param_names = ['amp', 'sigma_x', 'sigma_y', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'center_x': 100, 'center_y': 100} + """This class contains functions to evaluate a Gaussian function and calculates its + derivative and hessian matrix.""" + + param_names = ["amp", "sigma_x", "sigma_y", "center_x", "center_y"] + lower_limit_default = {"amp": 0, "sigma": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"amp": 100, "sigma": 100, "center_x": 100, "center_y": 100} def function(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0): - """ - returns Gaussian - """ - c = 
amp/(2*np.pi*sigma_x*sigma_y) + """Returns Gaussian.""" + c = amp / (2 * np.pi * sigma_x * sigma_y) delta_x = x - center_x delta_y = y - center_y - exponent = -((delta_x/sigma_x)**2+(delta_y/sigma_y)**2)/2. + exponent = -((delta_x / sigma_x) ** 2 + (delta_y / sigma_y) ** 2) / 2.0 return c * np.exp(exponent) def derivatives(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y) - return f_ * (center_x-x)/sigma_x**2, f_ * (center_y-y)/sigma_y**2 + return f_ * (center_x - x) / sigma_x**2, f_ * (center_y - y) / sigma_y**2 - def hessian(self, x, y, amp, sigma_x, sigma_y, center_x = 0, center_y = 0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + def hessian(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0): + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y) - f_xx = f_ * ( (-1./sigma_x**2) + (center_x-x)**2/sigma_x**4 ) - f_yy = f_ * ( (-1./sigma_y**2) + (center_y-y)**2/sigma_y**4 ) - f_xy = f_ * (center_x-x)/sigma_x**2 * (center_y-y)/sigma_y**2 + f_xx = f_ * ((-1.0 / sigma_x**2) + (center_x - x) ** 2 / sigma_x**4) + f_yy = f_ * ((-1.0 / sigma_y**2) + (center_y - y) ** 2 / sigma_y**4) + f_xy = f_ * (center_x - x) / sigma_x**2 * (center_y - y) / sigma_y**2 return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/general_nfw.py b/lenstronomy/LensModel/Profiles/general_nfw.py index 5006d58d8..a7abb960f 100644 --- a/lenstronomy/LensModel/Profiles/general_nfw.py +++ b/lenstronomy/LensModel/Profiles/general_nfw.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" # this file contains a class to compute the Navaro-Frenk-White profile import numpy as np @@ -6,12 +6,12 @@ from scipy.special import beta from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['GNFW'] +__all__ = ["GNFW"] class GNFW(LensProfileBase): - """ - This class contains a double power law profile with flexible inner and outer logarithmic slopes g and n + """This class contains a double power law profile with flexible inner and outer + logarithmic slopes g and n. .. 
math:: \\rho(r) = \\frac{\\rho_0}{r^{\\gamma}} \\frac{Rs^{n}}{\\left(r^2 + Rs^2 \\right)^{(n - \\gamma)/2}} @@ -23,14 +23,37 @@ class GNFW(LensProfileBase): TODO: implement the gravitational potential for this profile """ - profile_name = 'GNFW' - param_names = ['Rs', 'alpha_Rs', 'center_x', 'center_y', 'gamma_inner', 'gamma_outer'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'center_x': -100, 'center_y': -100, 'gamma_inner': 0.1, 'gamma_outer': 1.0} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'center_x': 100, 'center_y': 100, 'gamma_inner': 2.9, 'gamma_outer': 10.0} - def derivatives(self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function which are the deflection angles + profile_name = "GNFW" + param_names = [ + "Rs", + "alpha_Rs", + "center_x", + "center_y", + "gamma_inner", + "gamma_outer", + ] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "center_x": -100, + "center_y": -100, + "gamma_inner": 0.1, + "gamma_outer": 1.0, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "center_x": 100, + "center_y": 100, + "gamma_inner": 2.9, + "gamma_outer": 10.0, + } + + def derivatives( + self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, center_y=0 + ): + """Returns df/dx and df/dy of the function which are the deflection angles. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -46,11 +69,13 @@ def derivatives(self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, Rs = np.maximum(Rs, 0.00000001) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) f_x, f_y = self.nfwAlpha(R, Rs, rho0_input, gamma_inner, gamma_outer, x_, y_) return f_x, f_y - def hessian(self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, center_y=0): + def hessian( + self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, center_y=0 + ): """ :param x: angular position (normally in units of arc seconds) @@ -66,10 +91,12 @@ def hessian(self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, cent rho0_input = self.alpha2rho0(alpha_Rs, Rs, gamma_inner, gamma_outer) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, 0.00000001) kappa = self.density_2d(R, 0, Rs, rho0_input, gamma_inner, gamma_outer) - gamma1, gamma2 = self.nfwGamma(R, Rs, rho0_input, gamma_inner, gamma_outer, x_, y_) + gamma1, gamma2 = self.nfwGamma( + R, Rs, rho0_input, gamma_inner, gamma_outer, x_, y_ + ) f_xx = kappa + gamma1 f_yy = kappa - gamma1 f_xy = gamma2 @@ -77,8 +104,7 @@ def hessian(self, x, y, Rs, alpha_Rs, gamma_inner, gamma_outer, center_x=0, cent @staticmethod def density(R, Rs, rho0, gamma_inner, gamma_outer): - """ - three dimensional NFW profile + """Three dimensional NFW profile. :param R: radius of interest :type Rs: scale radius @@ -87,14 +113,14 @@ def density(R, Rs, rho0, gamma_inner, gamma_outer): :param gamma_outer: logarithmic profile slope outside Rs :return: rho(R) density """ - x = R/Rs - outer_slope = (gamma_outer-gamma_inner)/2 - return rho0 / (x**gamma_inner * (1 + x ** 2) ** outer_slope) + x = R / Rs + outer_slope = (gamma_outer - gamma_inner) / 2 + return rho0 / (x**gamma_inner * (1 + x**2) ** outer_slope) def density_lens(self, r, Rs, alpha_Rs, gamma_inner, gamma_outer): - """ - computes the density at 3d radius r given lens model parameterization. 
- The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: 3d radios :param Rs: scale radius @@ -106,9 +132,10 @@ def density_lens(self, r, Rs, alpha_Rs, gamma_inner, gamma_outer): rho0 = self.alpha2rho0(alpha_Rs, Rs, gamma_inner, gamma_outer) return self.density(r, Rs, rho0, gamma_inner, gamma_outer) - def density_2d(self, x, y, Rs, rho0, gamma_inner, gamma_outer, center_x=0, center_y=0): - """ - projected two dimensional profile + def density_2d( + self, x, y, Rs, rho0, gamma_inner, gamma_outer, center_x=0, center_y=0 + ): + """Projected two dimensional profile. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -122,15 +149,14 @@ def density_2d(self, x, y, Rs, rho0, gamma_inner, gamma_outer, center_x=0, cente """ x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) x = R / Rs Fx = self._f(x, gamma_inner, gamma_outer) return 2 * rho0 * Rs * Fx @staticmethod def mass_3d(r, Rs, rho0, gamma_inner, gamma_outer): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: 3d radius :param Rs: scale radius @@ -140,16 +166,22 @@ def mass_3d(r, Rs, rho0, gamma_inner, gamma_outer): :return: M(0 @@ -233,14 +263,14 @@ def _f(X, g, n): """ if n == 3: n = 3.001 # for numerical stability - hyp2f1_term = hyp2f1((n-1)/2, g/2, n/2, 1/(1+X**2)) - beta_term = beta((n-1)/2, 0.5) - return 0.5 * beta_term * hyp2f1_term * (1+X**2) ** ((1-n)/2) + hyp2f1_term = hyp2f1((n - 1) / 2, g / 2, n / 2, 1 / (1 + X**2)) + beta_term = beta((n - 1) / 2, 0.5) + return 0.5 * beta_term * hyp2f1_term * (1 + X**2) ** ((1 - n) / 2) @staticmethod def _g(X, g, n): - """ - analytic solution of integral for NFW profile to compute deflection angel and gamma + """Analytic solution of integral for NFW profile to compute deflection angel and + gamma. :param X: R/Rs :type X: float >0 @@ -252,14 +282,12 @@ def _g(X, g, n): n = 3.001 # for numerical stability xi = 1 + X**2 hyp2f1_term = hyp2f1((n - 3) / 2, g / 2, n / 2, 1 / xi) - beta_term_1 = beta((n - 3) / 2, (3-g)/2) - beta_term_2 = beta((n-3)/2, 1.5) - return 0.5 * (beta_term_1 - beta_term_2 * hyp2f1_term * xi ** ((3-n)/2)) + beta_term_1 = beta((n - 3) / 2, (3 - g) / 2) + beta_term_2 = beta((n - 3) / 2, 1.5) + return 0.5 * (beta_term_1 - beta_term_2 * hyp2f1_term * xi ** ((3 - n) / 2)) def alpha2rho0(self, alpha_Rs, Rs, gamma_inner, gamma_outer): - - """ - convert angle at Rs into rho0 + """Convert angle at Rs into rho0. :param alpha_Rs: deflection angle at RS :param Rs: scale radius @@ -269,13 +297,11 @@ def alpha2rho0(self, alpha_Rs, Rs, gamma_inner, gamma_outer): """ gx = self._g(1.0, gamma_inner, gamma_outer) - rho0 = alpha_Rs / (4. * Rs ** 2 * gx / 1.0 ** 2) + rho0 = alpha_Rs / (4.0 * Rs**2 * gx / 1.0**2) return rho0 def rho02alpha(self, rho0, Rs, gamma_inner, gamma_outer): - - """ - convert rho0 to angle at Rs + """Convert rho0 to angle at Rs. :param rho0: density normalization (characteristic density) :param Rs: scale radius @@ -284,5 +310,5 @@ def rho02alpha(self, rho0, Rs, gamma_inner, gamma_outer): :return: deflection angle at RS """ gx = self._g(1.0, gamma_inner, gamma_outer) - alpha_Rs = rho0 * (4. 
* Rs ** 2 * gx / 1.0 ** 2) + alpha_Rs = rho0 * (4.0 * Rs**2 * gx / 1.0**2) return alpha_Rs diff --git a/lenstronomy/LensModel/Profiles/hernquist.py b/lenstronomy/LensModel/Profiles/hernquist.py index 3354c0802..0054e1230 100644 --- a/lenstronomy/LensModel/Profiles/hernquist.py +++ b/lenstronomy/LensModel/Profiles/hernquist.py @@ -1,13 +1,12 @@ import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Hernquist'] +__all__ = ["Hernquist"] class Hernquist(LensProfileBase): - """ - class to compute the Hernquist 1990 model, which is in 3d: - rho(r) = rho0 / (r/Rs * (1 + (r/Rs))**3) + """Class to compute the Hernquist 1990 model, which is in 3d: rho(r) = rho0 / (r/Rs + * (1 + (r/Rs))**3) in lensing terms, the normalization parameter 'sigma0' is defined such that the deflection at projected RS leads to alpha = 2./3 * Rs * sigma0 @@ -34,32 +33,29 @@ class to compute the Hernquist 1990 model, which is in 3d: >>> from lenstronomy.LensModel.Profiles.hernquist import Hernquist >>> hernquist = Hernquist() >>> alpha_x, alpha_y = hernquist.derivatives(x=1, y=1, Rs=rs_angle, sigma0=sigma0, center_x=0, center_y=0) - - """ + _diff = 0.00001 _s = 0.00001 - param_names = ['sigma0', 'Rs', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': 0, 'Rs': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 100, 'Rs': 100, 'center_x': 100, 'center_y': 100} + param_names = ["sigma0", "Rs", "center_x", "center_y"] + lower_limit_default = {"sigma0": 0, "Rs": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"sigma0": 100, "Rs": 100, "center_x": 100, "center_y": 100} @staticmethod def density(r, rho0, Rs): - """ - computes the 3-d density + """Computes the 3-d density. :param r: 3-d radius :param rho0: density normalization :param Rs: Hernquist radius :return: density at radius r """ - rho = rho0 / (r/Rs * (1 + (r/Rs))**3) + rho = rho0 / (r / Rs * (1 + (r / Rs)) ** 3) return rho def density_lens(self, r, sigma0, Rs): - """ - Density as a function of 3d radius in lensing parameters - This function converts the lensing definition sigma0 into the 3d density + """Density as a function of 3d radius in lensing parameters This function + converts the lensing definition sigma0 into the 3d density. :param r: 3d radius :param sigma0: rho0 * Rs (units of projected density) @@ -70,8 +66,7 @@ def density_lens(self, r, sigma0, Rs): return self.density(r, rho0, Rs) def density_2d(self, x, y, rho0, Rs, center_x=0, center_y=0): - """ - projected density along the line of sight at coordinate (x, y) + """Projected density along the line of sight at coordinate (x, y) :param x: x-coordinate :param y: y-coordinate @@ -84,33 +79,32 @@ def density_2d(self, x, y, rho0, Rs, center_x=0, center_y=0): x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) - X = r/Rs + X = r / Rs sigma0 = self.rho2sigma(rho0, Rs) if isinstance(X, int) or isinstance(X, float): if X == 1: X = 1.000001 else: X[X == 1] = 1.000001 - sigma = sigma0 / (X**2-1)**2 * (-3 + (2+X**2)*self._F(X)) + sigma = sigma0 / (X**2 - 1) ** 2 * (-3 + (2 + X**2) * self._F(X)) return sigma @staticmethod def mass_3d(r, rho0, Rs): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. 
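As a quick check of the sigma0 normalization convention stated in the Hernquist docstring above (the deflection at the projected scale radius equals 2/3 * Rs * sigma0), using the call signature shown in that docstring's example::

    from lenstronomy.LensModel.Profiles.hernquist import Hernquist

    hernquist = Hernquist()
    Rs, sigma0 = 1.5, 0.7
    alpha_x, alpha_y = hernquist.derivatives(x=Rs, y=0.0, Rs=Rs, sigma0=sigma0)
    print(alpha_x, 2.0 / 3.0 * Rs * sigma0)  # the two values should agree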
- :param r: 3-d radius within the mass is integrated (same distance units as density definition) + :param r: 3-d radius within the mass is integrated (same distance units as + density definition) :param rho0: density normalization :param Rs: Hernquist radius :return: enclosed mass """ - mass_3d = 2*np.pi*Rs**3*rho0 * r**2/(r + Rs)**2 + mass_3d = 2 * np.pi * Rs**3 * rho0 * r**2 / (r + Rs) ** 2 return mass_3d def mass_3d_lens(self, r, sigma0, Rs): - """ - mass enclosed a 3d sphere or radius r for lens parameterisation - This function converts the lensing definition sigma0 into the 3d density + """Mass enclosed a 3d sphere or radius r for lens parameterisation This function + converts the lensing definition sigma0 into the 3d density. :param r: radius :param sigma0: rho0 * Rs (units of projected density) @@ -121,8 +115,7 @@ def mass_3d_lens(self, r, sigma0, Rs): return self.mass_3d(r, rho0, Rs) def mass_2d(self, r, rho0, Rs): - """ - mass enclosed projected 2d sphere of radius r + """Mass enclosed projected 2d sphere of radius r. :param r: projected radius :param rho0: density normalization @@ -134,38 +127,33 @@ def mass_2d(self, r, rho0, Rs): return self.mass_2d_lens(r, sigma0, Rs) def mass_2d_lens(self, r, sigma0, Rs): - """ - mass enclosed projected 2d sphere of radius r - Same as mass_2d but with input normalization in units of projected density + """Mass enclosed projected 2d sphere of radius r Same as mass_2d but with input + normalization in units of projected density. + :param r: projected radius :param sigma0: rho0 * Rs (units of projected density) :param Rs: Hernquist radius :return: mass enclosed 2d projected radius """ - X = r/Rs - alpha_r = 2*sigma0 * Rs * X * (1-self._F(X)) / (X**2-1) + X = r / Rs + alpha_r = 2 * sigma0 * Rs * X * (1 - self._F(X)) / (X**2 - 1) mass_2d = alpha_r * r * np.pi return mass_2d @staticmethod def mass_tot(rho0, Rs): - """ - total mass within the profile - :param rho0: density normalization - :param Rs: Hernquist radius - :return: total mass within profile - """ - m_tot = 2*np.pi*rho0*Rs**3 + """Total mass within the profile :param rho0: density normalization :param Rs: + Hernquist radius :return: total mass within profile.""" + m_tot = 2 * np.pi * rho0 * Rs**3 return m_tot def function(self, x, y, sigma0, Rs, center_x=0, center_y=0): - """ - lensing potential + """Lensing potential. :param x: x-coordinate position (units of angle) :param y: y-coordinate position (units of angle) - :param sigma0: normalization parameter defined such that the deflection at projected RS leads to - alpha = 2./3 * Rs * sigma0 + :param sigma0: normalization parameter defined such that the deflection at + projected RS leads to alpha = 2./3 * Rs * sigma0 :param Rs: Hernquist radius in units of angle :param center_x: x-center of the profile (units of angle) :param center_y: y-center of the profile (units of angle) @@ -176,7 +164,7 @@ def function(self, x, y, sigma0, Rs, center_x=0, center_y=0): r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) X = r / Rs - f_ = sigma0 * Rs ** 2 * (np.log(X ** 2 / 4.) 
+ 2 * self._F(X)) + f_ = sigma0 * Rs**2 * (np.log(X**2 / 4.0) + 2 * self._F(X)) return f_ def derivatives(self, x, y, sigma0, Rs, center_x=0, center_y=0): @@ -195,42 +183,49 @@ def derivatives(self, x, y, sigma0, Rs, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) - X = r/Rs + X = r / Rs if isinstance(r, int) or isinstance(r, float): # f = (1 - self._F(X)) / (X ** 2 - 1) # this expression is 1/3 for X=1 if X == 1: - f = 1./3 + f = 1.0 / 3 else: - f = (1 - self._F(X)) / (X ** 2 - 1) + f = (1 - self._F(X)) / (X**2 - 1) else: f = np.empty_like(X) - f[X == 1] = 1./3 + f[X == 1] = 1.0 / 3 X_ = X[X != 1] - f[X != 1] = (1 - self._F(X_)) / (X_ ** 2 - 1) + f[X != 1] = (1 - self._F(X_)) / (X_**2 - 1) alpha_r = 2 * sigma0 * Rs * f * X - f_x = alpha_r * x_/r - f_y = alpha_r * y_/r + f_x = alpha_r * x_ / r + f_y = alpha_r * y_ / r return f_x, f_y def hessian(self, x, y, sigma0, Rs, center_x=0, center_y=0): - """ - Hessian terms of the function + """Hessian terms of the function. :param x: x-coordinate position (units of angle) :param y: y-coordinate position (units of angle) - :param sigma0: normalization parameter defined such that the deflection at projected RS leads to - alpha = 2./3 * Rs * sigma0 + :param sigma0: normalization parameter defined such that the deflection at + projected RS leads to alpha = 2./3 * Rs * sigma0 :param Rs: Hernquist radius in units of angle :param center_x: x-center of the profile (units of angle) :param center_y: y-center of the profile (units of angle) :return: df/dxdx, df/dxdy, df/dydx, df/dydy """ diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff / 2, y, sigma0, Rs, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff / 2, sigma0, Rs, center_x, center_y) - - alpha_ra_dx_, alpha_dec_dx_ = self.derivatives(x - diff / 2, y, sigma0, Rs, center_x, center_y) - alpha_ra_dy_, alpha_dec_dy_ = self.derivatives(x, y - diff / 2, sigma0, Rs, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff / 2, y, sigma0, Rs, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff / 2, sigma0, Rs, center_x, center_y + ) + + alpha_ra_dx_, alpha_dec_dx_ = self.derivatives( + x - diff / 2, y, sigma0, Rs, center_x, center_y + ) + alpha_ra_dy_, alpha_dec_dy_ = self.derivatives( + x, y - diff / 2, sigma0, Rs, center_x, center_y + ) f_xx = (alpha_ra_dx - alpha_ra_dx_) / diff f_xy = (alpha_ra_dy - alpha_ra_dy_) / diff @@ -240,22 +235,17 @@ def hessian(self, x, y, sigma0, Rs, center_x=0, center_y=0): @staticmethod def rho2sigma(rho0, Rs): - """ - converts 3d density into 2d projected density parameter - :param rho0: 3d density normalization of Hernquist model - :param Rs: Hernquist radius - :return: sigma0 defined quantity in projected units - """ + """Converts 3d density into 2d projected density parameter :param rho0: 3d + density normalization of Hernquist model :param Rs: Hernquist radius :return: + sigma0 defined quantity in projected units.""" return rho0 * Rs @staticmethod def sigma2rho(sigma0, Rs): - """ - converts projected density parameter (in units of deflection) into 3d density parameter - :param sigma0: density defined quantity in projected units - :param Rs: Hernquist radius - :return: rho0 the 3d density normalization of Hernquist model - """ + """Converts projected density parameter (in units of deflection) into 3d density + parameter :param sigma0: density defined quantity in projected units :param Rs: + Hernquist radius :return: 
rho0 the 3d density normalization of Hernquist + model.""" return sigma0 / Rs def _F(self, X): @@ -268,31 +258,30 @@ def _F(self, X): if isinstance(X, int) or isinstance(X, float): X = max(X, c) if 0 < X < 1: - a = 1. / np.sqrt(1 - X ** 2) * np.arctanh(np.sqrt(1 - X**2)) + a = 1.0 / np.sqrt(1 - X**2) * np.arctanh(np.sqrt(1 - X**2)) elif X == 1: - a = 1. + a = 1.0 elif X > 1: - a = 1. / np.sqrt(X ** 2 - 1) * np.arctan(np.sqrt(X**2 - 1)) + a = 1.0 / np.sqrt(X**2 - 1) * np.arctan(np.sqrt(X**2 - 1)) else: # X == 0: - a = 1. / np.sqrt(1 - c ** 2) * np.arctanh(np.sqrt((1 - c ** 2))) + a = 1.0 / np.sqrt(1 - c**2) * np.arctanh(np.sqrt((1 - c**2))) else: a = np.empty_like(X) X[X < c] = c x = X[X < 1] - a[X < 1] = 1 / np.sqrt(1 - x ** 2) * np.arctanh(np.sqrt((1 - x**2))) + a[X < 1] = 1 / np.sqrt(1 - x**2) * np.arctanh(np.sqrt((1 - x**2))) # x = X[X == 1] - a[X == 1] = 1. + a[X == 1] = 1.0 x = X[X > 1] - a[X > 1] = 1 / np.sqrt(x ** 2 - 1) * np.arctan(np.sqrt(x**2 - 1)) + a[X > 1] = 1 / np.sqrt(x**2 - 1) * np.arctan(np.sqrt(x**2 - 1)) # a[X>y] = 0 return a def grav_pot(self, x, y, rho0, Rs, center_x=0, center_y=0): - """ - #TODO decide whether these functions are needed or not + """#TODO decide whether these functions are needed or not gravitational potential (modulo 4 pi G and rho0 in appropriate units) :param x: x-coordinate position (units of angle) diff --git a/lenstronomy/LensModel/Profiles/hernquist_ellipse.py b/lenstronomy/LensModel/Profiles/hernquist_ellipse.py index 479ca4d14..29ed9bdb0 100644 --- a/lenstronomy/LensModel/Profiles/hernquist_ellipse.py +++ b/lenstronomy/LensModel/Profiles/hernquist_ellipse.py @@ -3,18 +3,32 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np -__all__ = ['Hernquist_Ellipse'] +__all__ = ["Hernquist_Ellipse"] class Hernquist_Ellipse(LensProfileBase): - """ - this class contains functions for the elliptical Hernquist profile. Ellipticity is defined in the potential. - + """This class contains functions for the elliptical Hernquist profile. + Ellipticity is defined in the potential. 
""" - param_names = ['sigma0', 'Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': 0, 'Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 100, 'Rs': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["sigma0", "Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "sigma0": 0, + "Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 100, + "Rs": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.spherical = Hernquist() @@ -22,18 +36,18 @@ def __init__(self): super(Hernquist_Ellipse, self).__init__() def function(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns double integral of NFW profile - """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + """Returns double integral of NFW profile.""" + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) f_ = self.spherical.function(x_, y_, sigma0, Rs) return f_ def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW) - """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + """Returns df/dx and df/dy of the function (integral of NFW)""" + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) phi_G, q = param_util.ellipticity2phi_q(e1, e2) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) @@ -42,28 +56,32 @@ def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): f_x_prim, f_y_prim = self.spherical.derivatives(x_, y_, sigma0, Rs) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - alpha_ra, alpha_dec = self.derivatives(x, y, sigma0, Rs, e1, e2, center_x, center_y) + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + alpha_ra, alpha_dec = self.derivatives( + x, y, sigma0, Rs, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, sigma0, Rs, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, sigma0, Rs, e1, e2, center_x, center_y) - - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = (alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, sigma0, Rs, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, sigma0, Rs, e1, e2, center_x, center_y + ) + + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy def density(self, r, rho0, Rs, e1=0, e2=0): - """ - computes the 3-d density + """Computes the 3-d density. 
:param r: 3-d radius :param rho0: density normalization @@ -73,9 +91,8 @@ def density(self, r, rho0, Rs, e1=0, e2=0): return self.spherical.density(r, rho0, Rs) def density_lens(self, r, sigma0, Rs, e1=0, e2=0): - """ - Density as a function of 3d radius in lensing parameters - This function converts the lensing definition sigma0 into the 3d density + """Density as a function of 3d radius in lensing parameters This function + converts the lensing definition sigma0 into the 3d density. :param r: 3d radius :param sigma0: rho0 * Rs (units of projected density) @@ -85,8 +102,7 @@ def density_lens(self, r, sigma0, Rs, e1=0, e2=0): return self.spherical.density_lens(r, sigma0, Rs) def density_2d(self, x, y, rho0, Rs, e1=0, e2=0, center_x=0, center_y=0): - """ - projected density along the line of sight at coordinate (x, y) + """Projected density along the line of sight at coordinate (x, y) :param x: x-coordinate :param y: y-coordinate @@ -99,9 +115,9 @@ def density_2d(self, x, y, rho0, Rs, e1=0, e2=0, center_x=0, center_y=0): return self.spherical.density_2d(x, y, rho0, Rs, center_x, center_y) def mass_2d_lens(self, r, sigma0, Rs, e1=0, e2=0): - """ - mass enclosed projected 2d sphere of radius r - Same as mass_2d but with input normalization in units of projected density + """Mass enclosed projected 2d sphere of radius r Same as mass_2d but with input + normalization in units of projected density. + :param r: projected radius :param sigma0: rho0 * Rs (units of projected density) :param Rs: Hernquist radius @@ -110,8 +126,7 @@ def mass_2d_lens(self, r, sigma0, Rs, e1=0, e2=0): return self.spherical.mass_2d_lens(r, sigma0, Rs) def mass_2d(self, r, rho0, Rs, e1=0, e2=0): - """ - mass enclosed projected 2d sphere of radius r + """Mass enclosed projected 2d sphere of radius r. :param r: projected radius :param rho0: density normalization @@ -121,10 +136,10 @@ def mass_2d(self, r, rho0, Rs, e1=0, e2=0): return self.spherical.mass_2d(r, rho0, Rs) def mass_3d(self, r, rho0, Rs, e1=0, e2=0): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. - :param r: 3-d radius within the mass is integrated (same distance units as density definition) + :param r: 3-d radius within the mass is integrated (same distance units as + density definition) :param rho0: density normalization :param Rs: Hernquist radius :return: enclosed mass @@ -132,10 +147,10 @@ def mass_3d(self, r, rho0, Rs, e1=0, e2=0): return self.spherical.mass_3d(r, rho0, Rs) def mass_3d_lens(self, r, sigma0, Rs, e1=0, e2=0): - """ - mass enclosed a 3d sphere or radius r in lensing parameterization + """Mass enclosed a 3d sphere or radius r in lensing parameterization. 
- :param r: 3-d radius within the mass is integrated (same distance units as density definition) + :param r: 3-d radius within the mass is integrated (same distance units as + density definition) :param sigma0: rho0 * Rs (units of projected density) :param Rs: Hernquist radius :return: enclosed mass diff --git a/lenstronomy/LensModel/Profiles/hernquist_ellipse_cse.py b/lenstronomy/LensModel/Profiles/hernquist_ellipse_cse.py index 74060d881..306e16e39 100644 --- a/lenstronomy/LensModel/Profiles/hernquist_ellipse_cse.py +++ b/lenstronomy/LensModel/Profiles/hernquist_ellipse_cse.py @@ -4,47 +4,127 @@ from lenstronomy.LensModel.Profiles.cored_steep_ellipsoid import CSEMajorAxisSet import numpy as np -__all__ = ['HernquistEllipseCSE'] +__all__ = ["HernquistEllipseCSE"] class HernquistEllipseCSE(Hernquist_Ellipse): - """ - this class contains functions for the elliptical Hernquist profile. Ellipticity is defined in the convergence. - Approximation with CSE profile introduced by Oguri 2021: https://arxiv.org/pdf/2106.11464.pdf - + """This class contains functions for the elliptical Hernquist profile. + Ellipticity is defined in the convergence. + Approximation with CSE profile introduced by Oguri 2021: https://arxiv.org/pdf/2106.11464.pdf """ - param_names = ['sigma0', 'Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': 0, 'Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 100, 'Rs': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["sigma0", "Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "sigma0": 0, + "Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 100, + "Rs": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.cse_major_axis_set = CSEMajorAxisSet() # Table 2 in Oguri 2021 - self._a_list = [9.200445e-18, 2.184724e-16, 3.548079e-15, 2.823716e-14, 1.091876e-13, - 6.998697e-13, 3.142264e-12, 1.457280e-11, 4.472783e-11, 2.042079e-10, - 8.708137e-10, 2.423649e-09, 7.353440e-09, 5.470738e-08, 2.445878e-07, - 4.541672e-07, 3.227611e-06, 1.110690e-05, 3.725101e-05, 1.056271e-04, - 6.531501e-04, 2.121330e-03, 8.285518e-03, 4.084190e-02, 5.760942e-02, - 1.788945e-01, 2.092774e-01, 3.697750e-01, 3.440555e-01, 5.792737e-01, - 2.325935e-01, 5.227961e-01, 3.079968e-01, 1.633456e-01, 7.410900e-02, - 3.123329e-02, 1.292488e-02, 2.156527e+00, 1.652553e-02, 2.314934e-02, - 3.992313e-01] - self._s_list = [1.199110e-06, 3.751762e-06, 9.927207e-06, 2.206076e-05, 3.781528e-05, - 6.659808e-05, 1.154366e-04, 1.924150e-04, 3.040440e-04, 4.683051e-04, - 7.745084e-04, 1.175953e-03, 1.675459e-03, 2.801948e-03, 9.712807e-03, - 5.469589e-03, 1.104654e-02, 1.893893e-02, 2.792864e-02, 4.152834e-02, - 6.640398e-02, 1.107083e-01, 1.648028e-01, 2.839601e-01, 4.129439e-01, - 8.239115e-01, 6.031726e-01, 1.145604e+00, 1.401895e+00, 2.512223e+00, - 2.038025e+00, 4.644014e+00, 9.301590e+00, 2.039273e+01, 4.896534e+01, - 1.252311e+02, 3.576766e+02, 2.579464e+04, 2.944679e+04, 2.834717e+03, - 5.931328e+04] + self._a_list = [ + 9.200445e-18, + 2.184724e-16, + 3.548079e-15, + 2.823716e-14, + 1.091876e-13, + 6.998697e-13, + 3.142264e-12, + 1.457280e-11, + 4.472783e-11, + 2.042079e-10, + 8.708137e-10, + 2.423649e-09, + 7.353440e-09, + 5.470738e-08, + 2.445878e-07, + 4.541672e-07, + 3.227611e-06, + 1.110690e-05, + 3.725101e-05, + 1.056271e-04, + 6.531501e-04, + 2.121330e-03, + 8.285518e-03, + 
4.084190e-02, + 5.760942e-02, + 1.788945e-01, + 2.092774e-01, + 3.697750e-01, + 3.440555e-01, + 5.792737e-01, + 2.325935e-01, + 5.227961e-01, + 3.079968e-01, + 1.633456e-01, + 7.410900e-02, + 3.123329e-02, + 1.292488e-02, + 2.156527e00, + 1.652553e-02, + 2.314934e-02, + 3.992313e-01, + ] + self._s_list = [ + 1.199110e-06, + 3.751762e-06, + 9.927207e-06, + 2.206076e-05, + 3.781528e-05, + 6.659808e-05, + 1.154366e-04, + 1.924150e-04, + 3.040440e-04, + 4.683051e-04, + 7.745084e-04, + 1.175953e-03, + 1.675459e-03, + 2.801948e-03, + 9.712807e-03, + 5.469589e-03, + 1.104654e-02, + 1.893893e-02, + 2.792864e-02, + 4.152834e-02, + 6.640398e-02, + 1.107083e-01, + 1.648028e-01, + 2.839601e-01, + 4.129439e-01, + 8.239115e-01, + 6.031726e-01, + 1.145604e00, + 1.401895e00, + 2.512223e00, + 2.038025e00, + 4.644014e00, + 9.301590e00, + 2.039273e01, + 4.896534e01, + 1.252311e02, + 3.576766e02, + 2.579464e04, + 2.944679e04, + 2.834717e03, + 5.931328e04, + ] super(HernquistEllipseCSE, self).__init__() def function(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns double integral of NFW profile - """ + """Returns double integral of NFW profile.""" phi_q, q = param_util.ellipticity2phi_q(e1, e2) # shift x_ = x - center_x @@ -53,21 +133,23 @@ def function(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): x__, y__ = util.rotate(x_, y_, phi_q) # potential calculation - f_ = self.cse_major_axis_set.function(x__ / Rs, y__ / Rs, self._a_list, self._s_list, q) + f_ = self.cse_major_axis_set.function( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) const = self._normalization(sigma0, Rs, q) return const * f_ def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW) - """ + """Returns df/dx and df/dy of the function (integral of NFW)""" phi_q, q = param_util.ellipticity2phi_q(e1, e2) # shift x_ = x - center_x y_ = y - center_y # rotate x__, y__ = util.rotate(x_, y_, phi_q) - f__x, f__y = self.cse_major_axis_set.derivatives(x__ / Rs, y__ / Rs, self._a_list, self._s_list, q) + f__x, f__y = self.cse_major_axis_set.derivatives( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) # rotate deflections back f_x, f_y = util.rotate(f__x, f__y, -phi_q) @@ -75,34 +157,34 @@ def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): return const * f_x, const * f_y def hessian(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" phi_q, q = param_util.ellipticity2phi_q(e1, e2) # shift x_ = x - center_x y_ = y - center_y # rotate x__, y__ = util.rotate(x_, y_, phi_q) - f__xx, f__xy, __, f__yy = self.cse_major_axis_set.hessian(x__ / Rs, y__ / Rs, self._a_list, self._s_list, q) + f__xx, f__xy, __, f__yy = self.cse_major_axis_set.hessian( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) # rotate back - kappa = 1. / 2 * (f__xx + f__yy) - gamma1__ = 1. 
/ 2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_q) * gamma1__ - np.sin(2 * phi_q) * gamma2__ gamma2 = +np.sin(2 * phi_q) * gamma1__ + np.cos(2 * phi_q) * gamma2__ f_xx = kappa + gamma1 f_yy = kappa - gamma1 f_xy = gamma2 - const = self._normalization(sigma0, Rs, q) / Rs ** 2 + const = self._normalization(sigma0, Rs, q) / Rs**2 return const * f_xx, const * f_xy, const * f_xy, const * f_yy @staticmethod def _normalization(sigma0, Rs, q): - """ - mapping to eqn 10 and 11 in Oguri 2021 from phenomenological definition + """Mapping to eqn 10 and 11 in Oguri 2021 from phenomenological definition. :param sigma0: sigma0 normalization :param Rs: scale radius @@ -110,5 +192,5 @@ def _normalization(sigma0, Rs, q): :return: normalization (m) """ rs_ = Rs / np.sqrt(q) - const = sigma0 / 2 * rs_ ** 3 + const = sigma0 / 2 * rs_**3 return const diff --git a/lenstronomy/LensModel/Profiles/hessian.py b/lenstronomy/LensModel/Profiles/hessian.py index bfce5ea83..5003178d1 100644 --- a/lenstronomy/LensModel/Profiles/hessian.py +++ b/lenstronomy/LensModel/Profiles/hessian.py @@ -1,18 +1,31 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Hessian'] +__all__ = ["Hessian"] class Hessian(LensProfileBase): - """ - class for constant Hessian distortion (second order) - The input is in the same convention as the LensModel.hessian() output. - """ - param_names = ['f_xx', 'f_yy', 'f_xy', 'f_yx', 'ra_0', 'dec_0'] - lower_limit_default = {'f_xx': -100, 'f_yy': -100, 'f_xy': -100, 'f_yx': -100, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'f_xx': 100, 'f_yy': 100, 'f_xy': 100, 'f_yx': 100, 'ra_0': 100, 'dec_0': 100} + """Class for constant Hessian distortion (second order) The input is in the same + convention as the LensModel.hessian() output.""" + + param_names = ["f_xx", "f_yy", "f_xy", "f_yx", "ra_0", "dec_0"] + lower_limit_default = { + "f_xx": -100, + "f_yy": -100, + "f_xy": -100, + "f_yx": -100, + "ra_0": -100, + "dec_0": -100, + } + upper_limit_default = { + "f_xx": 100, + "f_yy": 100, + "f_xy": 100, + "f_yx": 100, + "ra_0": 100, + "dec_0": 100, + } def function(self, x, y, f_xx, f_yy, f_xy, f_yx, ra_0=0, dec_0=0): """ @@ -29,7 +42,7 @@ def function(self, x, y, f_xx, f_yy, f_xy, f_yx, ra_0=0, dec_0=0): """ x_ = x - ra_0 y_ = y - dec_0 - f_ = 1/2. * (f_xx * x_ * x_ + (f_xy + f_yx) * x_ * y_ + f_yy * y_ * y_) + f_ = 1 / 2.0 * (f_xx * x_ * x_ + (f_xy + f_yx) * x_ * y_ + f_yy * y_ * y_) return f_ def derivatives(self, x, y, f_xx, f_yy, f_xy, f_yx, ra_0=0, dec_0=0): @@ -52,8 +65,7 @@ def derivatives(self, x, y, f_xx, f_yy, f_xy, f_yx, ra_0=0, dec_0=0): return f_x, f_y def hessian(self, x, y, f_xx, f_yy, f_xy, f_yx, ra_0=0, dec_0=0): - """ - Hessian. Attention: If f_xy != f_yx then this function is not accurate! + """Hessian. Attention: If f_xy != f_yx then this function is not accurate! 
:param x: x-coordinate (angle) :param y: y0-coordinate (angle) diff --git a/lenstronomy/LensModel/Profiles/interpol.py b/lenstronomy/LensModel/Profiles/interpol.py index 306c8c26f..e00e295a2 100644 --- a/lenstronomy/LensModel/Profiles/interpol.py +++ b/lenstronomy/LensModel/Profiles/interpol.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import scipy.interpolate import numpy as np @@ -6,12 +6,12 @@ import lenstronomy.Util.util as util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Interpol', 'InterpolScaled'] +__all__ = ["Interpol", "InterpolScaled"] class Interpol(LensProfileBase): - """ - class which uses an interpolation of a lens model and its first and second order derivatives + """Class which uses an interpolation of a lens model and its first and second order + derivatives. See also the tests in lenstronomy.test.test_LensModel.test_Profiles.test_interpol.py for example use cases as checks against known analytic models. @@ -19,7 +19,17 @@ class which uses an interpolation of a lens model and its first and second order The deflection angle is in the same convention as the one in the LensModel module, meaning that: source position = image position - deflection angle """ - param_names = ['grid_interp_x', 'grid_interp_y', 'f_', 'f_x', 'f_y', 'f_xx', 'f_yy', 'f_xy'] + + param_names = [ + "grid_interp_x", + "grid_interp_y", + "f_", + "f_x", + "f_y", + "f_xx", + "f_yy", + "f_xy", + ] lower_limit_default = {} upper_limit_default = {} @@ -34,12 +44,23 @@ def __init__(self, grid=False, min_grid_number=100, kwargs_spline=None): self._grid = grid self._min_grid_number = min_grid_number if kwargs_spline is None: - kwargs_spline = {'kx': 1, 'ky': 1, 's': 0} + kwargs_spline = {"kx": 1, "ky": 1, "s": 0} self._kwargs_spline = kwargs_spline super(Interpol, self).__init__() - def function(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=None, f_y=None, f_xx=None, f_yy=None, - f_xy=None): + def function( + self, + x, + y, + grid_interp_x=None, + grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): """ :param x: x-coordinate (angular position), float or numpy array @@ -54,96 +75,180 @@ def function(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=No :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y :return: potential at interpolated positions (x, y) """ - #self._check_interp(grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy) + # self._check_interp(grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy) n = len(np.atleast_1d(x)) if n <= 1 and np.shape(x) == (): - #if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: + # if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: f_out = self.f_interp(x, y, grid_interp_x, grid_interp_y, f_) return f_out else: if self._grid and n >= self._min_grid_number: x_axes, y_axes = util.get_axes(x, y) - f_out = self.f_interp(x_axes, y_axes, grid_interp_x, grid_interp_y, f_, grid=self._grid) + f_out = self.f_interp( + x_axes, y_axes, grid_interp_x, grid_interp_y, f_, grid=self._grid + ) f_out = util.image2array(f_out) else: - #n = len(x) + # n = len(x) f_out = np.zeros(n) for i in range(n): - f_out[i] = self.f_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_) + f_out[i] = self.f_interp( + x[i], y[i], grid_interp_x, grid_interp_y, f_ + ) return f_out - def derivatives(self, x, y, grid_interp_x=None, 
grid_interp_y=None, f_=None, f_x=None, f_y=None, f_xx=None, f_yy=None, f_xy=None): - """ - returns df/dx and df/dy of the function + def derivatives( + self, + x, + y, + grid_interp_x=None, + grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): + """Returns df/dx and df/dy of the function. :param x: x-coordinate (angular position), float or numpy array :param y: y-coordinate (angular position), float or numpy array - :param grid_interp_x: numpy array (ascending) to mark the x-direction of the interpolation grid - :param grid_interp_y: numpy array (ascending) to mark the y-direction of the interpolation grid - :param f_: 2d numpy array of lensing potential, matching the grids in grid_interp_x and grid_interp_y - :param f_x: 2d numpy array of deflection in x-direction, matching the grids in grid_interp_x and grid_interp_y - :param f_y: 2d numpy array of deflection in y-direction, matching the grids in grid_interp_x and grid_interp_y - :param f_xx: 2d numpy array of df/dxx, matching the grids in grid_interp_x and grid_interp_y - :param f_yy: 2d numpy array of df/dyy, matching the grids in grid_interp_x and grid_interp_y - :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y + :param grid_interp_x: numpy array (ascending) to mark the x-direction of the + interpolation grid + :param grid_interp_y: numpy array (ascending) to mark the y-direction of the + interpolation grid + :param f_: 2d numpy array of lensing potential, matching the grids in + grid_interp_x and grid_interp_y + :param f_x: 2d numpy array of deflection in x-direction, matching the grids in + grid_interp_x and grid_interp_y + :param f_y: 2d numpy array of deflection in y-direction, matching the grids in + grid_interp_x and grid_interp_y + :param f_xx: 2d numpy array of df/dxx, matching the grids in grid_interp_x and + grid_interp_y + :param f_yy: 2d numpy array of df/dyy, matching the grids in grid_interp_x and + grid_interp_y + :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and + grid_interp_y :return: f_x, f_y at interpolated positions (x, y) """ n = len(np.atleast_1d(x)) if n <= 1 and np.shape(x) == (): - #if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: + # if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: f_x_out = self.f_x_interp(x, y, grid_interp_x, grid_interp_y, f_x) f_y_out = self.f_y_interp(x, y, grid_interp_x, grid_interp_y, f_y) return f_x_out, f_y_out else: if self._grid and n >= self._min_grid_number: x_, y_ = util.get_axes(x, y) - f_x_out = self.f_x_interp(x_, y_, grid_interp_x, grid_interp_y, f_x, grid=self._grid) - f_y_out = self.f_y_interp(x_, y_, grid_interp_x, grid_interp_y, f_y, grid=self._grid) + f_x_out = self.f_x_interp( + x_, y_, grid_interp_x, grid_interp_y, f_x, grid=self._grid + ) + f_y_out = self.f_y_interp( + x_, y_, grid_interp_x, grid_interp_y, f_y, grid=self._grid + ) f_x_out = util.image2array(f_x_out) f_y_out = util.image2array(f_y_out) else: - #n = len(x) + # n = len(x) f_x_out = self.f_x_interp(x, y, grid_interp_x, grid_interp_y, f_x) f_y_out = self.f_y_interp(x, y, grid_interp_x, grid_interp_y, f_y) return f_x_out, f_y_out - def hessian(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=None, f_y=None, f_xx=None, f_yy=None, f_xy=None): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 + def hessian( + self, + x, + y, + grid_interp_x=None, + 
grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2. :param x: x-coordinate (angular position), float or numpy array :param y: y-coordinate (angular position), float or numpy array - :param grid_interp_x: numpy array (ascending) to mark the x-direction of the interpolation grid - :param grid_interp_y: numpy array (ascending) to mark the y-direction of the interpolation grid - :param f_: 2d numpy array of lensing potential, matching the grids in grid_interp_x and grid_interp_y - :param f_x: 2d numpy array of deflection in x-direction, matching the grids in grid_interp_x and grid_interp_y - :param f_y: 2d numpy array of deflection in y-direction, matching the grids in grid_interp_x and grid_interp_y - :param f_xx: 2d numpy array of df/dxx, matching the grids in grid_interp_x and grid_interp_y - :param f_yy: 2d numpy array of df/dyy, matching the grids in grid_interp_x and grid_interp_y - :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y + :param grid_interp_x: numpy array (ascending) to mark the x-direction of the + interpolation grid + :param grid_interp_y: numpy array (ascending) to mark the y-direction of the + interpolation grid + :param f_: 2d numpy array of lensing potential, matching the grids in + grid_interp_x and grid_interp_y + :param f_x: 2d numpy array of deflection in x-direction, matching the grids in + grid_interp_x and grid_interp_y + :param f_y: 2d numpy array of deflection in y-direction, matching the grids in + grid_interp_x and grid_interp_y + :param f_xx: 2d numpy array of df/dxx, matching the grids in grid_interp_x and + grid_interp_y + :param f_yy: 2d numpy array of df/dyy, matching the grids in grid_interp_x and + grid_interp_y + :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and + grid_interp_y :return: f_xx, f_xy, f_yx, f_yy at interpolated positions (x, y) """ - if not (hasattr(self, '_f_xx_interp')) and (f_xx is None or f_yy is None or f_xy is None): + if not (hasattr(self, "_f_xx_interp")) and ( + f_xx is None or f_yy is None or f_xy is None + ): diff = 0.000001 - alpha_ra_pp, alpha_dec_pp = self.derivatives(x + diff / 2, y + diff / 2, grid_interp_x=grid_interp_x, - grid_interp_y=grid_interp_y, f_=f_, f_x=f_x, f_y=f_y) - alpha_ra_pn, alpha_dec_pn = self.derivatives(x + diff / 2, y - diff / 2, grid_interp_x=grid_interp_x, - grid_interp_y=grid_interp_y, f_=f_, f_x=f_x, f_y=f_y) - - alpha_ra_np, alpha_dec_np = self.derivatives(x - diff / 2, y + diff / 2, grid_interp_x=grid_interp_x, - grid_interp_y=grid_interp_y, f_=f_, f_x=f_x, f_y=f_y) - alpha_ra_nn, alpha_dec_nn = self.derivatives(x - diff / 2, y - diff / 2, grid_interp_x=grid_interp_x, - grid_interp_y=grid_interp_y, f_=f_, f_x=f_x, f_y=f_y) - - f_xx_out = (alpha_ra_pp - alpha_ra_np + alpha_ra_pn - alpha_ra_nn) / diff / 2 - f_xy_out = (alpha_ra_pp - alpha_ra_pn + alpha_ra_np - alpha_ra_nn) / diff / 2 - f_yx_out = (alpha_dec_pp - alpha_dec_np + alpha_dec_pn - alpha_dec_nn) / diff / 2 - f_yy_out = (alpha_dec_pp - alpha_dec_pn + alpha_dec_np - alpha_dec_nn) / diff / 2 + alpha_ra_pp, alpha_dec_pp = self.derivatives( + x + diff / 2, + y + diff / 2, + grid_interp_x=grid_interp_x, + grid_interp_y=grid_interp_y, + f_=f_, + f_x=f_x, + f_y=f_y, + ) + alpha_ra_pn, alpha_dec_pn = self.derivatives( + x + diff / 2, + y - diff / 2, + grid_interp_x=grid_interp_x, + grid_interp_y=grid_interp_y, + f_=f_, + f_x=f_x, + f_y=f_y, + ) + + 
alpha_ra_np, alpha_dec_np = self.derivatives( + x - diff / 2, + y + diff / 2, + grid_interp_x=grid_interp_x, + grid_interp_y=grid_interp_y, + f_=f_, + f_x=f_x, + f_y=f_y, + ) + alpha_ra_nn, alpha_dec_nn = self.derivatives( + x - diff / 2, + y - diff / 2, + grid_interp_x=grid_interp_x, + grid_interp_y=grid_interp_y, + f_=f_, + f_x=f_x, + f_y=f_y, + ) + + f_xx_out = ( + (alpha_ra_pp - alpha_ra_np + alpha_ra_pn - alpha_ra_nn) / diff / 2 + ) + f_xy_out = ( + (alpha_ra_pp - alpha_ra_pn + alpha_ra_np - alpha_ra_nn) / diff / 2 + ) + f_yx_out = ( + (alpha_dec_pp - alpha_dec_np + alpha_dec_pn - alpha_dec_nn) / diff / 2 + ) + f_yy_out = ( + (alpha_dec_pp - alpha_dec_pn + alpha_dec_np - alpha_dec_nn) / diff / 2 + ) return f_xx_out, f_xy_out, f_yx_out, f_yy_out n = len(np.atleast_1d(x)) if n <= 1 and np.shape(x) == (): - #if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: + # if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1: f_xx_out = self.f_xx_interp(x, y, grid_interp_x, grid_interp_y, f_xx) f_yy_out = self.f_yy_interp(x, y, grid_interp_x, grid_interp_y, f_yy) f_xy_out = self.f_xy_interp(x, y, grid_interp_x, grid_interp_y, f_xy) @@ -151,71 +256,119 @@ def hessian(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=Non else: if self._grid and n >= self._min_grid_number: x_, y_ = util.get_axes(x, y) - f_xx_out = self.f_xx_interp(x_, y_, grid_interp_x, grid_interp_y, f_xx, grid=self._grid) - f_yy_out = self.f_yy_interp(x_, y_, grid_interp_x, grid_interp_y, f_yy, grid=self._grid) - f_xy_out = self.f_xy_interp(x_, y_, grid_interp_x, grid_interp_y, f_xy, grid=self._grid) + f_xx_out = self.f_xx_interp( + x_, y_, grid_interp_x, grid_interp_y, f_xx, grid=self._grid + ) + f_yy_out = self.f_yy_interp( + x_, y_, grid_interp_x, grid_interp_y, f_yy, grid=self._grid + ) + f_xy_out = self.f_xy_interp( + x_, y_, grid_interp_x, grid_interp_y, f_xy, grid=self._grid + ) f_xx_out = util.image2array(f_xx_out) f_yy_out = util.image2array(f_yy_out) f_xy_out = util.image2array(f_xy_out) else: - #n = len(x) + # n = len(x) f_xx_out, f_yy_out, f_xy_out = np.zeros(n), np.zeros(n), np.zeros(n) for i in range(n): - f_xx_out[i] = self.f_xx_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_xx) - f_yy_out[i] = self.f_yy_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_yy) - f_xy_out[i] = self.f_xy_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_xy) + f_xx_out[i] = self.f_xx_interp( + x[i], y[i], grid_interp_x, grid_interp_y, f_xx + ) + f_yy_out[i] = self.f_yy_interp( + x[i], y[i], grid_interp_x, grid_interp_y, f_yy + ) + f_xy_out[i] = self.f_xy_interp( + x[i], y[i], grid_interp_x, grid_interp_y, f_xy + ) return f_xx_out, f_xy_out, f_xy_out, f_yy_out def f_interp(self, x, y, x_grid=None, y_grid=None, f_=None, grid=False): - if not hasattr(self, '_f_interp'): - self._f_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_, **self._kwargs_spline) + if not hasattr(self, "_f_interp"): + self._f_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_, **self._kwargs_spline + ) return self._f_interp(y, x, grid=grid) def f_x_interp(self, x, y, x_grid=None, y_grid=None, f_x=None, grid=False): - if not hasattr(self, '_f_x_interp'): - self._f_x_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_x, **self._kwargs_spline) + if not hasattr(self, "_f_x_interp"): + self._f_x_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_x, **self._kwargs_spline + ) return self._f_x_interp(y, x, grid=grid) def 
f_y_interp(self, x, y, x_grid=None, y_grid=None, f_y=None, grid=False): - if not hasattr(self, '_f_y_interp'): - self._f_y_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_y, **self._kwargs_spline) + if not hasattr(self, "_f_y_interp"): + self._f_y_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_y, **self._kwargs_spline + ) return self._f_y_interp(y, x, grid=grid) def f_xx_interp(self, x, y, x_grid=None, y_grid=None, f_xx=None, grid=False): - if not hasattr(self, '_f_xx_interp'): - self._f_xx_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_xx, **self._kwargs_spline) + if not hasattr(self, "_f_xx_interp"): + self._f_xx_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_xx, **self._kwargs_spline + ) return self._f_xx_interp(y, x, grid=grid) def f_xy_interp(self, x, y, x_grid=None, y_grid=None, f_xy=None, grid=False): - if not hasattr(self, '_f_xy_interp'): - self._f_xy_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_xy, **self._kwargs_spline) + if not hasattr(self, "_f_xy_interp"): + self._f_xy_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_xy, **self._kwargs_spline + ) return self._f_xy_interp(y, x, grid=grid) def f_yy_interp(self, x, y, x_grid=None, y_grid=None, f_yy=None, grid=False): - if not hasattr(self, '_f_yy_interp'): - self._f_yy_interp = scipy.interpolate.RectBivariateSpline(y_grid, x_grid, f_yy, **self._kwargs_spline) + if not hasattr(self, "_f_yy_interp"): + self._f_yy_interp = scipy.interpolate.RectBivariateSpline( + y_grid, x_grid, f_yy, **self._kwargs_spline + ) return self._f_yy_interp(y, x, grid=grid) def do_interp(self, x_grid, y_grid, f_, f_x, f_y, f_xx=None, f_yy=None, f_xy=None): - self._f_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_, **self._kwargs_spline) - self._f_x_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_x, **self._kwargs_spline) - self._f_y_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_y, **self._kwargs_spline) + self._f_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_, **self._kwargs_spline + ) + self._f_x_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_x, **self._kwargs_spline + ) + self._f_y_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_y, **self._kwargs_spline + ) if f_xx is not None: - self._f_xx_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_xx, **self._kwargs_spline) + self._f_xx_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_xx, **self._kwargs_spline + ) if f_xy is not None: - self._f_xy_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_xy, **self._kwargs_spline) + self._f_xy_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_xy, **self._kwargs_spline + ) if f_yy is not None: - self._f_yy_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, f_yy, **self._kwargs_spline) + self._f_yy_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, f_yy, **self._kwargs_spline + ) class InterpolScaled(LensProfileBase): - """ - class for handling an interpolated lensing map and has the freedom to scale its lensing effect. + """Class for handling an interpolated lensing map and has the freedom to scale its + lensing effect. + Applications are e.g. mass to light ratio. 
""" - param_names = ['scale_factor', 'grid_interp_x', 'grid_interp_y', 'f_', 'f_x', 'f_y', 'f_xx', 'f_yy', 'f_xy'] - lower_limit_default = {'scale_factor': 0} - upper_limit_default = {'scale_factor': 100} + + param_names = [ + "scale_factor", + "grid_interp_x", + "grid_interp_y", + "f_", + "f_x", + "f_y", + "f_xx", + "f_yy", + "f_xy", + ] + lower_limit_default = {"scale_factor": 0} + upper_limit_default = {"scale_factor": 100} def __init__(self, grid=True, min_grid_number=100, kwargs_spline=None): """ @@ -225,11 +378,25 @@ def __init__(self, grid=True, min_grid_number=100, kwargs_spline=None): :param kwargs_spline: keyword arguments for the scipy.interpolate.RectBivariateSpline() interpolation (optional) if =None, a default linear interpolation is chosen. """ - self.interp_func = Interpol(grid, min_grid_number=min_grid_number, kwargs_spline=kwargs_spline) + self.interp_func = Interpol( + grid, min_grid_number=min_grid_number, kwargs_spline=kwargs_spline + ) super(InterpolScaled, self).__init__() - def function(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=None, f_y=None, - f_xx=None, f_yy=None, f_xy=None): + def function( + self, + x, + y, + scale_factor=1, + grid_interp_x=None, + grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): """ :param x: x-coordinate (angular position), float or numpy array @@ -245,12 +412,26 @@ def function(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=None, :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y :return: potential at interpolated positions (x, y) """ - f_out = self.interp_func.function(x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy) + f_out = self.interp_func.function( + x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy + ) f_out *= scale_factor return f_out - def derivatives(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=None, f_y=None, - f_xx=None, f_yy=None, f_xy=None): + def derivatives( + self, + x, + y, + scale_factor=1, + grid_interp_x=None, + grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): """ :param x: x-coordinate (angular position), float or numpy array @@ -266,13 +447,27 @@ def derivatives(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=No :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y :return: deflection angles in x- and y-direction at position (x, y) """ - f_x_out, f_y_out = self.interp_func.derivatives(x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy) + f_x_out, f_y_out = self.interp_func.derivatives( + x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy + ) f_x_out *= scale_factor f_y_out *= scale_factor return f_x_out, f_y_out - def hessian(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=None, f_=None, f_x=None, f_y=None, - f_xx=None, f_yy=None, f_xy=None): + def hessian( + self, + x, + y, + scale_factor=1, + grid_interp_x=None, + grid_interp_y=None, + f_=None, + f_x=None, + f_y=None, + f_xx=None, + f_yy=None, + f_xy=None, + ): """ :param x: x-coordinate (angular position), float or numpy array @@ -288,7 +483,9 @@ def hessian(self, x, y, scale_factor=1, grid_interp_x=None, grid_interp_y=None, :param f_xy: 2d numpy array of df/dxy, matching the grids in grid_interp_x and grid_interp_y :return: second derivatives of the lensing potential f_xx, f_yy, f_xy at position (x, y) 
""" - f_xx_out, f_xy_out, f_yx_out, f_yy_out = self.interp_func.hessian(x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy) + f_xx_out, f_xy_out, f_yx_out, f_yy_out = self.interp_func.hessian( + x, y, grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy + ) f_xx_out *= scale_factor f_yy_out *= scale_factor f_xy_out *= scale_factor diff --git a/lenstronomy/LensModel/Profiles/multi_gaussian_kappa.py b/lenstronomy/LensModel/Profiles/multi_gaussian_kappa.py index 2545d6a4d..cb8e68370 100644 --- a/lenstronomy/LensModel/Profiles/multi_gaussian_kappa.py +++ b/lenstronomy/LensModel/Profiles/multi_gaussian_kappa.py @@ -1,18 +1,19 @@ import numpy as np from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa -from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import GaussianEllipsePotential +from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import ( + GaussianEllipsePotential, +) from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['MultiGaussianKappa', 'MultiGaussianKappaEllipse'] +__all__ = ["MultiGaussianKappa", "MultiGaussianKappaEllipse"] class MultiGaussianKappa(LensProfileBase): - """ + """""" - """ - param_names = ['amp', 'sigma', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'center_x': 100, 'center_y': 100} + param_names = ["amp", "sigma", "center_x", "center_y"] + lower_limit_default = {"amp": 0, "sigma": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"amp": 100, "sigma": 100, "center_x": 100, "center_y": 100} def __init__(self): self.gaussian_kappa = GaussianKappa() @@ -31,8 +32,14 @@ def function(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): """ f_ = np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_ += self.gaussian_kappa.function(x, y, amp=scale_factor*amp[i], sigma=sigma[i], - center_x=center_x, center_y=center_y) + f_ += self.gaussian_kappa.function( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + center_x=center_x, + center_y=center_y, + ) return f_ def derivatives(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): @@ -48,8 +55,14 @@ def derivatives(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): """ f_x, f_y = np.zeros_like(x, dtype=float), np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_x_i, f_y_i = self.gaussian_kappa.derivatives(x, y, amp=scale_factor*amp[i], sigma=sigma[i], - center_x=center_x, center_y=center_y) + f_x_i, f_y_i = self.gaussian_kappa.derivatives( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + center_x=center_x, + center_y=center_y, + ) f_x += f_x_i f_y += f_y_i return f_x, f_y @@ -65,11 +78,20 @@ def hessian(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): :param center_y: :return: """ - f_xx, f_yy, f_xy = np.zeros_like(x, dtype=float), np.zeros_like(x, dtype=float), np.zeros_like(x, dtype=float) + f_xx, f_yy, f_xy = ( + np.zeros_like(x, dtype=float), + np.zeros_like(x, dtype=float), + np.zeros_like(x, dtype=float), + ) for i in range(len(amp)): - f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_kappa.hessian(x, y, amp=scale_factor*amp[i], - sigma=sigma[i], center_x=center_x, - center_y=center_y) + f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_kappa.hessian( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + center_x=center_x, + center_y=center_y, + ) f_xx += f_xx_i f_yy += f_yy_i f_xy += f_xy_i @@ -85,7 +107,7 @@ def density(self, r, 
amp, sigma, scale_factor=1): """ d_ = np.zeros_like(r, dtype=float) for i in range(len(amp)): - d_ += self.gaussian_kappa.density(r, scale_factor*amp[i], sigma[i]) + d_ += self.gaussian_kappa.density(r, scale_factor * amp[i], sigma[i]) return d_ def density_2d(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): @@ -99,7 +121,9 @@ def density_2d(self, x, y, amp, sigma, center_x=0, center_y=0, scale_factor=1): """ d_3d = np.zeros_like(x, dtype=float) for i in range(len(amp)): - d_3d += self.gaussian_kappa.density_2d(x, y, scale_factor*amp[i], sigma[i], center_x, center_y) + d_3d += self.gaussian_kappa.density_2d( + x, y, scale_factor * amp[i], sigma[i], center_x, center_y + ) return d_3d def mass_3d_lens(self, R, amp, sigma, scale_factor=1): @@ -112,23 +136,40 @@ def mass_3d_lens(self, R, amp, sigma, scale_factor=1): """ mass_3d = np.zeros_like(R, dtype=float) for i in range(len(amp)): - mass_3d += self.gaussian_kappa.mass_3d_lens(R, scale_factor*amp[i], sigma[i]) + mass_3d += self.gaussian_kappa.mass_3d_lens( + R, scale_factor * amp[i], sigma[i] + ) return mass_3d class MultiGaussianKappaEllipse(LensProfileBase): - """ - - """ - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + """""" + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "sigma": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.gaussian_kappa = GaussianEllipsePotential() super(MultiGaussianKappaEllipse, self).__init__() - def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1): + def function( + self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1 + ): """ :param x: @@ -141,11 +182,21 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_facto """ f_ = np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_ += self.gaussian_kappa.function(x, y, amp=scale_factor*amp[i], sigma=sigma[i], e1=e1, e2=e2, - center_x=center_x, center_y=center_y) + f_ += self.gaussian_kappa.function( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) return f_ - def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1): + def derivatives( + self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1 + ): """ :param x: @@ -158,8 +209,16 @@ def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_fa """ f_x, f_y = np.zeros_like(x, dtype=float), np.zeros_like(x, dtype=float) for i in range(len(amp)): - f_x_i, f_y_i = self.gaussian_kappa.derivatives(x, y, amp=scale_factor*amp[i], sigma=sigma[i], e1=e1, e2=e2, - center_x=center_x, center_y=center_y) + f_x_i, f_y_i = self.gaussian_kappa.derivatives( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) f_x += f_x_i f_y += f_y_i return f_x, f_y @@ -175,10 +234,22 @@ def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor :param center_y: :return: """ - f_xx, f_yy, f_xy = np.zeros_like(x, dtype=float), np.zeros_like(x, dtype=float), 
np.zeros_like(x, dtype=float) + f_xx, f_yy, f_xy = ( + np.zeros_like(x, dtype=float), + np.zeros_like(x, dtype=float), + np.zeros_like(x, dtype=float), + ) for i in range(len(amp)): - f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_kappa.hessian(x, y, amp=scale_factor*amp[i], sigma=sigma[i], e1=e1, e2=e2, - center_x=center_x, center_y=center_y) + f_xx_i, f_xy_i, _, f_yy_i = self.gaussian_kappa.hessian( + x, + y, + amp=scale_factor * amp[i], + sigma=sigma[i], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) f_xx += f_xx_i f_yy += f_yy_i f_xy += f_xy_i @@ -194,10 +265,14 @@ def density(self, r, amp, sigma, e1, e2, scale_factor=1): """ d_ = np.zeros_like(r, dtype=float) for i in range(len(amp)): - d_ += self.gaussian_kappa.density(r, scale_factor*amp[i], sigma[i], e1, e2) + d_ += self.gaussian_kappa.density( + r, scale_factor * amp[i], sigma[i], e1, e2 + ) return d_ - def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1): + def density_2d( + self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_factor=1 + ): """ :param R: @@ -208,7 +283,9 @@ def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0, scale_fac """ d_3d = np.zeros_like(x, dtype=float) for i in range(len(amp)): - d_3d += self.gaussian_kappa.density_2d(x, y, scale_factor*amp[i], sigma[i], e1, e2, center_x, center_y) + d_3d += self.gaussian_kappa.density_2d( + x, y, scale_factor * amp[i], sigma[i], e1, e2, center_x, center_y + ) return d_3d def mass_3d_lens(self, R, amp, sigma, e1, e2, scale_factor=1): @@ -221,5 +298,7 @@ def mass_3d_lens(self, R, amp, sigma, e1, e2, scale_factor=1): """ mass_3d = np.zeros_like(R, dtype=float) for i in range(len(amp)): - mass_3d += self.gaussian_kappa.mass_3d_lens(R, scale_factor*amp[i], sigma[i], e1, e2) + mass_3d += self.gaussian_kappa.mass_3d_lens( + R, scale_factor * amp[i], sigma[i], e1, e2 + ) return mass_3d diff --git a/lenstronomy/LensModel/Profiles/multipole.py b/lenstronomy/LensModel/Profiles/multipole.py index a407c4ffe..630c1451a 100644 --- a/lenstronomy/LensModel/Profiles/multipole.py +++ b/lenstronomy/LensModel/Profiles/multipole.py @@ -1,11 +1,11 @@ -__author__ = 'lynevdv' +__author__ = "lynevdv" import numpy as np import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Multipole'] +__all__ = ["Multipole"] class Multipole(LensProfileBase): @@ -17,9 +17,22 @@ class Multipole(LensProfileBase): a_m : float, multipole strength phi_m : float, multipole orientation in radian """ - param_names = ['m', 'a_m', 'phi_m', 'center_x', 'center_y'] - lower_limit_default = {'m': 2, 'a_m': 0, 'phi_m': -np.pi, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'m': 100, 'a_m': 100, 'phi_m': np.pi, 'center_x': 100, 'center_y': 100} + + param_names = ["m", "a_m", "phi_m", "center_x", "center_y"] + lower_limit_default = { + "m": 2, + "a_m": 0, + "phi_m": -np.pi, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "m": 100, + "a_m": 100, + "phi_m": np.pi, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): """ @@ -36,7 +49,7 @@ def function(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): :return: lensing potential """ r, phi = param_util.cart2polar(x, y, center_x=center_x, center_y=center_y) - f_ = r*a_m / (1-m**2) * np.cos(m*(phi-phi_m)) + f_ = r * a_m / (1 - m**2) * np.cos(m * (phi - phi_m)) return f_ def derivatives(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): @@ -55,8 
+68,12 @@ def derivatives(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): :return: deflection angles alpha_x, alpha_y """ r, phi = param_util.cart2polar(x, y, center_x=center_x, center_y=center_y) - f_x = np.cos(phi)*a_m/(1-m**2) * np.cos(m*(phi-phi_m)) + np.sin(phi)*m*a_m/(1-m**2)*np.sin(m*(phi-phi_m)) - f_y = np.sin(phi)*a_m/(1-m**2) * np.cos(m*(phi-phi_m)) - np.cos(phi)*m*a_m/(1-m**2)*np.sin(m*(phi-phi_m)) + f_x = np.cos(phi) * a_m / (1 - m**2) * np.cos(m * (phi - phi_m)) + np.sin( + phi + ) * m * a_m / (1 - m**2) * np.sin(m * (phi - phi_m)) + f_y = np.sin(phi) * a_m / (1 - m**2) * np.cos(m * (phi - phi_m)) - np.cos( + phi + ) * m * a_m / (1 - m**2) * np.sin(m * (phi - phi_m)) return f_x, f_y def hessian(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): @@ -75,7 +92,7 @@ def hessian(self, x, y, m, a_m, phi_m, center_x=0, center_y=0): """ r, phi = param_util.cart2polar(x, y, center_x=center_x, center_y=center_y) r = np.maximum(r, 0.000001) - f_xx = 1./r * np.sin(phi)**2 * a_m * np.cos(m*(phi-phi_m)) - f_yy = 1./r * np.cos(phi)**2 * a_m * np.cos(m*(phi-phi_m)) - f_xy = -1./r * a_m * np.cos(phi) * np.sin(phi) * np.cos(m*(phi-phi_m)) + f_xx = 1.0 / r * np.sin(phi) ** 2 * a_m * np.cos(m * (phi - phi_m)) + f_yy = 1.0 / r * np.cos(phi) ** 2 * a_m * np.cos(m * (phi - phi_m)) + f_xy = -1.0 / r * a_m * np.cos(phi) * np.sin(phi) * np.cos(m * (phi - phi_m)) return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/nfw.py b/lenstronomy/LensModel/Profiles/nfw.py index d02c74af5..a2db9d612 100644 --- a/lenstronomy/LensModel/Profiles/nfw.py +++ b/lenstronomy/LensModel/Profiles/nfw.py @@ -1,16 +1,15 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to compute the Navaro-Frenk-White profile import numpy as np import scipy.interpolate as interp from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['NFW'] +__all__ = ["NFW"] class NFW(LensProfileBase): - """ - this class contains functions concerning the NFW profile + """This class contains functions concerning the NFW profile. 
relation are: R_200 = c * Rs The definition of 'Rs' is in angular (arc second) units and the normalization is put in with regard to a deflection @@ -39,12 +38,12 @@ class NFW(LensProfileBase): >>> from lenstronomy.LensModel.Profiles.nfw import NFW >>> nfw = NFW() >>> alpha_x, alpha_y = nfw.derivatives(x=1, y=1, Rs=Rs_angle, alpha_Rs=alpha_Rs, center_x=0, center_y=0) - """ - profile_name = 'NFW' - param_names = ['Rs', 'alpha_Rs', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'center_x': 100, 'center_y': 100} + + profile_name = "NFW" + param_names = ["Rs", "alpha_Rs", "center_x", "center_y"] + lower_limit_default = {"Rs": 0, "alpha_Rs": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"Rs": 100, "alpha_Rs": 10, "center_x": 100, "center_y": 100} def __init__(self, interpol=False, num_interp_X=1000, max_interp_X=10): """ @@ -61,7 +60,7 @@ def __init__(self, interpol=False, num_interp_X=1000, max_interp_X=10): def function(self, x, y, Rs, alpha_Rs, center_x=0, center_y=0): """ - + :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) :param Rs: turn over point in the slope of the NFW profile in angular unit @@ -80,8 +79,8 @@ def function(self, x, y, Rs, alpha_Rs, center_x=0, center_y=0): return f_ def derivatives(self, x, y, Rs, alpha_Rs, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW), which are the deflection angles + """Returns df/dx and df/dy of the function (integral of NFW), which are the + deflection angles. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -126,8 +125,7 @@ def hessian(self, x, y, Rs, alpha_Rs, center_x=0, center_y=0): @staticmethod def density(R, Rs, rho0): - """ - three-dimensional NFW profile + """Three-dimensional NFW profile. :param R: radius of interest :type R: float/numpy array @@ -137,12 +135,12 @@ def density(R, Rs, rho0): :type rho0: float :return: rho(R) density """ - return rho0/(R/Rs*(1+R/Rs)**2) + return rho0 / (R / Rs * (1 + R / Rs) ** 2) def density_lens(self, r, Rs, alpha_Rs): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: 3d radios :param Rs: turn-over radius of NFW profile @@ -153,8 +151,7 @@ def density_lens(self, r, Rs, alpha_Rs): return self.density(r, Rs, rho0) def density_2d(self, x, y, Rs, rho0, center_x=0, center_y=0): - """ - projected two-dimensional NFW profile (kappa) + """Projected two-dimensional NFW profile (kappa) :param x: x-coordinate :param y: y-coordinate @@ -169,13 +166,12 @@ def density_2d(self, x, y, Rs, rho0, center_x=0, center_y=0): x_ = x - center_x y_ = y - center_y R = np.sqrt(x_**2 + y_**2) - x = R/Rs + x = R / Rs Fx = self.F_(x) - return 2*rho0*Rs*Fx + return 2 * rho0 * Rs * Fx def mass_3d(self, r, Rs, rho0): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. 
:param r: 3d radius :param Rs: scale radius @@ -183,13 +179,12 @@ def mass_3d(self, r, Rs, rho0): :return: M(0 """ if isinstance(X, int) or isinstance(X, float): if X < 1 and X > 0: - a = 1/(X**2-1)*(1-2/np.sqrt(1-X**2)*np.arctanh(np.sqrt((1-X)/(1+X)))) + a = ( + 1 + / (X**2 - 1) + * ( + 1 + - 2 + / np.sqrt(1 - X**2) + * np.arctanh(np.sqrt((1 - X) / (1 + X))) + ) + ) elif X == 1: - a = 1./3 + a = 1.0 / 3 elif X > 1: - a = 1/(X**2-1)*(1-2/np.sqrt(X**2-1)*np.arctan(np.sqrt((X-1)/(1+X)))) + a = ( + 1 + / (X**2 - 1) + * ( + 1 + - 2 + / np.sqrt(X**2 - 1) + * np.arctan(np.sqrt((X - 1) / (1 + X))) + ) + ) else: # X == 0: c = 0.0000001 - a = 1/(-1)*(1-2/np.sqrt(1)*np.arctanh(np.sqrt((1-c)/(1+c)))) + a = ( + 1 + / (-1) + * (1 - 2 / np.sqrt(1) * np.arctanh(np.sqrt((1 - c) / (1 + c)))) + ) else: a = np.empty_like(X) x = X[(X < 1) & (X > 0)] - a[(X < 1) & (X > 0)] = 1/(x**2-1)*(1-2/np.sqrt(1-x**2)*np.arctanh(np.sqrt((1-x)/(1+x)))) + a[(X < 1) & (X > 0)] = ( + 1 + / (x**2 - 1) + * (1 - 2 / np.sqrt(1 - x**2) * np.arctanh(np.sqrt((1 - x) / (1 + x)))) + ) - a[X == 1] = 1./3. + a[X == 1] = 1.0 / 3.0 x = X[X > 1] - a[X > 1] = 1/(x**2-1)*(1-2/np.sqrt(x**2-1)*np.arctan(np.sqrt((x-1)/(1+x)))) + a[X > 1] = ( + 1 + / (x**2 - 1) + * (1 - 2 / np.sqrt(x**2 - 1) * np.arctan(np.sqrt((x - 1) / (1 + x)))) + ) # a[X>y] = 0 c = 0.0000001 - a[X == 0] = 1/(-1)*(1-2/np.sqrt(1)*np.arctanh(np.sqrt((1-c)/(1+c)))) + a[X == 0] = ( + 1 / (-1) * (1 - 2 / np.sqrt(1) * np.arctanh(np.sqrt((1 - c) / (1 + c)))) + ) return a def g_(self, X): - """ - computes h() + """Computes h() :param X: R/Rs :type X: float >0 :return: """ if self._interpol: - if not hasattr(self, '_g_interp'): + if not hasattr(self, "_g_interp"): x = np.linspace(0, self._max_interp_X, self._num_interp_X) g_x = self._g(x) - self._g_interp = interp.interp1d(x, g_x, kind='linear', axis=-1, copy=False, bounds_error=False, - fill_value=0, assume_sorted=True) + self._g_interp = interp.interp1d( + x, + g_x, + kind="linear", + axis=-1, + copy=False, + bounds_error=False, + fill_value=0, + assume_sorted=True, + ) return self._g_interp(X) else: return self._g(X) @staticmethod def _g(X): - """ - - analytic solution of integral for NFW profile to compute deflection angel and gamma + """Analytic solution of integral for NFW profile to compute deflection angel and + gamma. :param X: R/Rs :type X: float >0 @@ -372,45 +405,50 @@ def _g(X): if isinstance(X, int) or isinstance(X, float): if X < 1: x = max(c, X) - a = np.log(x/2.) + 1/np.sqrt(1-x**2)*np.arccosh(1./x) + a = np.log(x / 2.0) + 1 / np.sqrt(1 - x**2) * np.arccosh(1.0 / x) elif X == 1: - a = 1 + np.log(1./2.) + a = 1 + np.log(1.0 / 2.0) else: # X > 1: - a = np.log(X/2) + 1/np.sqrt(X**2-1)*np.arccos(1./X) + a = np.log(X / 2) + 1 / np.sqrt(X**2 - 1) * np.arccos(1.0 / X) else: a = np.empty_like(X) X[X <= c] = c x = X[X < 1] - a[X < 1] = np.log(x/2.) + 1/np.sqrt(1-x**2)*np.arccosh(1./x) - a[X == 1] = 1 + np.log(1./2.) 
+ a[X < 1] = np.log(x / 2.0) + 1 / np.sqrt(1 - x**2) * np.arccosh(1.0 / x) + a[X == 1] = 1 + np.log(1.0 / 2.0) x = X[X > 1] - a[X > 1] = np.log(x/2) + 1/np.sqrt(x**2-1)*np.arccos(1./x) + a[X > 1] = np.log(x / 2) + 1 / np.sqrt(x**2 - 1) * np.arccos(1.0 / x) return a def h_(self, X): - """ - computes h() + """Computes h() :param X: R/Rs :type X: float >0 :return: h(X) """ if self._interpol: - if not hasattr(self, '_h_interp'): + if not hasattr(self, "_h_interp"): x = np.linspace(0, self._max_interp_X, self._num_interp_X) h_x = self._h(x) - self._h_interp = interp.interp1d(x, h_x, kind='linear', axis=-1, copy=False, bounds_error=False, - fill_value=0, assume_sorted=True) + self._h_interp = interp.interp1d( + x, + h_x, + kind="linear", + axis=-1, + copy=False, + bounds_error=False, + fill_value=0, + assume_sorted=True, + ) return self._h_interp(X) else: return self._h(X) @staticmethod def _h(X): - """ - - analytic solution of integral for NFW profile to compute the potential + """Analytic solution of integral for NFW profile to compute the potential. :param X: R/Rs :type X: float >0 @@ -419,42 +457,38 @@ def _h(X): if isinstance(X, int) or isinstance(X, float): if X < 1: x = max(0.001, X) - a = np.log(x/2.)**2 - np.arccosh(1./x)**2 + a = np.log(x / 2.0) ** 2 - np.arccosh(1.0 / x) ** 2 else: # X >= 1: - a = np.log(X/2.)**2 + np.arccos(1./X)**2 + a = np.log(X / 2.0) ** 2 + np.arccos(1.0 / X) ** 2 else: a = np.empty_like(X) X[X <= c] = 0.000001 x = X[X < 1] - a[X < 1] = np.log(x/2.)**2 - np.arccosh(1./x)**2 + a[X < 1] = np.log(x / 2.0) ** 2 - np.arccosh(1.0 / x) ** 2 x = X[X >= 1] - a[X >= 1] = np.log(x/2.)**2 + np.arccos(1./x)**2 + a[X >= 1] = np.log(x / 2.0) ** 2 + np.arccos(1.0 / x) ** 2 return a @staticmethod def alpha2rho0(alpha_Rs, Rs): - - """ - convert angle at Rs into rho0 + """Convert angle at Rs into rho0. :param alpha_Rs: deflection angle at RS :param Rs: scale radius :return: density normalization (characteristic density) """ - rho0 = alpha_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.))) + rho0 = alpha_Rs / (4.0 * Rs**2 * (1.0 + np.log(1.0 / 2.0))) return rho0 @staticmethod def rho02alpha(rho0, Rs): - - """ - convert rho0 to angle at Rs + """Convert rho0 to angle at Rs. :param rho0: density normalization (characteristic density) :param Rs: scale radius :return: deflection angle at RS """ - alpha_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.))) + alpha_Rs = rho0 * (4 * Rs**2 * (1 + np.log(1.0 / 2.0))) return alpha_Rs diff --git a/lenstronomy/LensModel/Profiles/nfw_core_truncated.py b/lenstronomy/LensModel/Profiles/nfw_core_truncated.py index 525e9c1f6..56f5689be 100644 --- a/lenstronomy/LensModel/Profiles/nfw_core_truncated.py +++ b/lenstronomy/LensModel/Profiles/nfw_core_truncated.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" # this file contains a class to compute lensing proprerties of a pseudo Navaro-Frenk-White profile with a core and truncation # radius @@ -6,13 +6,12 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from scipy.integrate import quad -__all__ = ['TNFWC'] +__all__ = ["TNFWC"] class TNFWC(LensProfileBase): - """ - - This class contains an pseudo NFW profile with a core radius and a truncation radius. The density in 3D is given by + """This class contains an pseudo NFW profile with a core radius and a truncation + radius. The density in 3D is given by. .. 
math:: \\rho(r) = \\frac{\\rho_0 r_s^3}{\\left(r^2+r_c^2\\right)^{1/2} \\left(r_s^2+r^2\\right)} \\left(\\frac{r_t^2}{r^2+r_t^2}\\right) @@ -23,16 +22,28 @@ class TNFWC(LensProfileBase): TODO: add the gravitational potential for this profile TODO: add analytic solution for 3D mass """ - profile_name = 'TNFWC' - param_names = ['Rs', 'alpha_Rs', 'center_x', 'center_y', 'r_trunc', 'r_core'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'center_x': -100, 'center_y': -100, 'r_trunc': 0.001, - 'r_core': 0.00001} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'center_x': 100, 'center_y': 100, 'r_trunc': 1000.0, - 'r_core': 1000.0} + + profile_name = "TNFWC" + param_names = ["Rs", "alpha_Rs", "center_x", "center_y", "r_trunc", "r_core"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "center_x": -100, + "center_y": -100, + "r_trunc": 0.001, + "r_core": 0.00001, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "center_x": 100, + "center_y": 100, + "r_trunc": 1000.0, + "r_core": 1000.0, + } def derivatives(self, x, y, Rs, alpha_Rs, r_core, r_trunc, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function which are the deflection angles + """Returns df/dx and df/dy of the function which are the deflection angles. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -48,7 +59,7 @@ def derivatives(self, x, y, Rs, alpha_Rs, r_core, r_trunc, center_x=0, center_y= Rs = np.maximum(Rs, 0.00000001) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) f_x, f_y = self.nfw_alpha(R, Rs, rho0_input, r_core, r_trunc, x_, y_) return f_x, f_y @@ -68,7 +79,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, r_trunc, center_x=0, center_y=0): rho0_input = self.alpha2rho0(alpha_Rs, Rs, r_core, r_trunc) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, 0.00000001) kappa = self.density_2d(R, 0, Rs, rho0_input, r_core, r_trunc) gamma1, gamma2 = self.nfw_gamma(R, Rs, rho0_input, r_core, r_trunc, x_, y_) @@ -79,8 +90,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_core, r_trunc, center_x=0, center_y=0): @staticmethod def density(R, Rs, rho0, r_core, r_trunc): - """ - 3D density profile + """3D density profile. :param R: radius of interest :type Rs: scale radius @@ -92,16 +102,16 @@ def density(R, Rs, rho0, r_core, r_trunc): x = R / Rs beta = r_core / Rs tau = r_trunc / Rs - denom_core = (beta ** 2 + x ** 2) ** 0.5 - denom_nfw = (1 + x ** 2) - denom_trunc = (x ** 2 + tau ** 2) / tau ** 2 + denom_core = (beta**2 + x**2) ** 0.5 + denom_nfw = 1 + x**2 + denom_trunc = (x**2 + tau**2) / tau**2 denom = denom_core * denom_nfw * denom_trunc return rho0 / denom def density_lens(self, r, Rs, alpha_Rs, r_core, r_trunc): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: 3d radios :param Rs: scale radius @@ -114,8 +124,7 @@ def density_lens(self, r, Rs, alpha_Rs, r_core, r_trunc): return self.density(r, Rs, rho0, r_core, r_trunc) def density_2d(self, x, y, Rs, rho0, r_core, r_trunc, center_x=0, center_y=0): - """ - 2D (projected) density profile + """2D (projected) density profile. 
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -129,7 +138,7 @@ def density_2d(self, x, y, Rs, rho0, r_core, r_trunc, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) x = R / Rs beta = r_core / Rs tau = r_trunc / Rs @@ -137,8 +146,7 @@ def density_2d(self, x, y, Rs, rho0, r_core, r_trunc, center_x=0, center_y=0): return 2 * rho0 * Rs * Fx def mass_3d(self, r, Rs, rho0, r_core, r_trunc): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: 3d radius :param Rs: scale radius @@ -147,13 +155,12 @@ def mass_3d(self, r, Rs, rho0, r_core, r_trunc): :param r_trunc: truncation radius [arcsec] :return: M(0 @@ -239,12 +244,12 @@ def _f(self, x, b, t): :param t: truncation radius divided by the scale radius :return: solution to the projection integral """ - prefactor = t ** 2 / (t ** 2 - 1) + prefactor = t**2 / (t**2 - 1) return prefactor * (self._u1(x, b, 1.0) - self._u1(x, b, t)) def _g(self, x, b, t): - """ - analytic solution of integral for NFW profile to compute deflection angle and gamma + """Analytic solution of integral for NFW profile to compute deflection angle and + gamma. :param X: R/Rs :type X: float >0 @@ -254,8 +259,13 @@ def _g(self, x, b, t): """ if b == t: t += 1e-3 - prefactor = abs(t ** 2 / (t ** 2 - 1)) - return prefactor * (-self._u2(x, b, t) + self._u2(0.0, b, t) + self._u2(x, b, 1.0) - self._u2(0.0, b, 1.0)) + prefactor = abs(t**2 / (t**2 - 1)) + return prefactor * ( + -self._u2(x, b, t) + + self._u2(0.0, b, t) + + self._u2(x, b, 1.0) + - self._u2(0.0, b, 1.0) + ) @staticmethod def _u1(x, b, t): @@ -265,9 +275,9 @@ def _u1(x, b, t): :param t: truncation radius divided by the scale radius """ - t2x2 = t ** 2 + x ** 2 - b2x2 = b ** 2 + x ** 2 - b2mt2 = b ** 2 - t ** 2 + t2x2 = t**2 + x**2 + b2x2 = b**2 + x**2 + b2mt2 = b**2 - t**2 if t > b: func = np.arccosh b2mt2 *= -1 @@ -283,12 +293,10 @@ def _u2(self, x, b, t): :param t: truncation radius divided by the scale radius """ - return (t ** 2 + x ** 2) * self._u1(x, b, t) + return (t**2 + x**2) * self._u1(x, b, t) def alpha2rho0(self, alpha_Rs, Rs, r_core, r_trunc): - - """ - convert angle at Rs into rho0 + """Convert angle at Rs into rho0. :param alpha_Rs: deflection angle at RS :param Rs: scale radius @@ -300,13 +308,11 @@ def alpha2rho0(self, alpha_Rs, Rs, r_core, r_trunc): beta = r_core / Rs tau = r_trunc / Rs gx = self._g(1.0, beta, tau) - rho0 = alpha_Rs / (4 * Rs ** 2 * gx) + rho0 = alpha_Rs / (4 * Rs**2 * gx) return rho0 def rho02alpha(self, rho0, Rs, r_core, r_trunc): - - """ - convert rho0 to angle at Rs + """Convert rho0 to angle at Rs. 
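A round-trip sketch of the two normalization conversions defined for this profile (values are illustrative; the default constructor is assumed):

>>> from lenstronomy.LensModel.Profiles.nfw_core_truncated import TNFWC
>>> tnfwc = TNFWC()
>>> Rs, alpha_Rs, r_core, r_trunc = 1.0, 0.3, 0.05, 5.0  # arcsec
>>> rho0 = tnfwc.alpha2rho0(alpha_Rs, Rs, r_core, r_trunc)
>>> tnfwc.rho02alpha(rho0, Rs, r_core, r_trunc)  # recovers alpha_Rs up to numerical precision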
:param rho0: density normalization :param Rs: scale radius diff --git a/lenstronomy/LensModel/Profiles/nfw_ellipse.py b/lenstronomy/LensModel/Profiles/nfw_ellipse.py index cc2899dbd..22d83f53f 100644 --- a/lenstronomy/LensModel/Profiles/nfw_ellipse.py +++ b/lenstronomy/LensModel/Profiles/nfw_ellipse.py @@ -1,26 +1,41 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.nfw import NFW import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['NFW_ELLIPSE'] +__all__ = ["NFW_ELLIPSE"] class NFW_ELLIPSE(LensProfileBase): - """ - this class contains functions concerning the NFW profile with an ellipticity defined in the potential - parameterization of alpha_Rs and Rs is the same as for the spherical NFW profile + """This class contains functions concerning the NFW profile with an ellipticity + defined in the potential parameterization of alpha_Rs and Rs is the same as for the + spherical NFW profile. from Glose & Kneib: https://cds.cern.ch/record/529584/files/0112138.pdf relation are: R_200 = c * Rs """ - profile_name = 'NFW_ELLIPSE' - param_names = ['Rs', 'alpha_Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + profile_name = "NFW_ELLIPSE" + param_names = ["Rs", "alpha_Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, interpol=False, num_interp_X=1000, max_interp_X=10): """ @@ -30,13 +45,14 @@ def __init__(self, interpol=False, num_interp_X=1000, max_interp_X=10): :param max_interp_X: float (only considered if interpol=True), maximum r/r_s value to be interpolated (returning zeros outside) """ - self.nfw = NFW(interpol=interpol, num_interp_X=num_interp_X, max_interp_X=max_interp_X) + self.nfw = NFW( + interpol=interpol, num_interp_X=num_interp_X, max_interp_X=max_interp_X + ) self._diff = 0.0000000001 super(NFW_ELLIPSE, self).__init__() def function(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns elliptically distorted NFW lensing potential + """Returns elliptically distorted NFW lensing potential. 
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -48,7 +64,9 @@ def function(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): :param center_y: center of halo (in angular units) :return: lensing potential """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) R_ = np.sqrt(x_**2 + y_**2) rho0_input = self.nfw.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) if Rs < 0.0000001: @@ -57,9 +75,8 @@ def function(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): return f_ def derivatives(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function, calculated as an elliptically distorted deflection angle of the - spherical NFW profile + """Returns df/dx and df/dy of the function, calculated as an elliptically + distorted deflection angle of the spherical NFW profile. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -71,28 +88,29 @@ def derivatives(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): :param center_y: center of halo (in angular units) :return: deflection in x-direction, deflection in y-direction """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) phi_G, q = param_util.ellipticity2phi_q(e1, e2) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) e = param_util.q2e(q) # e = abs(1 - q) - R_ = np.sqrt(x_ ** 2 + y_ ** 2) + R_ = np.sqrt(x_**2 + y_**2) rho0_input = self.nfw.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) if Rs < 0.0000001: Rs = 0.0000001 f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, x_, y_) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy - the calculation is performed as a numerical differential from the deflection field. Analytical relations are - possible + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy the + calculation is performed as a numerical differential from the deflection field. + Analytical relations are possible. 
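For orientation, a usage sketch of the elliptical profile with the convergence recovered from the Hessian trace (parameter values are made up for illustration):

>>> from lenstronomy.LensModel.Profiles.nfw_ellipse import NFW_ELLIPSE
>>> nfw_e = NFW_ELLIPSE()
>>> f_xx, f_xy, f_yx, f_yy = nfw_e.hessian(x=1.0, y=0.5, Rs=1.0, alpha_Rs=0.3, e1=0.1, e2=-0.05)
>>> kappa = 0.5 * (f_xx + f_yy)  # convergence at (x, y)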
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -104,15 +122,21 @@ def hessian(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): :param center_y: center of halo (in angular units) :return: d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 """ - alpha_ra, alpha_dec = self.derivatives(x, y, Rs, alpha_Rs, e1, e2, center_x, center_y) + alpha_ra, alpha_dec = self.derivatives( + x, y, Rs, alpha_Rs, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, Rs, alpha_Rs, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, Rs, alpha_Rs, e1, e2, center_x, center_y) - - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = (alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, Rs, alpha_Rs, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, Rs, alpha_Rs, e1, e2, center_x, center_y + ) + + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy @@ -129,9 +153,9 @@ def mass_3d_lens(self, r, Rs, alpha_Rs, e1=1, e2=0): return self.nfw.mass_3d_lens(r, Rs, alpha_Rs) def density_lens(self, r, Rs, alpha_Rs, e1=1, e2=0): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. 
:param r: 3d radios :param Rs: turn-over radius of NFW profile diff --git a/lenstronomy/LensModel/Profiles/nfw_ellipse_cse.py b/lenstronomy/LensModel/Profiles/nfw_ellipse_cse.py index 98e74375f..3b86d58e8 100644 --- a/lenstronomy/LensModel/Profiles/nfw_ellipse_cse.py +++ b/lenstronomy/LensModel/Profiles/nfw_ellipse_cse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.Util import util @@ -7,7 +7,7 @@ from lenstronomy.LensModel.Profiles.cored_steep_ellipsoid import CSEProductAvgSet import lenstronomy.Util.param_util as param_util -__all__ = ['NFW_ELLIPSE_CSE'] +__all__ = ["NFW_ELLIPSE_CSE"] class NFW_ELLIPSE_CSE(NFW_ELLIPSE): @@ -21,10 +21,25 @@ class NFW_ELLIPSE_CSE(NFW_ELLIPSE): """ - profile_name = 'NFW_ELLIPSE_CSE' - param_names = ['Rs', 'alpha_Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + profile_name = "NFW_ELLIPSE_CSE" + param_names = ["Rs", "alpha_Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, high_accuracy=True): """ @@ -36,41 +51,142 @@ def __init__(self, high_accuracy=True): self.nfw = NFW() if high_accuracy is True: # Table 1 in Oguri 2021 - self._s_list = [1.082411e-06, 8.786566e-06, 3.292868e-06, 1.860019e-05, 3.274231e-05, - 6.232485e-05, 9.256333e-05, 1.546762e-04, 2.097321e-04, 3.391140e-04, - 5.178790e-04, 8.636736e-04, 1.405152e-03, 2.193855e-03, 3.179572e-03, - 4.970987e-03, 7.631970e-03, 1.119413e-02, 1.827267e-02, 2.945251e-02, - 4.562723e-02, 6.782509e-02, 1.596987e-01, 1.127751e-01, 2.169469e-01, - 3.423835e-01, 5.194527e-01, 8.623185e-01, 1.382737e+00, 2.034929e+00, - 3.402979e+00, 5.594276e+00, 8.052345e+00, 1.349045e+01, 2.603825e+01, - 4.736823e+01, 6.559320e+01, 1.087932e+02, 1.477673e+02, 2.495341e+02, - 4.305999e+02, 7.760206e+02, 2.143057e+03, 1.935749e+03] - self._a_list = [1.648988e-18, 6.274458e-16, 3.646620e-17, 3.459206e-15, 2.457389e-14, - 1.059319e-13, 4.211597e-13, 1.142832e-12, 4.391215e-12, 1.556500e-11, - 6.951271e-11, 3.147466e-10, 1.379109e-09, 3.829778e-09, 1.384858e-08, - 5.370951e-08, 1.804384e-07, 5.788608e-07, 3.205256e-06, 1.102422e-05, - 4.093971e-05, 1.282206e-04, 4.575541e-04, 7.995270e-04, 5.013701e-03, - 1.403508e-02, 5.230727e-02, 1.898907e-01, 3.643448e-01, 7.203734e-01, - 1.717667e+00, 2.217566e+00, 3.187447e+00, 8.194898e+00, 1.765210e+01, - 1.974319e+01, 2.783688e+01, 4.482311e+01, 5.598897e+01, 1.426485e+02, - 2.279833e+02, 5.401335e+02, 9.743682e+02, 1.775124e+03] + self._s_list = [ + 1.082411e-06, + 8.786566e-06, + 3.292868e-06, + 1.860019e-05, + 3.274231e-05, + 6.232485e-05, + 9.256333e-05, + 1.546762e-04, + 2.097321e-04, + 3.391140e-04, + 5.178790e-04, + 8.636736e-04, + 1.405152e-03, + 2.193855e-03, + 3.179572e-03, + 4.970987e-03, + 7.631970e-03, + 1.119413e-02, + 1.827267e-02, + 2.945251e-02, + 4.562723e-02, + 6.782509e-02, + 1.596987e-01, + 1.127751e-01, + 2.169469e-01, + 3.423835e-01, + 5.194527e-01, + 8.623185e-01, + 1.382737e00, + 2.034929e00, + 3.402979e00, + 5.594276e00, + 8.052345e00, + 1.349045e01, + 2.603825e01, + 4.736823e01, + 6.559320e01, + 1.087932e02, + 1.477673e02, + 2.495341e02, + 
4.305999e02, + 7.760206e02, + 2.143057e03, + 1.935749e03, + ] + self._a_list = [ + 1.648988e-18, + 6.274458e-16, + 3.646620e-17, + 3.459206e-15, + 2.457389e-14, + 1.059319e-13, + 4.211597e-13, + 1.142832e-12, + 4.391215e-12, + 1.556500e-11, + 6.951271e-11, + 3.147466e-10, + 1.379109e-09, + 3.829778e-09, + 1.384858e-08, + 5.370951e-08, + 1.804384e-07, + 5.788608e-07, + 3.205256e-06, + 1.102422e-05, + 4.093971e-05, + 1.282206e-04, + 4.575541e-04, + 7.995270e-04, + 5.013701e-03, + 1.403508e-02, + 5.230727e-02, + 1.898907e-01, + 3.643448e-01, + 7.203734e-01, + 1.717667e00, + 2.217566e00, + 3.187447e00, + 8.194898e00, + 1.765210e01, + 1.974319e01, + 2.783688e01, + 4.482311e01, + 5.598897e01, + 1.426485e02, + 2.279833e02, + 5.401335e02, + 9.743682e02, + 1.775124e03, + ] else: # Table 3 in Oguri 2021 - self._a_list = [1.434960e-16, 5.232413e-14, 2.666660e-12, 7.961761e-11, 2.306895e-09, - 6.742968e-08, 1.991691e-06, 5.904388e-05, 1.693069e-03, 4.039850e-02, - 5.665072e-01, 3.683242e+00, 1.582481e+01, 6.340984e+01, 2.576763e+02, - 1.422619e+03] - self._s_list = [4.041628e-06, 3.086267e-05, 1.298542e-04, 4.131977e-04, 1.271373e-03, - 3.912641e-03, 1.208331e-02, 3.740521e-02, 1.153247e-01, 3.472038e-01, - 1.017550e+00, 3.253031e+00, 1.190315e+01, 4.627701e+01, 1.842613e+02, - 8.206569e+02] + self._a_list = [ + 1.434960e-16, + 5.232413e-14, + 2.666660e-12, + 7.961761e-11, + 2.306895e-09, + 6.742968e-08, + 1.991691e-06, + 5.904388e-05, + 1.693069e-03, + 4.039850e-02, + 5.665072e-01, + 3.683242e00, + 1.582481e01, + 6.340984e01, + 2.576763e02, + 1.422619e03, + ] + self._s_list = [ + 4.041628e-06, + 3.086267e-05, + 1.298542e-04, + 4.131977e-04, + 1.271373e-03, + 3.912641e-03, + 1.208331e-02, + 3.740521e-02, + 1.153247e-01, + 3.472038e-01, + 1.017550e00, + 3.253031e00, + 1.190315e01, + 4.627701e01, + 1.842613e02, + 8.206569e02, + ] super(NFW_ELLIPSE_CSE, self).__init__() def function(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns elliptically distorted NFW lensing potential + """Returns elliptically distorted NFW lensing potential. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -90,14 +206,15 @@ def function(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): x__, y__ = util.rotate(x_, y_, phi_q) # potential calculation - f_ = self.cse_major_axis_set.function(x__/Rs, y__/Rs, self._a_list, self._s_list, q) + f_ = self.cse_major_axis_set.function( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) const = self._normalization(alpha_Rs, Rs, q) return const * f_ def derivatives(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function, calculated as an elliptically distorted deflection angle of the - spherical NFW profile + """Returns df/dx and df/dy of the function, calculated as an elliptically + distorted deflection angle of the spherical NFW profile. 
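Since the CSE decomposition approximates the NFW convergence with the tabulated (a, s) amplitudes and scales listed above (Oguri 2021), deflections follow from evaluating the component set in the rotated frame; a minimal sketch with illustrative values:

>>> from lenstronomy.LensModel.Profiles.nfw_ellipse_cse import NFW_ELLIPSE_CSE
>>> nfw_cse = NFW_ELLIPSE_CSE(high_accuracy=True)
>>> alpha_x, alpha_y = nfw_cse.derivatives(x=1.0, y=0.5, Rs=1.0, alpha_Rs=0.3, e1=0.1, e2=0.0)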
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -115,7 +232,9 @@ def derivatives(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): y_ = y - center_y # rotate x__, y__ = util.rotate(x_, y_, phi_q) - f__x, f__y = self.cse_major_axis_set.derivatives(x__/Rs, y__/Rs, self._a_list, self._s_list, q) + f__x, f__y = self.cse_major_axis_set.derivatives( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) # rotate deflections back f_x, f_y = util.rotate(f__x, f__y, -phi_q) @@ -123,9 +242,8 @@ def derivatives(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): return const * f_x, const * f_y def hessian(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy - the calculation is performed as a numerical differential from the deflection field. + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy the + calculation is performed as a numerical differential from the deflection field. Analytical relations are possible. :param x: angular position (normally in units of arc seconds) @@ -144,11 +262,13 @@ def hessian(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): y_ = y - center_y # rotate x__, y__ = util.rotate(x_, y_, phi_q) - f__xx, f__xy, __, f__yy = self.cse_major_axis_set.hessian(x__/Rs, y__/Rs, self._a_list, self._s_list, q) + f__xx, f__xy, __, f__yy = self.cse_major_axis_set.hessian( + x__ / Rs, y__ / Rs, self._a_list, self._s_list, q + ) # rotate back - kappa = 1. / 2 * (f__xx + f__yy) - gamma1__ = 1. / 2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_q) * gamma1__ - np.sin(2 * phi_q) * gamma2__ gamma2 = +np.sin(2 * phi_q) * gamma1__ + np.cos(2 * phi_q) * gamma2__ @@ -160,8 +280,7 @@ def hessian(self, x, y, Rs, alpha_Rs, e1, e2, center_x=0, center_y=0): return const * f_xx, const * f_xy, const * f_xy, const * f_yy def _normalization(self, alpha_Rs, Rs, q): - """ - applying to eqn 7 and 8 in Oguri 2021 from phenomenological definition + """Applying to eqn 7 and 8 in Oguri 2021 from phenomenological definition. :param alpha_Rs: deflection at Rs :param Rs: scale radius @@ -170,5 +289,5 @@ def _normalization(self, alpha_Rs, Rs, q): """ rho0 = self.nfw.alpha2rho0(alpha_Rs, Rs) rs_ = Rs - const = 4 * rho0 * rs_ ** 3 + const = 4 * rho0 * rs_**3 return const diff --git a/lenstronomy/LensModel/Profiles/nfw_mass_concentration.py b/lenstronomy/LensModel/Profiles/nfw_mass_concentration.py index 32e718d59..7049b5508 100644 --- a/lenstronomy/LensModel/Profiles/nfw_mass_concentration.py +++ b/lenstronomy/LensModel/Profiles/nfw_mass_concentration.py @@ -1,11 +1,11 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to compute the Navaro-Frank-White function in mass/kappa space from lenstronomy.LensModel.Profiles.nfw import NFW from lenstronomy.Cosmo.lens_cosmo import LensCosmo from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['NFWMC'] +__all__ = ["NFWMC"] class NFWMC(LensProfileBase): @@ -20,9 +20,20 @@ class NFWMC(LensProfileBase): recommended to use the default NFW lensing profile parameterized in reduced deflection angles. 
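A minimal sketch of the mass-concentration parameterization (cosmology and halo values are illustrative only):

>>> from astropy.cosmology import FlatLambdaCDM
>>> from lenstronomy.LensModel.Profiles.nfw_mass_concentration import NFWMC
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
>>> nfw_mc = NFWMC(z_lens=0.5, z_source=2.0, cosmo=cosmo)
>>> alpha_x, alpha_y = nfw_mc.derivatives(x=1.0, y=0.0, logM=13.0, concentration=6.0)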
""" - param_names = ['logM', 'concentration', 'center_x', 'center_y'] - lower_limit_default = {'logM': 0, 'concentration': 0.01, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'logM': 16, 'concentration': 1000, 'center_x': 100, 'center_y': 100} + + param_names = ["logM", "concentration", "center_x", "center_y"] + lower_limit_default = { + "logM": 0, + "concentration": 0.01, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "logM": 16, + "concentration": 1000, + "center_x": 100, + "center_y": 100, + } def __init__(self, z_lens, z_source, cosmo=None, static=False): """ @@ -36,6 +47,7 @@ def __init__(self, z_lens, z_source, cosmo=None, static=False): if cosmo is None: # TODO: print waring if these lines get executed from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self._lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) self._static = static @@ -50,7 +62,7 @@ def _m_c2deflections(self, logM, concentration): """ if self._static is True: return self._Rs_static, self._alpha_Rs_static - M = 10 ** logM + M = 10**logM Rs, alpha_Rs = self._lens_cosmo.nfw_physical2angle(M, concentration) return Rs, alpha_Rs @@ -64,8 +76,10 @@ def set_static(self, logM, concentration, center_x=0, center_y=0): :return: """ self._static = True - M = 10 ** logM - self._Rs_static, self._alpha_Rs_static = self._lens_cosmo.nfw_physical2angle(M, concentration) + M = 10**logM + self._Rs_static, self._alpha_Rs_static = self._lens_cosmo.nfw_physical2angle( + M, concentration + ) def set_dynamic(self): """ @@ -73,9 +87,9 @@ def set_dynamic(self): :return: """ self._static = False - if hasattr(self, '_Rs_static'): + if hasattr(self, "_Rs_static"): del self._Rs_static - if hasattr(self, '_alpha_Rs_static'): + if hasattr(self, "_alpha_Rs_static"): del self._alpha_Rs_static def function(self, x, y, logM, concentration, center_x=0, center_y=0): @@ -90,18 +104,17 @@ def function(self, x, y, logM, concentration, center_x=0, center_y=0): :return: """ Rs, alpha_Rs = self._m_c2deflections(logM, concentration) - return self._nfw.function(x, y, alpha_Rs=alpha_Rs, Rs=Rs, center_x=center_x, center_y=center_y) + return self._nfw.function( + x, y, alpha_Rs=alpha_Rs, Rs=Rs, center_x=center_x, center_y=center_y + ) def derivatives(self, x, y, logM, concentration, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW) - """ + """Returns df/dx and df/dy of the function (integral of NFW)""" Rs, alpha_Rs = self._m_c2deflections(logM, concentration) return self._nfw.derivatives(x, y, Rs, alpha_Rs, center_x, center_y) def hessian(self, x, y, logM, concentration, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" Rs, alpha_Rs = self._m_c2deflections(logM, concentration) return self._nfw.hessian(x, y, Rs, alpha_Rs, center_x, center_y) diff --git a/lenstronomy/LensModel/Profiles/nfw_vir_trunc.py b/lenstronomy/LensModel/Profiles/nfw_vir_trunc.py index 94e5cb5a9..dc089c4bb 100644 --- a/lenstronomy/LensModel/Profiles/nfw_vir_trunc.py +++ b/lenstronomy/LensModel/Profiles/nfw_vir_trunc.py @@ -1,14 +1,14 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to compute the Navaro-Frank-White function in mass/kappa space # the potential therefore is its integral import numpy as np -from lenstronomy.Util import constants as const +from lenstronomy.Util 
import constants as const from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.Cosmo.lens_cosmo import LensCosmo -__all__ = ['NFWVirTrunc'] +__all__ = ["NFWVirTrunc"] class NFWVirTrunc(LensProfileBase): @@ -18,6 +18,7 @@ class NFWVirTrunc(LensProfileBase): relation are: R_200 = c * Rs """ + def __init__(self, z_lens, z_source, cosmo=None): """ @@ -28,26 +29,33 @@ def __init__(self, z_lens, z_source, cosmo=None): if cosmo is None: from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self._lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) super(NFWVirTrunc, self).__init__() def kappa(self, theta, logM, c): - """ - projected surface brightness + """Projected surface brightness. :param theta: radial angle from the center of the profile :param logM: log_10 halo mass in physical units of M_sun :param c: concentration of the halo; r_200 = c * r_s :return: convergence at theta """ - M = 10. ** logM + M = 10.0**logM theta_r200 = self._lens_cosmo.nfw_M_theta_r200(M) - #r_vir = theta_vir * self._lens_cosmo.D_d * const.arcsec # physical Mpc - #print(r_vir, 'r_vir') + # r_vir = theta_vir * self._lens_cosmo.D_d * const.arcsec # physical Mpc + # print(r_vir, 'r_vir') x = c * theta / theta_r200 f = self._f(c) - return M / self._lens_cosmo.sigma_crit_angle * c ** 2 * f / (2 * np.pi * theta_r200 ** 2) * self._G(x, c) + return ( + M + / self._lens_cosmo.sigma_crit_angle + * c**2 + * f + / (2 * np.pi * theta_r200**2) + * self._G(x, c) + ) def _G(self, x, c): """ @@ -61,12 +69,15 @@ def _G(self, x, c): if isinstance(x, int) or isinstance(x, float): if x < 1: x = max(s, x) - a = - np.sqrt(c**2 - x**2) / (1 - x**2) / (1 + c) + 1 / (1 - x**2)**(3./2) * np.arccosh((x**2 + c) / (x * (1 + c))) + a = -np.sqrt(c**2 - x**2) / (1 - x**2) / (1 + c) + 1 / ( + 1 - x**2 + ) ** (3.0 / 2) * np.arccosh((x**2 + c) / (x * (1 + c))) elif x == 1: - a = np.sqrt(c**2 - 1) / (3 * (1 + c)) * (1 + 1 / (c + 1.)) + a = np.sqrt(c**2 - 1) / (3 * (1 + c)) * (1 + 1 / (c + 1.0)) elif x <= c: # X > 1: - a = - np.sqrt(c ** 2 - x ** 2) / (1 - x ** 2) / (1 + c) - 1 / (x ** 2 - 1) ** (3. / 2) * np.arccos( - (x ** 2 + c) / (x * (1 + c))) + a = -np.sqrt(c**2 - x**2) / (1 - x**2) / (1 + c) - 1 / ( + x**2 - 1 + ) ** (3.0 / 2) * np.arccos((x**2 + c) / (x * (1 + c))) else: a = 0 @@ -74,12 +85,17 @@ def _G(self, x, c): a = np.zeros_like(x) x[x <= s] = s x_ = x[x < 1] - a[x < 1] = - np.sqrt(c**2 - x_**2) / ((1 - x_**2) * (1 + c)) + 1 / (1 - x_**2)**(3./2) * np.arccosh((x_**2 + c) / (x_ * (1 + c))) - a[x == 1] = np.sqrt(c**2 - 1) / (3 * (1 + c)) * (1 + 1 / (c + 1.)) + a[x < 1] = -np.sqrt(c**2 - x_**2) / ((1 - x_**2) * (1 + c)) + 1 / ( + 1 - x_**2 + ) ** (3.0 / 2) * np.arccosh((x_**2 + c) / (x_ * (1 + c))) + a[x == 1] = np.sqrt(c**2 - 1) / (3 * (1 + c)) * (1 + 1 / (c + 1.0)) x_ = x[(x > 1) & (x <= c)] - a[(x > 1) & (x <= c)] = - np.sqrt(c ** 2 - x_ ** 2) / (1 - x_ ** 2) / (1 + c) - 1 / (x_ ** 2 - 1) ** (3. / 2) * np.arccos( - (x_ ** 2 + c) / (x_ * (1 + c))) - #a[x > c] = 0 + a[(x > 1) & (x <= c)] = -np.sqrt(c**2 - x_**2) / (1 - x_**2) / ( + 1 + c + ) - 1 / (x_**2 - 1) ** (3.0 / 2) * np.arccos( + (x_**2 + c) / (x_ * (1 + c)) + ) + # a[x > c] = 0 return a def _f(self, c): @@ -88,7 +104,7 @@ def _f(self, c): :param c: concentration :return: dimensionless normalization of Halo mass """ - return 1. 
/ (np.log(1 + c) - c / (1 + c)) + return 1.0 / (np.log(1 + c) - c / (1 + c)) -# https://arxiv.org/pdf/astro-ph/0304034.pdf equation 17 for shear +# https://arxiv.org/pdf/astro-ph/0304034.pdf equation 17 for shear diff --git a/lenstronomy/LensModel/Profiles/nie.py b/lenstronomy/LensModel/Profiles/nie.py index b92ea7c87..d5606c7d1 100644 --- a/lenstronomy/LensModel/Profiles/nie.py +++ b/lenstronomy/LensModel/Profiles/nie.py @@ -1,24 +1,37 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import lenstronomy.Util.util as util import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['NIE', 'NIEMajorAxis'] +__all__ = ["NIE", "NIEMajorAxis"] class NIE(LensProfileBase): - """ - Non-singular isothermal ellipsoid (NIE) + """Non-singular isothermal ellipsoid (NIE) .. math:: \\kappa = \\theta_E/2 \\left[s^2_{scale} + qx^2 + y^2/q]−1/2 - """ - param_names = ['theta_E', 'e1', 'e2', 's_scale', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'e1': -0.5, 'e2': -0.5, 's_scale': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 10, 'e1': 0.5, 'e2': 0.5, 's_scale': 100, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "e1", "e2", "s_scale", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "e1": -0.5, + "e2": -0.5, + "s_scale": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 10, + "e1": 0.5, + "e2": 0.5, + "s_scale": 100, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.nie_major_axis = NIEMajorAxis() @@ -95,8 +108,8 @@ def hessian(self, x, y, theta_E, e1, e2, s_scale, center_x=0, center_y=0): # evaluate f__xx, f__xy, _, f__yy = self.nie_major_axis.hessian(x__, y__, b, s, q) # rotate back - kappa = 1./2 * (f__xx + f__yy) - gamma1__ = 1./2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_G) * gamma1__ - np.sin(2 * phi_G) * gamma2__ gamma2 = +np.sin(2 * phi_G) * gamma1__ + np.cos(2 * phi_G) * gamma2__ @@ -106,8 +119,8 @@ def hessian(self, x, y, theta_E, e1, e2, s_scale, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def density_lens(self, r, theta_E, e1, e2, s_scale, center_x=0, center_y=0): - """ - 3d mass density at 3d radius r. This function assumes spherical symmetry/ignoring the eccentricity. + """3d mass density at 3d radius r. This function assumes spherical + symmetry/ignoring the eccentricity. :param r: 3d radius :param theta_E: Einstein radius @@ -123,8 +136,8 @@ def density_lens(self, r, theta_E, e1, e2, s_scale, center_x=0, center_y=0): return rho0 / (r**2 + s_scale**2) def mass_3d_lens(self, r, theta_E, e1, e2, s_scale, center_x=0, center_y=0): - """ - mass enclosed a 3d radius r. This function assumes spherical symmetry/ignoring the eccentricity. + """Mass enclosed a 3d radius r. This function assumes spherical + symmetry/ignoring the eccentricity. 
:param r: 3d radius :param theta_E: Einstein radius @@ -136,7 +149,7 @@ def mass_3d_lens(self, r, theta_E, e1, e2, s_scale, center_x=0, center_y=0): :return: 3d mass density at 3d radius r """ rho0 = 1 / 2 * theta_E / np.pi - return rho0 * 4 * np.pi * (r - s_scale * np.arctan(r/s_scale)) + return rho0 * 4 * np.pi * (r - s_scale * np.arctan(r / s_scale)) def param_conv(self, theta_E, e1, e2, s_scale): if self._static is True: @@ -158,7 +171,7 @@ def _param_conv(self, theta_E, e1, e2, s_scale): phi_G, q = param_util.ellipticity2phi_q(e1, e2) theta_E_conv = self._theta_E_prod_average2major_axis(theta_E, q) - b = theta_E_conv * np.sqrt((1 + q**2)/2) + b = theta_E_conv * np.sqrt((1 + q**2) / 2) s = s_scale / np.sqrt(q) # s = s_scale * np.sqrt((1 + q**2) / (2*q**2)) return b, s, q, phi_G @@ -175,7 +188,12 @@ def set_static(self, theta_E, e1, e2, s_scale, center_x=0, center_y=0): :return: self variables set """ self._static = True - self._b_static, self._s_static, self._q_static, self._phi_G_static = self._param_conv(theta_E, e1, e2, s_scale) + ( + self._b_static, + self._s_static, + self._q_static, + self._phi_G_static, + ) = self._param_conv(theta_E, e1, e2, s_scale) def set_dynamic(self): """ @@ -183,21 +201,20 @@ def set_dynamic(self): :return: """ self._static = False - if hasattr(self, '_b_static'): + if hasattr(self, "_b_static"): del self._b_static - if hasattr(self, '_s_static'): + if hasattr(self, "_s_static"): del self._s_static - if hasattr(self, '_phi_G_static'): + if hasattr(self, "_phi_G_static"): del self._phi_G_static - if hasattr(self, '_q_static'): + if hasattr(self, "_q_static"): del self._q_static @staticmethod def _theta_E_prod_average2major_axis(theta_E, q): - """ - Converts a product averaged Einstein radius (of semi-minor and semi-major axis) to a major axis Einstein radius - for an Isothermal ellipse. - The standard lenstronomy conventions are product averaged Einstein radii while other codes + """Converts a product averaged Einstein radius (of semi-minor and semi-major + axis) to a major axis Einstein radius for an Isothermal ellipse. The standard + lenstronomy conventions are product averaged Einstein radii while other codes (such as e.g. gravlens) use the semi-major axis convention. .. math:: @@ -207,7 +224,7 @@ def _theta_E_prod_average2major_axis(theta_E, q): :param q: axis ratio minor/major :return: theta_E in convention of kappa= b *(q^2(s^2 + x^2) + y^2􏰉)^{−1/2} (major axis) """ - theta_E_major_axis = theta_E / (np.sqrt((1.+q**2) / (2. * q))) + theta_E_major_axis = theta_E / (np.sqrt((1.0 + q**2) / (2.0 * q))) return theta_E_major_axis @@ -221,7 +238,7 @@ class NIEMajorAxis(LensProfileBase): """ - param_names = ['b', 's', 'q', 'center_x', 'center_y'] + param_names = ["b", "s", "q", "center_x", "center_y"] def __init__(self, diff=0.0000000001): self._diff = diff @@ -230,24 +247,31 @@ def __init__(self, diff=0.0000000001): def function(self, x, y, b, s, q): psi = self._psi(x, y, q, s) alpha_x, alpha_y = self.derivatives(x, y, b, s, q) - f_ = x * alpha_x + y * alpha_y - b * s * 1. / 2. * np.log((psi + s) ** 2 + (1. - q ** 2) * x ** 2) + f_ = ( + x * alpha_x + + y * alpha_y + - b * s * 1.0 / 2.0 * np.log((psi + s) ** 2 + (1.0 - q**2) * x**2) + ) return f_ def derivatives(self, x, y, b, s, q): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" if q >= 1: q = 0.99999999 psi = self._psi(x, y, q, s) - f_x = b / np.sqrt(1. - q ** 2) * np.arctan(np.sqrt(1. - q ** 2) * x / (psi + s)) - f_y = b / np.sqrt(1. 
- q ** 2) * np.arctanh(np.sqrt(1. - q ** 2) * y / (psi + q ** 2 * s)) + f_x = ( + b / np.sqrt(1.0 - q**2) * np.arctan(np.sqrt(1.0 - q**2) * x / (psi + s)) + ) + f_y = ( + b + / np.sqrt(1.0 - q**2) + * np.arctanh(np.sqrt(1.0 - q**2) * y / (psi + q**2 * s)) + ) return f_x, f_y def hessian(self, x, y, b, s, q): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" alpha_ra, alpha_dec = self.derivatives(x, y, b, s, q) diff = self._diff alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, b, s, q) @@ -261,8 +285,7 @@ def hessian(self, x, y, b, s, q): @staticmethod def kappa(x, y, b, s, q): - """ - convergence + """convergence. :param x: major axis coordinate :param y: minor axis coordinate @@ -271,13 +294,12 @@ def kappa(x, y, b, s, q): :param q: axis ratio :return: convergence """ - kappa = b/2. * (q**2 * (s**2 + x**2) + y**2)**(-1./2) + kappa = b / 2.0 * (q**2 * (s**2 + x**2) + y**2) ** (-1.0 / 2) return kappa @staticmethod def _psi(x, y, q, s): - """ - expression after equation (8) in Keeton&Kochanek 1998 + """Expression after equation (8) in Keeton&Kochanek 1998. :param x: semi-major axis coordinate :param y: semi-minor axis coordinate diff --git a/lenstronomy/LensModel/Profiles/nie_potential.py b/lenstronomy/LensModel/Profiles/nie_potential.py index 0136b5fe8..0ca09f416 100644 --- a/lenstronomy/LensModel/Profiles/nie_potential.py +++ b/lenstronomy/LensModel/Profiles/nie_potential.py @@ -1,48 +1,69 @@ -__author__ = 'gipagano' +__author__ = "gipagano" import numpy as np import lenstronomy.Util.util as util import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['NIE_POTENTIAL', 'NIEPotentialMajorAxis'] +__all__ = ["NIE_POTENTIAL", "NIEPotentialMajorAxis"] class NIE_POTENTIAL(LensProfileBase): - """ - this class implements the elliptical potential of Eq. (67) of `LECTURES ON GRAVITATIONAL LENSING `_ + """This class implements the elliptical potential of Eq. + + (67) of `LECTURES ON GRAVITATIONAL LENSING `_ and Eq. (1) of `Blandford & Kochanek 1987 `_, mapped to Eq. 
(8) of `Barnaka1998 `_ to find the ellipticity bounds """ - - param_names = ['center_x', 'center_y', 'theta_E', 'theta_c', 'e1', 'e2'] - lower_limit_default = {'center_x': -100, 'center_y': -100, 'theta_E': 0, 'theta_c': 0, 'e1': 0, 'e2': 0} - upper_limit_default = {'center_x': 100, 'center_y': 100, 'theta_E': 10, 'theta_c': 10, 'e1': 0.2, 'e2': 0.2} - + + param_names = ["center_x", "center_y", "theta_E", "theta_c", "e1", "e2"] + lower_limit_default = { + "center_x": -100, + "center_y": -100, + "theta_E": 0, + "theta_c": 0, + "e1": 0, + "e2": 0, + } + upper_limit_default = { + "center_x": 100, + "center_y": 100, + "theta_E": 10, + "theta_c": 10, + "e1": 0.2, + "e2": 0.2, + } + def __init__(self): self.nie_potential_major_axis = NIEPotentialMajorAxis() super(NIE_POTENTIAL, self).__init__() - + def param_conv(self, theta_E, theta_c, e1, e2): if self._static is True: - return self._thetaE_transf_static, self._thetac_static, self._eps_static, self._phi_G_static + return ( + self._thetaE_transf_static, + self._thetac_static, + self._eps_static, + self._phi_G_static, + ) return self._param_conv(theta_E, theta_c, e1, e2) def _param_conv(self, theta_E, theta_c, e1, e2): - """ - convert the spherical averaged Einstein radius to an elliptical (major axis) Einstein radius and - the individual eccentricities to the modulus of the eccentricity + """Convert the spherical averaged Einstein radius to an elliptical (major axis) + Einstein radius and the individual eccentricities to the modulus of the + eccentricity. :param theta_E: Einstein radius :param theta_c: core radius :param e1: eccentricity component :param e2: eccentricity component - :return: transformed Einstein radius, core radius, ellipticity modulus, orientation angle phi_G + :return: transformed Einstein radius, core radius, ellipticity modulus, + orientation angle phi_G """ - eps = np.sqrt(e1**2+e2**2) - phi_G, q = param_util.ellipticity2phi_q(e1, e2) + eps = np.sqrt(e1**2 + e2**2) + phi_G, q = param_util.ellipticity2phi_q(e1, e2) theta_E_conv = self._theta_q_convert(theta_E, q) theta_c_conv = self._theta_q_convert(theta_c, q) return theta_E_conv, theta_c_conv, eps, phi_G @@ -61,7 +82,12 @@ def set_static(self, theta_E, theta_c, e1, e2, center_x=0, center_y=0): :return: self variables set """ self._static = True - self._thetaE_transf_static, self._thetac_static, self._eps_static, self._phi_G_static = self._param_conv(theta_E, theta_c, e1, e2) + ( + self._thetaE_transf_static, + self._thetac_static, + self._eps_static, + self._phi_G_static, + ) = self._param_conv(theta_E, theta_c, e1, e2) def set_dynamic(self): """ @@ -69,95 +95,106 @@ def set_dynamic(self): :return: """ self._static = False - if hasattr(self, '_thetaE_transf_static'): + if hasattr(self, "_thetaE_transf_static"): del self._thetaE_transf_static - if hasattr(self, '_thetac_static'): + if hasattr(self, "_thetac_static"): del self._thetac_static - if hasattr(self, '_eps_static'): + if hasattr(self, "_eps_static"): del self._eps_static - if hasattr(self, '_phi_G_static'): + if hasattr(self, "_phi_G_static"): del self._phi_G_static - + def function(self, x, y, theta_E, theta_c, e1, e2, center_x=0, center_y=0): - """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) :param theta_E: Einstein radius (in angles) :param theta_c: core radius (in angles) - :param e1: eccentricity component, x direction(dimensionless) - :param e2: eccentricity component, y direction (dimensionless) - :return: lensing potential + :param e1: eccentricity component, x direction(dimensionless) + 
:param e2: eccentricity component, y direction (dimensionless) + :return: lensing potential """ - theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv(theta_E, theta_c, e1, e2) - + theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv( + theta_E, theta_c, e1, e2 + ) + # shift x_ = x - center_x y_ = y - center_y - + # rotate x__, y__ = util.rotate(x_, y_, phi_G) - + # evaluate - f_ = self.nie_potential_major_axis.function(x__, y__, theta_E_conv, theta_c_conv, eps) - + f_ = self.nie_potential_major_axis.function( + x__, y__, theta_E_conv, theta_c_conv, eps + ) + # rotate back return f_ def derivatives(self, x, y, theta_E, theta_c, e1, e2, center_x=0, center_y=0): """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) :param theta_E: Einstein radius (in angles) :param theta_c: core radius (in angles) - :param e1: eccentricity component, x direction(dimensionless) - :param e2: eccentricity component, y direction (dimensionless) + :param e1: eccentricity component, x direction(dimensionless) + :param e2: eccentricity component, y direction (dimensionless) :return: deflection angle (in angles) - """ - theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv(theta_E, theta_c, e1, e2) - + """ + theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv( + theta_E, theta_c, e1, e2 + ) + # shift x_ = x - center_x y_ = y - center_y - + # rotate x__, y__ = util.rotate(x_, y_, phi_G) - + # evaluate - f__x, f__y = self.nie_potential_major_axis.derivatives(x__, y__, theta_E_conv, theta_c_conv, eps) - + f__x, f__y = self.nie_potential_major_axis.derivatives( + x__, y__, theta_E_conv, theta_c_conv, eps + ) + # rotate back f_x, f_y = util.rotate(f__x, f__y, -phi_G) return f_x, f_y def hessian(self, x, y, theta_E, theta_c, e1, e2, center_x=0, center_y=0): """ - + :param x: x-coord (in angles) :param y: y-coord (in angles) :param theta_E: Einstein radius (in angles) :param theta_c: core radius (in angles) - :param e1: eccentricity component, x direction(dimensionless) - :param e2: eccentricity component, y direction (dimensionless) + :param e1: eccentricity component, x direction(dimensionless) + :param e2: eccentricity component, y direction (dimensionless) :return: hessian matrix (in angles) """ - theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv(theta_E, theta_c, e1, e2) - + theta_E_conv, theta_c_conv, eps, phi_G = self.param_conv( + theta_E, theta_c, e1, e2 + ) + # shift x_ = x - center_x y_ = y - center_y - + # rotate x__, y__ = util.rotate(x_, y_, phi_G) - + # evaluate - f__xx, f__xy, _, f__yy = self.nie_potential_major_axis.hessian(x__, y__, theta_E_conv, theta_c_conv, eps) - + f__xx, f__xy, _, f__yy = self.nie_potential_major_axis.hessian( + x__, y__, theta_E_conv, theta_c_conv, eps + ) + # rotate back - kappa = 1./2 * (f__xx + f__yy) - gamma1__ = 1./2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_G) * gamma1__ - np.sin(2 * phi_G) * gamma2__ gamma2 = +np.sin(2 * phi_G) * gamma1__ + np.cos(2 * phi_G) * gamma2__ @@ -165,54 +202,55 @@ def hessian(self, x, y, theta_E, theta_c, e1, e2, center_x=0, center_y=0): f_yy = kappa - gamma1 f_xy = gamma2 return f_xx, f_xy, f_xy, f_yy - + def _theta_q_convert(self, theta_E, q): - """ - converts a spherical averaged Einstein radius/core radius to an elliptical (major axis) Einstein radius. - This then follows the convention of the SPEMD profile in lenstronomy. 
- (theta_E / theta_E_gravlens) = sqrt[ (1+q^2) / (2 q) ] + """Converts a spherical averaged Einstein radius/core radius to an elliptical + (major axis) Einstein radius. This then follows the convention of the SPEMD + profile in lenstronomy. (theta_E / theta_E_gravlens) = sqrt[ (1+q^2) / (2 q) ] :param theta_E: Einstein radius in lenstronomy conventions :param q: axis ratio minor/major :return: theta_E in convention of kappa= b *(q2(s2 + x2) + y2􏰉)−1/2 """ - theta_E_new = theta_E / (np.sqrt((1.+q**2) / (2. * q))) #/ (1+(1-q)/2.) + theta_E_new = theta_E / (np.sqrt((1.0 + q**2) / (2.0 * q))) # / (1+(1-q)/2.) return theta_E_new class NIEPotentialMajorAxis(LensProfileBase): - """ - this class implements the elliptical potential of Eq. (67) of `LECTURES ON GRAVITATIONAL LENSING `_ + """This class implements the elliptical potential of Eq. + + (67) of `LECTURES ON GRAVITATIONAL LENSING `_ and Eq. (1) of `Blandford & Kochanek 1987 `_, mapped to Eq. (8) of `Barnaka1998 `_ to find the ellipticity bounds """ - param_names = ['theta_E', 'theta_c', 'eps', 'center_x', 'center_y'] + param_names = ["theta_E", "theta_c", "eps", "center_x", "center_y"] def __init__(self, diff=0.0000000001): self._diff = diff super(NIEPotentialMajorAxis, self).__init__() def function(self, x, y, theta_E, theta_c, eps): - f_ = theta_E*np.sqrt(theta_c**2+(1-eps)*x**2+(1+eps)*y**2) + f_ = theta_E * np.sqrt(theta_c**2 + (1 - eps) * x**2 + (1 + eps) * y**2) return f_ - + def derivatives(self, x, y, theta_E, theta_c, eps): - """ - returns df/dx and df/dy of the function - """ - factor = np.sqrt(theta_c**2+(1-eps)*x**2+(1+eps)*y**2) - f_x = (theta_E/factor)*(1-eps)*x - f_y = (theta_E/factor)*(1+eps)*y + """Returns df/dx and df/dy of the function.""" + factor = np.sqrt(theta_c**2 + (1 - eps) * x**2 + (1 + eps) * y**2) + f_x = (theta_E / factor) * (1 - eps) * x + f_y = (theta_E / factor) * (1 + eps) * y return f_x, f_y def hessian(self, x, y, theta_E, theta_c, eps): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - factor = np.sqrt(theta_c**2+(1-eps)*x**2+(1+eps)*y**2) - f_xx = (1-eps)*(theta_E/factor) -(theta_E/factor**3)*(1-eps)**2*x**2 - f_yy = (1+eps)*(theta_E/factor) -(theta_E/factor**3)*(1+eps)**2*y**2 - f_xy = -(theta_E/factor**3)*(1-eps**2)*x*y + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + factor = np.sqrt(theta_c**2 + (1 - eps) * x**2 + (1 + eps) * y**2) + f_xx = (1 - eps) * (theta_E / factor) - (theta_E / factor**3) * ( + 1 - eps + ) ** 2 * x**2 + f_yy = (1 + eps) * (theta_E / factor) - (theta_E / factor**3) * ( + 1 + eps + ) ** 2 * y**2 + f_xy = -(theta_E / factor**3) * (1 - eps**2) * x * y return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/numerical_deflections.py b/lenstronomy/LensModel/Profiles/numerical_deflections.py index 9190ac650..e8e482a1b 100644 --- a/lenstronomy/LensModel/Profiles/numerical_deflections.py +++ b/lenstronomy/LensModel/Profiles/numerical_deflections.py @@ -1,27 +1,26 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['TabulatedDeflections'] +__all__ = ["TabulatedDeflections"] class TabulatedDeflections(LensProfileBase): + """A user-defined class that returns deflection angles given a set of observed + coordinates on the sky (x, y). + This class has similar functionality as INTERPOL, with the difference being that the + interpolation for this class is done prior to class creation. 
When used with + routines in the lenstronomy.Sampling, this class effectively acts as a fixed lens + model with no keyword arguments. """ - A user-defined class that returns deflection angles given a set of observed coordinates on the sky (x, y). - This class has similar functionality as INTERPOL, with the difference being that the interpolation for this class is - done prior to class creation. When used with routines in the lenstronomy.Sampling, this class effectively acts as - a fixed lens model with no keyword arguments. - """ - - profile_name = 'TABULATED_DEFLECTIONS' + profile_name = "TABULATED_DEFLECTIONS" param_names = [] lower_limit_default = {} upper_limit_default = {} def __init__(self, custom_class): - """ :param custom_class: a user-defined class that has a __call___ method that returns deflection angles @@ -29,7 +28,7 @@ def __init__(self, custom_class): >>> custom_class = CustomLensingClass() >>> alpha_x, alpha_y = custom_class(x, y, **kwargs) - + or equivalently: >>> from lenstronomy.LensModel.lens_model import LensModel @@ -42,11 +41,9 @@ def __init__(self, custom_class): super(TabulatedDeflections, self).__init__() def function(self, x, y, center_x=0, center_y=0, **kwargs): - - raise Exception('no potential for this class.') + raise Exception("no potential for this class.") def derivatives(self, x, y, center_x=0, center_y=0, **kwargs): - """ :param x: x coordinate [arcsec] @@ -64,21 +61,23 @@ def derivatives(self, x, y, center_x=0, center_y=0, **kwargs): return f_x, f_y def hessian(self, x, y, center_x=0, center_y=0, **kwargs): - """ - Returns the components of the hessian matrix - :param x: x coordinate [arcsec] - :param y: y coordinate [arcsec] - :param center_x: the deflector x coordinate - :param center_y: the deflector y coordinate - :param kwargs: keyword arguments for the profile - :return: the derivatives of the deflection angles that make up the hessian matrix - """ + """Returns the components of the hessian matrix :param x: x coordinate [arcsec] + :param y: y coordinate [arcsec] :param center_x: the deflector x coordinate + :param center_y: the deflector y coordinate :param kwargs: keyword arguments for + the profile :return: the derivatives of the deflection angles that make up the + hessian matrix.""" diff = 1e-6 - alpha_ra, alpha_dec = self.derivatives(x, y, center_x=center_x, center_y=center_y, **kwargs) - - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, center_x=center_x, center_y=center_y, **kwargs) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, center_x=center_x, center_y=center_y, **kwargs) + alpha_ra, alpha_dec = self.derivatives( + x, y, center_x=center_x, center_y=center_y, **kwargs + ) + + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, center_x=center_x, center_y=center_y, **kwargs + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, center_x=center_x, center_y=center_y, **kwargs + ) dalpha_rara = (alpha_ra_dx - alpha_ra) / diff dalpha_radec = (alpha_ra_dy - alpha_ra) / diff diff --git a/lenstronomy/LensModel/Profiles/p_jaffe.py b/lenstronomy/LensModel/Profiles/p_jaffe.py index 006ae55ac..7bff44b3e 100644 --- a/lenstronomy/LensModel/Profiles/p_jaffe.py +++ b/lenstronomy/LensModel/Profiles/p_jaffe.py @@ -1,7 +1,7 @@ import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['PJaffe'] +__all__ = ["PJaffe"] class PJaffe(LensProfileBase): @@ -37,20 +37,29 @@ class to compute the DUAL PSEUDO ISOTHERMAL ELLIPTICAL MASS DISTRIBUTION """ - param_names = ['sigma0', 
'Ra', 'Rs', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': 0, 'Ra': 0, 'Rs': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 10, 'Ra': 100, 'Rs': 100, 'center_x': 100, 'center_y': 100} + param_names = ["sigma0", "Ra", "Rs", "center_x", "center_y"] + lower_limit_default = { + "sigma0": 0, + "Ra": 0, + "Rs": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 10, + "Ra": 100, + "Rs": 100, + "center_x": 100, + "center_y": 100, + } def __init__(self): - """ - - """ + """""" LensProfileBase.__init__(self) self._s = 0.0001 def density(self, r, rho0, Ra, Rs): - """ - computes the density + """Computes the density. :param r: radial distance from the center (in 3D) :param rho0: density normalization (see class documentation above) @@ -63,8 +72,7 @@ def density(self, r, rho0, Ra, Rs): return rho def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0): - """ - projected density + """Projected density. :param x: projected coordinate on the sky :param y: projected coordinate on the sky @@ -80,12 +88,17 @@ def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) sigma0 = self.rho2sigma(rho0, Ra, Rs) - sigma = sigma0 * Ra * Rs / (Rs - Ra) * (1 / np.sqrt(Ra ** 2 + r ** 2) - 1 / np.sqrt(Rs ** 2 + r ** 2)) + sigma = ( + sigma0 + * Ra + * Rs + / (Rs - Ra) + * (1 / np.sqrt(Ra**2 + r**2) - 1 / np.sqrt(Rs**2 + r**2)) + ) return sigma def mass_3d(self, r, rho0, Ra, Rs): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: radial distance from the center (in 3D) :param rho0: density normalization (see class documentation above) @@ -93,12 +106,20 @@ def mass_3d(self, r, rho0, Ra, Rs): :param Rs: transition radius from logarithmic slope -2 to -4 :return: M( Ra + """Sorts Ra and Rs to make sure Rs > Ra. 
:param Ra: :param Rs: diff --git a/lenstronomy/LensModel/Profiles/p_jaffe_ellipse.py b/lenstronomy/LensModel/Profiles/p_jaffe_ellipse.py index cb8455f0d..9eaab8bad 100644 --- a/lenstronomy/LensModel/Profiles/p_jaffe_ellipse.py +++ b/lenstronomy/LensModel/Profiles/p_jaffe_ellipse.py @@ -3,7 +3,7 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np -__all__ = ['PJaffe_Ellipse'] +__all__ = ["PJaffe_Ellipse"] class PJaffe_Ellipse(LensProfileBase): @@ -39,9 +39,26 @@ class to compute the DUAL PSEUDO ISOTHERMAL ELLIPTICAL MASS DISTRIBUTION \\sigma_0 = \\frac{\\Sigma_0}{\\Sigma_{\\rm crit}} """ - param_names = ['sigma0', 'Ra', 'Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'sigma0': 0, 'Ra': 0, 'Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'sigma0': 10, 'Ra': 100, 'Rs': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["sigma0", "Ra", "Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "sigma0": 0, + "Ra": 0, + "Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "sigma0": 10, + "Ra": 100, + "Rs": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.spherical = PJaffe() @@ -49,42 +66,49 @@ def __init__(self): super(PJaffe_Ellipse, self).__init__() def function(self, x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0): - """ - returns double integral of NFW profile - """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + """Returns double integral of NFW profile.""" + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) f_ = self.spherical.function(x_, y_, sigma0, Ra, Rs) return f_ def derivatives(self, x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function (integral of NFW) - """ + """Returns df/dx and df/dy of the function (integral of NFW)""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) e = param_util.q2e(q) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - f_x_prim, f_y_prim = self.spherical.derivatives(x_, y_, sigma0, Ra, Rs, center_x=0, center_y=0) + f_x_prim, f_y_prim = self.spherical.derivatives( + x_, y_, sigma0, Ra, Rs, center_x=0, center_y=0 + ) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - alpha_ra, alpha_dec = self.derivatives(x, y, sigma0, Ra, Rs, e1, e2, center_x, center_y) + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + alpha_ra, alpha_dec = self.derivatives( + x, y, sigma0, Ra, Rs, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, sigma0, Ra, Rs, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, sigma0, Ra, Rs, e1, e2, center_x, center_y) - - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = 
(alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, sigma0, Ra, Rs, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, sigma0, Ra, Rs, e1, e2, center_x, center_y + ) + + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy diff --git a/lenstronomy/LensModel/Profiles/pemd.py b/lenstronomy/LensModel/Profiles/pemd.py index da3962cf4..e11858ed3 100644 --- a/lenstronomy/LensModel/Profiles/pemd.py +++ b/lenstronomy/LensModel/Profiles/pemd.py @@ -1,19 +1,18 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.spp import SPP from lenstronomy.LensModel.Profiles.spemd import SPEMD from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['PEMD'] +__all__ = ["PEMD"] class PEMD(LensProfileBase): - """ - class for power law ellipse mass density profile. - This class effectively calls the class SPEMD_SMOOTH with a fixed and very small central smoothing scale - to perform the numerical integral using the FASTELL code by Renan Barkana. - An alternative implementation of the same model using pure python with analytical functions is probided as 'EPL' - profile. + """Class for power law ellipse mass density profile. This class effectively calls + the class SPEMD_SMOOTH with a fixed and very small central smoothing scale to + perform the numerical integral using the FASTELL code by Renan Barkana. An + alternative implementation of the same model using pure python with analytical + functions is probided as 'EPL' profile. .. math:: \\kappa(x, y) = \\frac{3-\\gamma}{2} \\left(\\frac{\\theta_{E}}{\\sqrt{q x^2 + y^2/q}} \\right)^{\\gamma-1} @@ -37,12 +36,25 @@ class for power law ellipse mass density profile. .. math:: \\left(\\frac{\\theta'_{\\rm E}}{\\theta_{\\rm E}}\\right)^{2} = \\frac{2q}{1+q^2}. 
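As a quick sanity check of the convergence formula in the PEMD docstring above, the expression can be evaluated directly with numpy. This is only an illustrative sketch in major-axis coordinates (no rotation or centroid offset), not the lenstronomy implementation, and kappa_power_law is a hypothetical helper name:

import numpy as np

def kappa_power_law(x, y, theta_E, gamma, q):
    # kappa(x, y) = (3 - gamma) / 2 * (theta_E / sqrt(q x^2 + y^2 / q))^(gamma - 1),
    # exactly as written in the PEMD docstring above (x taken along the major axis)
    r_ell = np.sqrt(q * x**2 + y**2 / q)
    return (3.0 - gamma) / 2.0 * (theta_E / r_ell) ** (gamma - 1)

# spherical isothermal limit: gamma = 2, q = 1 reduces to the SIS convergence theta_E / (2 r)
x, y = 1.2, 0.5
r = np.hypot(x, y)
assert np.isclose(kappa_power_law(x, y, theta_E=1.0, gamma=2.0, q=1.0), 1.0 / (2 * r))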
- - """ - param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "gamma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 1.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 2.5, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, suppress_fastell=False): """ @@ -67,7 +79,9 @@ def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): :param center_y: y-position of lens center :return: lensing potential """ - return self.spemd_smooth.function(x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y) + return self.spemd_smooth.function( + x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y + ) def derivatives(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): """ @@ -82,7 +96,9 @@ def derivatives(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): :param center_y: y-position of lens center :return: deflection angles alpha_x, alpha_y """ - return self.spemd_smooth.derivatives(x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y) + return self.spemd_smooth.derivatives( + x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y + ) def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): """ @@ -97,24 +113,21 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): :param center_y: y-position of lens center :return: Hessian components f_xx, f_xy, f_yx, f_yy """ - return self.spemd_smooth.hessian(x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y) + return self.spemd_smooth.hessian( + x, y, theta_E, gamma, e1, e2, self._s_scale, center_x, center_y + ) def mass_3d_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the spherical power-law mass enclosed (with SPP routine) - :param r: radius within the mass is computed - :param theta_E: Einstein radius - :param gamma: power-law slope - :param e1: eccentricity component (not used) - :param e2: eccentricity component (not used) - :return: mass enclosed a 3D radius r - """ + """Computes the spherical power-law mass enclosed (with SPP routine) :param r: + radius within the mass is computed :param theta_E: Einstein radius :param gamma: + power-law slope :param e1: eccentricity component (not used) :param e2: + eccentricity component (not used) :return: mass enclosed a 3D radius r.""" return self.spp.mass_3d_lens(r, theta_E, gamma) def density_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. 
:param r: radius within the mass is computed :param theta_E: Einstein radius diff --git a/lenstronomy/LensModel/Profiles/point_mass.py b/lenstronomy/LensModel/Profiles/point_mass.py index 7ca96b5ab..be27178c3 100644 --- a/lenstronomy/LensModel/Profiles/point_mass.py +++ b/lenstronomy/LensModel/Profiles/point_mass.py @@ -1,22 +1,22 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['PointMass'] +__all__ = ["PointMass"] class PointMass(LensProfileBase): - """ - class to compute the physical deflection angle of a point mass, given as an Einstein radius - """ - param_names = ['theta_E', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'center_x': 100, 'center_y': 100} + """Class to compute the physical deflection angle of a point mass, given as an + Einstein radius.""" + + param_names = ["theta_E", "center_x", "center_y"] + lower_limit_default = {"theta_E": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"theta_E": 100, "center_x": 100, "center_y": 100} def __init__(self): - self.r_min = 10**(-25) + self.r_min = 10 ** (-25) super(PointMass, self).__init__() # alpha = 4*const.G * (mass*const.M_sun)/const.c**2/(r*const.Mpc) @@ -37,7 +37,7 @@ def function(self, x, y, theta_E, center_x=0, center_y=0): r = np.empty_like(a) r[a > self.r_min] = a[a > self.r_min] # in the SIS regime r[a <= self.r_min] = self.r_min - phi = theta_E**2*np.log(r) + phi = theta_E**2 * np.log(r) return phi def derivatives(self, x, y, theta_E, center_x=0, center_y=0): @@ -57,8 +57,8 @@ def derivatives(self, x, y, theta_E, center_x=0, center_y=0): r = np.empty_like(a) r[a > self.r_min] = a[a > self.r_min] # in the SIS regime r[a <= self.r_min] = self.r_min - alpha = theta_E**2/r - return alpha*x_/r, alpha*y_/r + alpha = theta_E**2 / r + return alpha * x_ / r, alpha * y_ / r def hessian(self, x, y, theta_E, center_x=0, center_y=0): """ @@ -78,7 +78,7 @@ def hessian(self, x, y, theta_E, center_x=0, center_y=0): r2 = np.empty_like(a) r2[a > self.r_min**2] = a[a > self.r_min**2] # in the SIS regime r2[a <= self.r_min**2] = self.r_min**2 - f_xx = C * (y_**2-x_**2)/r2**2 - f_yy = C * (x_**2-y_**2)/r2**2 - f_xy = -C * 2*x_*y_/r2**2 + f_xx = C * (y_**2 - x_**2) / r2**2 + f_yy = C * (x_**2 - y_**2) / r2**2 + f_xy = -C * 2 * x_ * y_ / r2**2 return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/sersic.py b/lenstronomy/LensModel/Profiles/sersic.py index ae8ed4a56..698409ea5 100644 --- a/lenstronomy/LensModel/Profiles/sersic.py +++ b/lenstronomy/LensModel/Profiles/sersic.py @@ -1,11 +1,11 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import lenstronomy.Util.util as util from lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Sersic'] +__all__ = ["Sersic"] class Sersic(SersicUtil, LensProfileBase): @@ -49,9 +49,22 @@ class Sersic(SersicUtil, LensProfileBase): >>> alpha_x, alpha_y = sersic.derivatives(x=1, y=1, k_eff=k_eff, R_sersic=R_sersic, center_x=0, center_y=0) """ - param_names = ['k_eff', 'R_sersic', 'n_sersic', 'center_x', 'center_y'] - lower_limit_default = {'k_eff': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'k_eff': 10, 'R_sersic': 100, 'n_sersic': 8, 'center_x': 100, 'center_y': 100} + + param_names = ["k_eff", "R_sersic", 
"n_sersic", "center_x", "center_y"] + lower_limit_default = { + "k_eff": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "k_eff": 10, + "R_sersic": 100, + "n_sersic": 8, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): """ @@ -69,16 +82,16 @@ def function(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): n = n_sersic x_red = self._x_reduced(x, y, n, R_sersic, center_x, center_y) b = self.b_n(n) - #hyper2f2_b = util.hyper2F2_array(2*n, 2*n, 1+2*n, 1+2*n, -b) - hyper2f2_bx = util.hyper2F2_array(2*n, 2*n, 1+2*n, 1+2*n, -b*x_red) - f_eff = np.exp(b) * R_sersic ** 2 / 2. * k_eff# * hyper2f2_b - f_ = f_eff * x_red**(2*n) * hyper2f2_bx# / hyper2f2_b + # hyper2f2_b = util.hyper2F2_array(2*n, 2*n, 1+2*n, 1+2*n, -b) + hyper2f2_bx = util.hyper2F2_array( + 2 * n, 2 * n, 1 + 2 * n, 1 + 2 * n, -b * x_red + ) + f_eff = np.exp(b) * R_sersic**2 / 2.0 * k_eff # * hyper2f2_b + f_ = f_eff * x_red ** (2 * n) * hyper2f2_bx # / hyper2f2_b return f_ def derivatives(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) @@ -92,9 +105,8 @@ def derivatives(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) @@ -102,11 +114,13 @@ def hessian(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0): r = max(self._s, r) else: r[r < self._s] = self._s - d_alpha_dr = self.d_alpha_dr(x, y, n_sersic, R_sersic, k_eff, center_x, center_y) + d_alpha_dr = self.d_alpha_dr( + x, y, n_sersic, R_sersic, k_eff, center_x, center_y + ) alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y) - f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r - f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r - f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r + f_xx = -(d_alpha_dr / r + alpha / r**2) * x_**2 / r + alpha / r + f_yy = -(d_alpha_dr / r + alpha / r**2) * y_**2 / r + alpha / r + f_xy = -(d_alpha_dr / r + alpha / r**2) * x_ * y_ / r return f_xx, f_xy, f_xy, f_yy diff --git a/lenstronomy/LensModel/Profiles/sersic_ellipse_kappa.py b/lenstronomy/LensModel/Profiles/sersic_ellipse_kappa.py index 5de5be8a5..58c1cbff4 100644 --- a/lenstronomy/LensModel/Profiles/sersic_ellipse_kappa.py +++ b/lenstronomy/LensModel/Profiles/sersic_ellipse_kappa.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from scipy.integrate import quad import numpy as np @@ -6,30 +6,42 @@ from lenstronomy.Util import param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SersicEllipseKappa'] +__all__ = ["SersicEllipseKappa"] class SersicEllipseKappa(LensProfileBase): - """ - this class contains the function and the derivatives of an elliptical sersic profile - with the ellipticity introduced in the convergence (not the potential). + """This class contains the function and the derivatives of an elliptical sersic + profile with the ellipticity introduced in the convergence (not the potential). 
This requires the use of numerical integrals (Keeton 2004) """ - param_names = ['k_eff', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'k_eff': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, - 'center_y': -100} - upper_limit_default = {'k_eff': 10, 'R_sersic': 100, 'n_sersic': 8, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, - 'center_y': 100} - def __init__(self): + param_names = ["k_eff", "R_sersic", "n_sersic", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "k_eff": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "k_eff": 10, + "R_sersic": 100, + "n_sersic": 8, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + def __init__(self): self._sersic = Sersic() super(SersicEllipseKappa, self).__init__() def function(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0): - - raise Exception('not yet implemented') + raise Exception("not yet implemented") # phi_G, q = param_util.ellipticity2phi_q(e1, e2) # @@ -60,20 +72,28 @@ def function(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y # # return 0.5 * q * integral - def derivatives(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y = 0): - + def derivatives( + self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0 + ): phi_G, gam = param_util.shear_cartesian2polar(e1, e2) - q = max(1-gam, 0.00001) + q = max(1 - gam, 0.00001) x, y = self._coord_rotate(x, y, phi_G, center_x, center_y) if isinstance(x, float) and isinstance(y, float): - - alpha_x, alpha_y = self._compute_derivative_atcoord(x, y, n_sersic, R_sersic, k_eff, - phi_G, q, center_x=center_x, center_y = center_y) + alpha_x, alpha_y = self._compute_derivative_atcoord( + x, + y, + n_sersic, + R_sersic, + k_eff, + phi_G, + q, + center_x=center_x, + center_y=center_y, + ) else: - assert isinstance(x, np.ndarray) or isinstance(x, list) assert isinstance(y, np.ndarray) or isinstance(y, list) x = np.array(x) @@ -87,11 +107,20 @@ def derivatives(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, cente phiG = np.ones_like(alpha_x) * float(phi_G) q = np.ones_like(alpha_x) * float(q) - for i, (x_i, y_i, phi_i, q_i) in \ - enumerate(zip(x.ravel(), y.ravel(), phiG.ravel(), q.ravel())): - - fxi, fyi = self._compute_derivative_atcoord(x_i, y_i, n_sersic, R_sersic, k_eff, - phi_i, q_i, center_x=center_x, center_y = center_y) + for i, (x_i, y_i, phi_i, q_i) in enumerate( + zip(x.ravel(), y.ravel(), phiG.ravel(), q.ravel()) + ): + fxi, fyi = self._compute_derivative_atcoord( + x_i, + y_i, + n_sersic, + R_sersic, + k_eff, + phi_i, + q_i, + center_x=center_x, + center_y=center_y, + ) alpha_x[i], alpha_y[i] = fxi, fyi @@ -102,66 +131,83 @@ def derivatives(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, cente return alpha_x, alpha_y - def hessian(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y = 0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - alpha_ra, alpha_dec = self.derivatives(x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) + def hessian(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0): + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + alpha_ra, alpha_dec = self.derivatives( + x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) diff = 0.000001 - alpha_ra_dx, alpha_dec_dx = 
self.derivatives(x + diff, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) - - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = (alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) + + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy - def projected_mass(self, x, y, q, n_sersic, R_sersic, k_eff, u = 1, power = 1): - + def projected_mass(self, x, y, q, n_sersic, R_sersic, k_eff, u=1, power=1): b_n = self._sersic.b_n(n_sersic) elliptical_coord = self._elliptical_coord_u(x, y, u, q) ** power - elliptical_coord *= R_sersic ** -power + elliptical_coord *= R_sersic**-power - exponent = -b_n * (elliptical_coord**(1./n_sersic) - 1) + exponent = -b_n * (elliptical_coord ** (1.0 / n_sersic) - 1) return k_eff * np.exp(exponent) def _integrand_J(self, u, x, y, n_sersic, q, R_sersic, k_eff, n_integral): - - kappa = self.projected_mass(x, y, q, n_sersic, R_sersic, k_eff, u = u, power=1) + kappa = self.projected_mass(x, y, q, n_sersic, R_sersic, k_eff, u=u, power=1) power = -(n_integral + 0.5) - return kappa * (1 - (1 - q**2)*u) ** power + return kappa * (1 - (1 - q**2) * u) ** power def _integrand_I(self, u, x, y, q, n_sersic, R_sersic, keff, centerx, centery): - ellip_coord = self._elliptical_coord_u(x, y, u, q) - def_angle_circular = self._sersic.alpha_abs(ellip_coord, 0, n_sersic, R_sersic, keff, centerx, centery) - - return ellip_coord * def_angle_circular * (1 - (1-q**2)*u) ** -0.5 * u ** -1 - - def _compute_derivative_atcoord(self, x, y, n_sersic, R_sersic, k_eff, phi_G, q, center_x=0, center_y = 0): - - alpha_x = x * q * quad(self._integrand_J, 0, 1, args=(x, y, n_sersic, q, R_sersic, k_eff, 0))[0] - alpha_y = y * q * quad(self._integrand_J, 0, 1, args=(x, y, n_sersic, q, R_sersic, k_eff, 1))[0] + def_angle_circular = self._sersic.alpha_abs( + ellip_coord, 0, n_sersic, R_sersic, keff, centerx, centery + ) + + return ( + ellip_coord * def_angle_circular * (1 - (1 - q**2) * u) ** -0.5 * u**-1 + ) + + def _compute_derivative_atcoord( + self, x, y, n_sersic, R_sersic, k_eff, phi_G, q, center_x=0, center_y=0 + ): + alpha_x = ( + x + * q + * quad( + self._integrand_J, 0, 1, args=(x, y, n_sersic, q, R_sersic, k_eff, 0) + )[0] + ) + alpha_y = ( + y + * q + * quad( + self._integrand_J, 0, 1, args=(x, y, n_sersic, q, R_sersic, k_eff, 1) + )[0] + ) return alpha_x, alpha_y @staticmethod def _elliptical_coord_u(x, y, u, q): - fac = 1 - (1 - q**2) * u - return (u * (x**2 + y**2 * fac**-1) )**0.5 + return (u * (x**2 + y**2 * fac**-1)) ** 0.5 @staticmethod def _coord_rotate(x, y, phi_G, center_x, center_y): - x_shift = x - center_x y_shift = y - center_y cos_phi = np.cos(phi_G) diff --git a/lenstronomy/LensModel/Profiles/sersic_ellipse_potential.py b/lenstronomy/LensModel/Profiles/sersic_ellipse_potential.py index 15a9495d4..330c1359b 100644 --- a/lenstronomy/LensModel/Profiles/sersic_ellipse_potential.py +++ b/lenstronomy/LensModel/Profiles/sersic_ellipse_potential.py @@ -1,21 +1,38 @@ -__author__ = 'sibirrer' -#this file contains a class to make a 
gaussian +__author__ = "sibirrer" +# this file contains a class to make a gaussian import numpy as np from lenstronomy.LensModel.Profiles.sersic import Sersic import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SersicEllipse'] +__all__ = ["SersicEllipse"] class SersicEllipse(LensProfileBase): """ this class contains functions to evaluate a Sersic mass profile: https://arxiv.org/pdf/astro-ph/0311559.pdf """ - param_names = ['k_eff', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'k_eff': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'k_eff': 10, 'R_sersic': 100, 'n_sersic': 8, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["k_eff", "R_sersic", "n_sersic", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "k_eff": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "k_eff": 10, + "R_sersic": 100, + "n_sersic": 8, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.sersic = Sersic() @@ -23,45 +40,52 @@ def __init__(self): super(SersicEllipse, self).__init__() def function(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0): - """ - returns Gaussian - """ + """Returns Gaussian.""" # phi_G, q = param_util.ellipticity2phi_q(e1, e2) - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) # x_, y_ = self._coord_transf(x, y, q, phi_G, center_x, center_y) f_ = self.sersic.function(x_, y_, n_sersic, R_sersic, k_eff) return f_ - def derivatives(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + def derivatives( + self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0 + ): + """Returns df/dx and df/dy of the function.""" phi_G, q = param_util.ellipticity2phi_q(e1, e2) e = param_util.q2e(q) # e = abs(1. 
- q) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) # x_, y_ = self._coord_transf(x, y, q, phi_G, center_x, center_y) f_x_prim, f_y_prim = self.sersic.derivatives(x_, y_, n_sersic, R_sersic, k_eff) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ - alpha_ra, alpha_dec = self.derivatives(x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" + alpha_ra, alpha_dec = self.derivatives( + x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y) + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y + ) - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = (alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy diff --git a/lenstronomy/LensModel/Profiles/sersic_utils.py b/lenstronomy/LensModel/Profiles/sersic_utils.py index c184d7553..f9a624a8f 100644 --- a/lenstronomy/LensModel/Profiles/sersic_utils.py +++ b/lenstronomy/LensModel/Profiles/sersic_utils.py @@ -3,11 +3,10 @@ import scipy from lenstronomy.Util import param_util -__all__ = ['SersicUtil'] +__all__ = ["SersicUtil"] class SersicUtil(object): - _s = 0.00001 def __init__(self, smoothing=_s, sersic_major_axis=False): @@ -23,38 +22,36 @@ def __init__(self, smoothing=_s, sersic_major_axis=False): self._sersic_major_axis = sersic_major_axis def k_bn(self, n, Re): - """ - returns normalisation of the sersic profile such that Re is the half light radius given n_sersic slope - """ + """Returns normalisation of the sersic profile such that Re is the half light + radius given n_sersic slope.""" bn = self.b_n(n) - k = bn*Re**(-1./n) + k = bn * Re ** (-1.0 / n) return k, bn def k_Re(self, n, k): - """ - - """ + """""" bn = self.b_n(n) - Re = (bn/k)**n + Re = (bn / k) ** n return Re @staticmethod def b_n(n): - """ - b(n) computation. This is the approximation of the exact solution to the relation, - 2*incomplete_gamma_function(2n; b_n) = Gamma_function(2*n). + """B(n) computation. This is the approximation of the exact solution to the + relation, 2*incomplete_gamma_function(2n; b_n) = Gamma_function(2*n). 
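The b_n docstring above defines b_n through 2*incomplete_gamma_function(2n; b_n) = Gamma_function(2n), i.e. b_n is the point that splits Gamma(2n) in half. A short check of the 1.9992*n - 0.3271 approximation for a de Vaucouleurs profile (n = 4), assuming scipy is available (sketch only, not part of this patch):

import numpy as np
from scipy.special import gammainc  # regularized lower incomplete gamma P(a, x)

n_sersic = 4.0
b_n = 1.9992 * n_sersic - 0.3271    # same approximation as SersicUtil.b_n above
print(b_n)                          # ~7.6697, close to the tabulated b_4 ~ 7.669
print(gammainc(2 * n_sersic, b_n))  # ~0.500, so the defining relation holds to better than 1e-3 here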
:param n: the sersic index :return: b(n) """ - bn = 1.9992*n - 0.3271 - bn = np.maximum(bn, 0.00001) # make sure bn is strictly positive as a save guard for very low n_sersic + bn = 1.9992 * n - 0.3271 + bn = np.maximum( + bn, 0.00001 + ) # make sure bn is strictly positive as a save guard for very low n_sersic return bn def get_distance_from_center(self, x, y, e1, e2, center_x, center_y): - """ - Get the distance from the center of Sersic, accounting for orientation and axis ratio - :param x: + """Get the distance from the center of Sersic, accounting for orientation and + axis ratio :param x: + :param y: :param e1: eccentricity :param e2: eccentricity @@ -68,19 +65,20 @@ def get_distance_from_center(self, x, y, e1, e2, center_x, center_y): y_shift = y - center_y cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - xt1 = cos_phi*x_shift+sin_phi*y_shift - xt2 = -sin_phi*x_shift+cos_phi*y_shift - xt2difq2 = xt2/(q*q) - r = np.sqrt(xt1*xt1+xt2*xt2difq2) + xt1 = cos_phi * x_shift + sin_phi * y_shift + xt2 = -sin_phi * x_shift + cos_phi * y_shift + xt2difq2 = xt2 / (q * q) + r = np.sqrt(xt1 * xt1 + xt2 * xt2difq2) else: - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) r = np.sqrt(x_**2 + y_**2) return r def _x_reduced(self, x, y, n_sersic, r_eff, center_x, center_y): - """ - coordinate transform to normalized radius - :param x: + """Coordinate transform to normalized radius :param x: + :param y: :param center_x: :param center_y: @@ -93,19 +91,25 @@ def _x_reduced(self, x, y, n_sersic, r_eff, center_x, center_y): r = max(self._s, r) else: r[r < self._s] = self._s - x_reduced = (r/r_eff)**(1./n_sersic) + x_reduced = (r / r_eff) ** (1.0 / n_sersic) return x_reduced def _alpha_eff(self, r_eff, n_sersic, k_eff): - """ - deflection angle at r_eff - :param r_eff: + """Deflection angle at r_eff :param r_eff: + :param n_sersic: :param k_eff: :return: """ b = self.b_n(n_sersic) - alpha_eff = n_sersic * r_eff * k_eff * b**(-2*n_sersic) * np.exp(b) * special.gamma(2*n_sersic) + alpha_eff = ( + n_sersic + * r_eff + * k_eff + * b ** (-2 * n_sersic) + * np.exp(b) + * special.gamma(2 * n_sersic) + ) return -alpha_eff def alpha_abs(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0): @@ -124,7 +128,7 @@ def alpha_abs(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0): x_red = self._x_reduced(x, y, n_sersic, r_eff, center_x, center_y) b = self.b_n(n_sersic) a_eff = self._alpha_eff(r_eff, n_sersic, k_eff) - alpha = 2. * a_eff * x_red ** (-n) * (special.gammainc(2 * n, b * x_red)) + alpha = 2.0 * a_eff * x_red ** (-n) * (special.gammainc(2 * n, b * x_red)) return alpha def d_alpha_dr(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0): @@ -144,21 +148,19 @@ def d_alpha_dr(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) alpha = self.alpha_abs(r, 0, n_sersic, r_eff, k_eff) - alpha_dr = self.alpha_abs(r+_dr, 0, n_sersic, r_eff, k_eff) - d_alpha_dr = (alpha_dr - alpha)/_dr + alpha_dr = self.alpha_abs(r + _dr, 0, n_sersic, r_eff, k_eff) + d_alpha_dr = (alpha_dr - alpha) / _dr return d_alpha_dr def density(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0): - """ - de-projection of the Sersic profile based on - Prugniel & Simien (1997) - :return: - """ - raise ValueError("not implemented! 
Use a Multi-Gaussian-component decomposition.") + """De-projection of the Sersic profile based on Prugniel & Simien (1997) + :return:""" + raise ValueError( + "not implemented! Use a Multi-Gaussian-component decomposition." + ) def _total_flux(self, r_eff, I_eff, n_sersic): - """ - computes total flux of a round Sersic profile + """Computes total flux of a round Sersic profile. :param r_eff: projected half light radius :param I_eff: surface brightness at r_eff (in same units as r_eff) @@ -166,13 +168,22 @@ def _total_flux(self, r_eff, I_eff, n_sersic): :return: integrated flux to infinity """ bn = self.b_n(n_sersic) - return I_eff * r_eff**2 * 2 * np.pi * n_sersic * np.exp(bn) / bn**(2*n_sersic) * scipy.special.gamma(2*n_sersic) + return ( + I_eff + * r_eff**2 + * 2 + * np.pi + * n_sersic + * np.exp(bn) + / bn ** (2 * n_sersic) + * scipy.special.gamma(2 * n_sersic) + ) def total_flux(self, amp, R_sersic, n_sersic, e1=0, e2=0, **kwargs): - """ - computes analytical integral to compute total flux of the Sersic profile + """Computes analytical integral to compute total flux of the Sersic profile. - :param amp: amplitude parameter in Sersic function (surface brightness at R_sersic + :param amp: amplitude parameter in Sersic function (surface brightness at + R_sersic :param R_sersic: half-light radius in semi-major axis :param n_sersic: Sersic index :param e1: eccentricity @@ -189,14 +200,13 @@ def total_flux(self, amp, R_sersic, n_sersic, e1=0, e2=0, **kwargs): return self._total_flux(r_eff=r_eff, I_eff=amp, n_sersic=n_sersic) def _R_stable(self, R): - """ - Floor R_ at self._smoothing for numerical stability - :param R: radius - :return: smoothed and stabilized radius - """ + """Floor R_ at self._smoothing for numerical stability :param R: radius :return: + smoothed and stabilized radius.""" return np.maximum(self._smoothing, R) - def _r_sersic(self, R, R_sersic, n_sersic, max_R_frac=1000.0, alpha=1.0, R_break=0.0): + def _r_sersic( + self, R, R_sersic, n_sersic, max_R_frac=1000.0, alpha=1.0, R_break=0.0 + ): """ :param R: radius (array or float) @@ -214,11 +224,11 @@ def _r_sersic(self, R, R_sersic, n_sersic, max_R_frac=1000.0, alpha=1.0, R_break if R_frac > max_R_frac: result = 0 else: - exponent = -bn * (R_frac ** (1. / n_sersic) - 1.) + exponent = -bn * (R_frac ** (1.0 / n_sersic) - 1.0) result = np.exp(exponent) else: R_frac_real = R_frac[R_frac <= max_R_frac] - exponent = -bn * (R_frac_real ** (1. / n_sersic) - 1.) 
+ exponent = -bn * (R_frac_real ** (1.0 / n_sersic) - 1.0) result = np.zeros_like(R_) result[R_frac <= max_R_frac] = np.exp(exponent) return np.nan_to_num(result) diff --git a/lenstronomy/LensModel/Profiles/shapelet_pot_cartesian.py b/lenstronomy/LensModel/Profiles/shapelet_pot_cartesian.py index 8215c1cd2..24c80afe8 100644 --- a/lenstronomy/LensModel/Profiles/shapelet_pot_cartesian.py +++ b/lenstronomy/LensModel/Profiles/shapelet_pot_cartesian.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # description of the polar shapelets in potential space @@ -7,16 +7,21 @@ import numpy.polynomial.hermite as hermite from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['CartShapelets'] +__all__ = ["CartShapelets"] class CartShapelets(LensProfileBase): - """ - this class contains the function and the derivatives of the cartesian shapelets - """ - param_names = ['coeffs', 'beta', 'center_x', 'center_y'] - lower_limit_default = {'coeffs': [0], 'beta': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'coeffs': [100], 'beta': 100, 'center_x': 100, 'center_y': 100} + """This class contains the function and the derivatives of the cartesian + shapelets.""" + + param_names = ["coeffs", "beta", "center_x", "center_y"] + lower_limit_default = {"coeffs": [0], "beta": 0, "center_x": -100, "center_y": -100} + upper_limit_default = { + "coeffs": [100], + "beta": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, coeffs, beta, center_x=0, center_y=0): shapelets = self._createShapelet(coeffs) @@ -30,9 +35,7 @@ def function(self, x, y, coeffs, beta, center_x=0, center_y=0): return f_ def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" shapelets = self._createShapelet(coeffs) n_order = self._get_num_n(len(coeffs)) dx_shapelets = self._dx_shapelets(shapelets, beta) @@ -42,15 +45,14 @@ def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0): f_x = self._shapeletOutput(x, y, beta, dx_shapelets, precalc=False) f_y = self._shapeletOutput(x, y, beta, dy_shapelets, precalc=False) else: - H_x, H_y = self.pre_calc(x, y, beta, n_order+1, center_x, center_y) + H_x, H_y = self.pre_calc(x, y, beta, n_order + 1, center_x, center_y) f_x = self._shapeletOutput(H_x, H_y, beta, dx_shapelets) f_y = self._shapeletOutput(H_x, H_y, beta, dy_shapelets) return f_x, f_y def hessian(self, x, y, coeffs, beta, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" shapelets = self._createShapelet(coeffs) n_order = self._get_num_n(len(coeffs)) dxx_shapelets = self._dxx_shapelets(shapelets, beta) @@ -62,30 +64,29 @@ def hessian(self, x, y, coeffs, beta, center_x=0, center_y=0): f_yy = self._shapeletOutput(x, y, beta, dyy_shapelets, precalc=False) f_xy = self._shapeletOutput(x, y, beta, dxy_shapelets, precalc=False) else: - H_x, H_y = self.pre_calc(x, y, beta, n_order+2, center_x, center_y) + H_x, H_y = self.pre_calc(x, y, beta, n_order + 2, center_x, center_y) f_xx = self._shapeletOutput(H_x, H_y, beta, dxx_shapelets) f_yy = self._shapeletOutput(H_x, H_y, beta, dyy_shapelets) f_xy = self._shapeletOutput(H_x, H_y, beta, dxy_shapelets) return f_xx, f_xy, f_xy, f_yy def _createShapelet(self, coeffs): - """ - returns a shapelet array out of the coefficients *a, up to order l + """Returns a shapelet 
array out of the coefficients *a, up to order l. :param num_l: order of shapelets :type num_l: int. :param coeff: shapelet coefficients :type coeff: floats - :returns: complex array + :returns: complex array :raises: AttributeError, KeyError """ n_coeffs = len(coeffs) num_n = self._get_num_n(n_coeffs) - shapelets=np.zeros((num_n+1, num_n+1)) + shapelets = np.zeros((num_n + 1, num_n + 1)) n = 0 k = 0 for coeff in coeffs: - shapelets[n-k][k] = coeff + shapelets[n - k][k] = coeff k += 1 if k == n + 1: n += 1 @@ -93,26 +94,24 @@ def _createShapelet(self, coeffs): return shapelets def _shapeletOutput(self, x, y, beta, shapelets, precalc=True): - """ - returns the the numerical values of a set of shapelets at polar coordinates - :param shapelets: set of shapelets [l=,r=,a_lr=] - :type shapelets: array of size (n,3) - :param coordPolar: set of coordinates in polar units - :type coordPolar: array of size (n,2) - :returns: array of same size with coords [r,phi] - :raises: AttributeError, KeyError - """ + """Returns the the numerical values of a set of shapelets at polar coordinates + :param shapelets: set of shapelets [l=,r=,a_lr=] :type shapelets: array of size + (n,3) :param coordPolar: set of coordinates in polar units :type coordPolar: + array of size (n,2) :returns: array of same size with coords [r,phi] :raises: + AttributeError, KeyError.""" n = len(np.atleast_1d(x)) if n <= 1: - values = 0. + values = 0.0 else: values = np.zeros(len(x[0])) n = 0 k = 0 i = 0 num_n = len(shapelets) - while i < num_n * (num_n+1)/2: - values += self._function(x, y, shapelets[n-k][k], beta, n-k, k, precalc=precalc) + while i < num_n * (num_n + 1) / 2: + values += self._function( + x, y, shapelets[n - k][k], beta, n - k, k, precalc=precalc + ) k += 1 if k == n + 1: n += 1 @@ -135,41 +134,39 @@ def _function(self, x, y, amp, beta, n1, n2, center_x=0, center_y=0, precalc=Fal return amp * x[n1] * y[n2] / beta x_ = x - center_x y_ = y - center_y - return amp * self.phi_n(n1, x_/beta) * self.phi_n(n2, y_/beta) /beta + return amp * self.phi_n(n1, x_ / beta) * self.phi_n(n2, y_ / beta) / beta def _dx_shapelets(self, shapelets, beta): - """ - computes the derivative d/dx of the shapelet coeffs - :param shapelets: + """Computes the derivative d/dx of the shapelet coeffs :param shapelets: + :param beta: :return: """ num_n = len(shapelets) - dx = np.zeros((num_n+1, num_n+1)) + dx = np.zeros((num_n + 1, num_n + 1)) for n1 in range(num_n): for n2 in range(num_n): amp = shapelets[n1][n2] - dx[n1+1][n2] -= np.sqrt((n1+1)/2.) * amp + dx[n1 + 1][n2] -= np.sqrt((n1 + 1) / 2.0) * amp if n1 > 0: - dx[n1-1][n2] += np.sqrt(n1/2.) * amp - return dx/beta + dx[n1 - 1][n2] += np.sqrt(n1 / 2.0) * amp + return dx / beta def _dy_shapelets(self, shapelets, beta): - """ - computes the derivative d/dx of the shapelet coeffs - :param shapelets: + """Computes the derivative d/dx of the shapelet coeffs :param shapelets: + :param beta: :return: """ num_n = len(shapelets) - dy = np.zeros((num_n+1, num_n+1)) + dy = np.zeros((num_n + 1, num_n + 1)) for n1 in range(num_n): for n2 in range(num_n): amp = shapelets[n1][n2] - dy[n1][n2+1] -= np.sqrt((n2+1)/2.) * amp + dy[n1][n2 + 1] -= np.sqrt((n2 + 1) / 2.0) * amp if n2 > 0: - dy[n1][n2-1] += np.sqrt(n2/2.) 
* amp - return dy/beta + dy[n1][n2 - 1] += np.sqrt(n2 / 2.0) * amp + return dy / beta def _dxx_shapelets(self, shapelets, beta): dx_shapelets = self._dx_shapelets(shapelets, beta) @@ -184,38 +181,37 @@ def _dxy_shapelets(self, shapelets, beta): return self._dx_shapelets(dy_shapelets, beta) def H_n(self, n, x): - """ - constructs the Hermite polynomial of order n at position x (dimensionless) + """Constructs the Hermite polynomial of order n at position x (dimensionless) :param n: The n'the basis function. :type name: int. :param x: 1-dim position (dimensionless) :type state: float or numpy array. - :returns: array-- H_n(x). + :returns: array-- H_n(x). :raises: AttributeError, KeyError """ - n_array = np.zeros(n+1) + n_array = np.zeros(n + 1) n_array[n] = 1 - return hermite.hermval(x, n_array, tensor=False) #attention, this routine calculates every single hermite polynomial and multiplies it with zero (exept the right one) + return hermite.hermval( + x, n_array, tensor=False + ) # attention, this routine calculates every single hermite polynomial and multiplies it with zero (exept the right one) - def phi_n(self,n,x): - """ - constructs the 1-dim basis function (formula (1) in Refregier et al. 2001) + def phi_n(self, n, x): + """Constructs the 1-dim basis function (formula (1) in Refregier et al. 2001) :param n: The n'the basis function. :type name: int. :param x: 1-dim position (dimensionless) :type state: float or numpy array. - :returns: array-- phi_n(x). + :returns: array-- phi_n(x). :raises: AttributeError, KeyError """ - prefactor = 1./np.sqrt(2**n*np.sqrt(np.pi)*math.factorial(n)) - return prefactor*self.H_n(n,x)*np.exp(-x**2/2.) + prefactor = 1.0 / np.sqrt(2**n * np.sqrt(np.pi) * math.factorial(n)) + return prefactor * self.H_n(n, x) * np.exp(-(x**2) / 2.0) def pre_calc(self, x, y, beta, n_order, center_x, center_y): - """ - calculates the H_n(x) and H_n(y) for a given x-array and y-array - :param x: + """Calculates the H_n(x) and H_n(y) for a given x-array and y-array :param x: + :param y: :param amp: :param beta: @@ -228,14 +224,22 @@ def pre_calc(self, x, y, beta, n_order, center_x, center_y): n = len(np.atleast_1d(x)) x_ = x - center_x y_ = y - center_y - H_x = np.empty((n_order+1, n)) - H_y = np.empty((n_order+1, n)) - for n in range(0,n_order+1): - prefactor = 1./np.sqrt(2**n*np.sqrt(np.pi)*math.factorial(n)) - n_array = np.zeros(n+1) + H_x = np.empty((n_order + 1, n)) + H_y = np.empty((n_order + 1, n)) + for n in range(0, n_order + 1): + prefactor = 1.0 / np.sqrt(2**n * np.sqrt(np.pi) * math.factorial(n)) + n_array = np.zeros(n + 1) n_array[n] = 1 - H_x[n] = hermite.hermval(x_/beta, n_array) * prefactor * np.exp(-(x_/beta)**2/2.) - H_y[n] = hermite.hermval(y_/beta, n_array) * prefactor * np.exp(-(y_/beta)**2/2.) + H_x[n] = ( + hermite.hermval(x_ / beta, n_array) + * prefactor + * np.exp(-((x_ / beta) ** 2) / 2.0) + ) + H_y[n] = ( + hermite.hermval(y_ / beta, n_array) + * prefactor + * np.exp(-((y_ / beta) ** 2) / 2.0) + ) return H_x, H_y def _get_num_n(self, n_coeffs): @@ -244,5 +248,5 @@ def _get_num_n(self, n_coeffs): :param n_coeffs: number of coeffs :return: number of n_l of order of the shapelets """ - num_n = round((math.sqrt(8*n_coeffs + 1) -1)/2. 
+0.499) + num_n = round((math.sqrt(8 * n_coeffs + 1) - 1) / 2.0 + 0.499) return int(num_n) diff --git a/lenstronomy/LensModel/Profiles/shapelet_pot_polar.py b/lenstronomy/LensModel/Profiles/shapelet_pot_polar.py index 2595d8a03..1e3f3923d 100644 --- a/lenstronomy/LensModel/Profiles/shapelet_pot_polar.py +++ b/lenstronomy/LensModel/Profiles/shapelet_pot_polar.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # description of the polar shapelets in potential space @@ -9,16 +9,21 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['PolarShapelets'] +__all__ = ["PolarShapelets"] class PolarShapelets(LensProfileBase): - """ - this class contains the function and the derivatives of the Singular Isothermal Sphere - """ - param_names = ['coeffs', 'beta', 'center_x', 'center_y'] - lower_limit_default = {'coeffs': [0], 'beta': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'coeffs': [100], 'beta': 100, 'center_x': 100, 'center_y': 100} + """This class contains the function and the derivatives of the Singular Isothermal + Sphere.""" + + param_names = ["coeffs", "beta", "center_x", "center_y"] + lower_limit_default = {"coeffs": [0], "beta": 0, "center_x": -100, "center_y": -100} + upper_limit_default = { + "coeffs": [100], + "beta": 100, + "center_x": 100, + "center_y": 100, + } def __init__(self): n = 10 @@ -35,9 +40,7 @@ def function(self, x, y, coeffs, beta, center_x=0, center_y=0): return f_ def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" shapelets = self._createShapelet(coeffs) r, phi = param_util.cart2polar(x, y, center_x=center_x, center_y=center_y) alpha1_shapelets, alpha2_shapelets = self._alphaShapelets(shapelets, beta) @@ -46,183 +49,193 @@ def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, coeffs, beta, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" shapelets = self._createShapelet(coeffs) r, phi = param_util.cart2polar(x, y, center_x=center_x, center_y=center_y) - kappa_shapelets=self._kappaShapelets(shapelets, beta) - gamma1_shapelets, gamma2_shapelets=self._gammaShapelets(shapelets, beta) - kappa_value=self._shapeletOutput(r, phi, beta, kappa_shapelets) - gamma1_value=self._shapeletOutput(r, phi, beta, gamma1_shapelets) - gamma2_value=self._shapeletOutput(r, phi, beta, gamma2_shapelets) + kappa_shapelets = self._kappaShapelets(shapelets, beta) + gamma1_shapelets, gamma2_shapelets = self._gammaShapelets(shapelets, beta) + kappa_value = self._shapeletOutput(r, phi, beta, kappa_shapelets) + gamma1_value = self._shapeletOutput(r, phi, beta, gamma1_shapelets) + gamma2_value = self._shapeletOutput(r, phi, beta, gamma2_shapelets) f_xx = kappa_value + gamma1_value f_xy = gamma2_value f_yy = kappa_value - gamma1_value return f_xx, f_xy, f_xy, f_yy - def _createShapelet(self,coeff): - """ - returns a shapelet array out of the coefficients *a, up to order l + def _createShapelet(self, coeff): + """Returns a shapelet array out of the coefficients *a, up to order l. :param num_l: order of shapelets :type num_l: int. 
:param coeff: shapelet coefficients :type coeff: floats - :returns: complex array + :returns: complex array :raises: AttributeError, KeyError """ n_coeffs = len(coeff) num_l = self._get_num_l(n_coeffs) - shapelets=np.zeros((num_l+1,num_l+1),'complex') - nl=0 - k=0 - i=0 + shapelets = np.zeros((num_l + 1, num_l + 1), "complex") + nl = 0 + k = 0 + i = 0 while i < len(coeff): - if i%2==0: - shapelets[nl][k]+=coeff[i]/2. - shapelets[k][nl]+=coeff[i]/2. - if k==nl: - nl+=1 - k=0 - i+=1 + if i % 2 == 0: + shapelets[nl][k] += coeff[i] / 2.0 + shapelets[k][nl] += coeff[i] / 2.0 + if k == nl: + nl += 1 + k = 0 + i += 1 continue else: - k+=1 - i+=1 + k += 1 + i += 1 continue else: - shapelets[nl][k] += 1j*coeff[i]/2. - shapelets[k][nl] -= 1j*coeff[i]/2. - i+=1 + shapelets[nl][k] += 1j * coeff[i] / 2.0 + shapelets[k][nl] -= 1j * coeff[i] / 2.0 + i += 1 return shapelets def _shapeletOutput(self, r, phi, beta, shapelets): - """ - returns the the numerical values of a set of shapelets at polar coordinates - :param shapelets: set of shapelets [l=,r=,a_lr=] - :type shapelets: array of size (n,3) - :param coordPolar: set of coordinates in polar units - :type coordPolar: array of size (n,2) - :returns: array of same size with coords [r,phi] - :raises: AttributeError, KeyError - """ - if type(r) == float or type(r) == int or type(r) == type(np.float64(1)) or len(r) <= 1: - values = 0. + """Returns the the numerical values of a set of shapelets at polar coordinates + :param shapelets: set of shapelets [l=,r=,a_lr=] :type shapelets: array of size + (n,3) :param coordPolar: set of coordinates in polar units :type coordPolar: + array of size (n,2) :returns: array of same size with coords [r,phi] :raises: + AttributeError, KeyError.""" + if ( + type(r) == float + or type(r) == int + or type(r) == type(np.float64(1)) + or len(r) <= 1 + ): + values = 0.0 else: - values = np.zeros(len(r), 'complex') - for nl in range(0,len(shapelets)): #sum over different shapelets - for nr in range(0,len(shapelets)): - value = shapelets[nl][nr]*self._chi_lr(r, phi, nl, nr, beta) + values = np.zeros(len(r), "complex") + for nl in range(0, len(shapelets)): # sum over different shapelets + for nr in range(0, len(shapelets)): + value = shapelets[nl][nr] * self._chi_lr(r, phi, nl, nr, beta) values += value return values.real def _chi_lr(self, r, phi, nl, nr, beta): - """ - computes the generalized polar basis function in the convention of Massey&Refregier eqn 8 + """Computes the generalized polar basis function in the convention of + Massey&Refregier eqn 8. :param nl: left basis :type nl: int :param nr: right basis :type nr: int - :param beta: beta --the characteristic scale typically choosen to be close to the size of the object. + :param beta: beta --the characteristic scale typically choosen to be close to + the size of the object. :type beta: float. :param coord: coordinates [r,phi] :type coord: array(n,2) - :returns: values at positions of coordinates. + :returns: values at positions of coordinates. 
:raises: AttributeError, KeyError """ - m=int((nr-nl).real) - n=int((nr+nl).real) - p=int((n-abs(m))/2) - p2=int((n+abs(m))/2) - q=int(abs(m)) - if p % 2==0: #if p is even - prefac=1 + m = int((nr - nl).real) + n = int((nr + nl).real) + p = int((n - abs(m)) / 2) + p2 = int((n + abs(m)) / 2) + q = int(abs(m)) + if p % 2 == 0: # if p is even + prefac = 1 else: - prefac=-1 - prefactor=prefac/beta**(abs(m)+1)*np.sqrt(math.factorial(p)/(np.pi*math.factorial(p2))) - poly=self.poly[p][q] - return prefactor*r**q*poly((r/beta)**2)*np.exp(-(r/beta)**2/2)*np.exp(-1j*m*phi) + prefac = -1 + prefactor = ( + prefac + / beta ** (abs(m) + 1) + * np.sqrt(math.factorial(p) / (np.pi * math.factorial(p2))) + ) + poly = self.poly[p][q] + return ( + prefactor + * r**q + * poly((r / beta) ** 2) + * np.exp(-((r / beta) ** 2) / 2) + * np.exp(-1j * m * phi) + ) def _kappaShapelets(self, shapelets, beta): - """ - calculates the convergence kappa given lensing potential shapelet coefficients (laplacian/2) - :param shapelets: set of shapelets [l=,r=,a_lr=] - :type shapelets: array of size (n,3) - :returns: set of kappa shapelets. + """Calculates the convergence kappa given lensing potential shapelet + coefficients (laplacian/2) :param shapelets: set of shapelets [l=,r=,a_lr=] + :type shapelets: array of size (n,3) :returns: set of kappa shapelets. + :raises: AttributeError, KeyError """ - output=np.zeros((len(shapelets)+1,len(shapelets)+1),'complex') - for nl in range(0,len(shapelets)): - for nr in range(0,len(shapelets)): - a_lr=shapelets[nl][nr] - if nl>0: - output[nl-1][nr+1]+=a_lr*np.sqrt(nl*(nr+1))/2 - if nr>0: - output[nl-1][nr-1]+=a_lr*np.sqrt(nl*nr)/2 - output[nl+1][nr+1]+=a_lr*np.sqrt((nl+1)*(nr+1))/2 - if nr>0: - output[nl+1][nr-1]+=a_lr*np.sqrt((nl+1)*nr)/2 - return output/beta**2 - - def _alphaShapelets(self,shapelets, beta): - """ - calculates the deflection angles given lensing potential shapelet coefficients (laplacian/2) - :param shapelets: set of shapelets [l=,r=,a_lr=] - :type shapelets: array of size (n,3) - :returns: set of alpha shapelets. + output = np.zeros((len(shapelets) + 1, len(shapelets) + 1), "complex") + for nl in range(0, len(shapelets)): + for nr in range(0, len(shapelets)): + a_lr = shapelets[nl][nr] + if nl > 0: + output[nl - 1][nr + 1] += a_lr * np.sqrt(nl * (nr + 1)) / 2 + if nr > 0: + output[nl - 1][nr - 1] += a_lr * np.sqrt(nl * nr) / 2 + output[nl + 1][nr + 1] += a_lr * np.sqrt((nl + 1) * (nr + 1)) / 2 + if nr > 0: + output[nl + 1][nr - 1] += a_lr * np.sqrt((nl + 1) * nr) / 2 + return output / beta**2 + + def _alphaShapelets(self, shapelets, beta): + """Calculates the deflection angles given lensing potential shapelet + coefficients (laplacian/2) :param shapelets: set of shapelets [l=,r=,a_lr=] + :type shapelets: array of size (n,3) :returns: set of alpha shapelets. + :raises: AttributeError, KeyError """ - output_x = np.zeros((len(shapelets)+1, len(shapelets)+1), 'complex') - output_y = np.zeros((len(shapelets)+1, len(shapelets)+1), 'complex') - for nl in range(0,len(shapelets)): - for nr in range(0,len(shapelets)): - a_lr=shapelets[nl][nr] - output_x[nl][nr+1]-=a_lr*np.sqrt(nr+1)/2 - output_y[nl][nr+1]-=a_lr*np.sqrt(nr+1)/2*1j - output_x[nl+1][nr]-=a_lr*np.sqrt(nl+1)/2 - output_y[nl+1][nr]+=a_lr*np.sqrt(nl+1)/2*1j - if nl>0: - output_x[nl-1][nr]+=a_lr*np.sqrt(nl)/2 - output_y[nl-1][nr]-=a_lr*np.sqrt(nl)/2*1j - if nr>0: - output_x[nl][nr-1]+=a_lr*np.sqrt(nr)/2 - output_y[nl][nr-1]+=a_lr*np.sqrt(nr)/2*1j - return output_x/beta,output_y/beta #attention complex numbers!!!! 
- - def _gammaShapelets(self,shapelets, beta): - """ - calculates the shear gamma given lensing potential shapelet coefficients - :param shapelets: set of shapelets [l=,r=,a_lr=] - :type shapelets: array of size (n,3) - :returns: set of alpha shapelets. + output_x = np.zeros((len(shapelets) + 1, len(shapelets) + 1), "complex") + output_y = np.zeros((len(shapelets) + 1, len(shapelets) + 1), "complex") + for nl in range(0, len(shapelets)): + for nr in range(0, len(shapelets)): + a_lr = shapelets[nl][nr] + output_x[nl][nr + 1] -= a_lr * np.sqrt(nr + 1) / 2 + output_y[nl][nr + 1] -= a_lr * np.sqrt(nr + 1) / 2 * 1j + output_x[nl + 1][nr] -= a_lr * np.sqrt(nl + 1) / 2 + output_y[nl + 1][nr] += a_lr * np.sqrt(nl + 1) / 2 * 1j + if nl > 0: + output_x[nl - 1][nr] += a_lr * np.sqrt(nl) / 2 + output_y[nl - 1][nr] -= a_lr * np.sqrt(nl) / 2 * 1j + if nr > 0: + output_x[nl][nr - 1] += a_lr * np.sqrt(nr) / 2 + output_y[nl][nr - 1] += a_lr * np.sqrt(nr) / 2 * 1j + return output_x / beta, output_y / beta # attention complex numbers!!!! + + def _gammaShapelets(self, shapelets, beta): + """Calculates the shear gamma given lensing potential shapelet coefficients + :param shapelets: set of shapelets [l=,r=,a_lr=] :type shapelets: array of size + (n,3) :returns: set of alpha shapelets. + :raises: AttributeError, KeyError """ - output_x = np.zeros((len(shapelets)+2,len(shapelets)+2),'complex') - output_y = np.zeros((len(shapelets)+2,len(shapelets)+2),'complex') + output_x = np.zeros((len(shapelets) + 2, len(shapelets) + 2), "complex") + output_y = np.zeros((len(shapelets) + 2, len(shapelets) + 2), "complex") for nl in range(0, len(shapelets)): for nr in range(0, len(shapelets)): a_lr = shapelets[nl][nr] - output_x[nl+2][nr] += a_lr*np.sqrt((nl+1)*(nl+2))/2 - output_x[nl][nr+2] += a_lr*np.sqrt((nr+1)*(nr+2))/2 - output_x[nl][nr] += a_lr*(1-(nr+1)-(nl+1)) - if nl>1: - output_x[nl-2][nr] += a_lr*np.sqrt((nl)*(nl-1))/2 - if nr>1: - output_x[nl][nr-2] += a_lr*np.sqrt((nr)*(nr-1))/2 - - output_y[nl+2][nr] += a_lr*np.sqrt((nl+1)*(nl+2))*1j/4 - output_y[nl][nr+2] -= a_lr*np.sqrt((nr+1)*(nr+2))*1j/4 - if nl>0: - output_y[nl-1][nr+1] += a_lr*np.sqrt((nl)*(nr+1))*1j/2 - if nr>0: - output_y[nl+1][nr-1] -= a_lr*np.sqrt((nr)*(nl+1))*1j/2 - if nl>1: - output_y[nl-2][nr] -= a_lr*np.sqrt((nl)*(nl-1))*1j/4 - if nr>1: - output_y[nl][nr-2] += a_lr*np.sqrt((nr)*(nr-1))*1j/4 - return output_x/beta**2, output_y/beta**2 #attention complex numbers!!!! + output_x[nl + 2][nr] += a_lr * np.sqrt((nl + 1) * (nl + 2)) / 2 + output_x[nl][nr + 2] += a_lr * np.sqrt((nr + 1) * (nr + 2)) / 2 + output_x[nl][nr] += a_lr * (1 - (nr + 1) - (nl + 1)) + if nl > 1: + output_x[nl - 2][nr] += a_lr * np.sqrt((nl) * (nl - 1)) / 2 + if nr > 1: + output_x[nl][nr - 2] += a_lr * np.sqrt((nr) * (nr - 1)) / 2 + + output_y[nl + 2][nr] += a_lr * np.sqrt((nl + 1) * (nl + 2)) * 1j / 4 + output_y[nl][nr + 2] -= a_lr * np.sqrt((nr + 1) * (nr + 2)) * 1j / 4 + if nl > 0: + output_y[nl - 1][nr + 1] += a_lr * np.sqrt((nl) * (nr + 1)) * 1j / 2 + if nr > 0: + output_y[nl + 1][nr - 1] -= a_lr * np.sqrt((nr) * (nl + 1)) * 1j / 2 + if nl > 1: + output_y[nl - 2][nr] -= a_lr * np.sqrt((nl) * (nl - 1)) * 1j / 4 + if nr > 1: + output_y[nl][nr - 2] += a_lr * np.sqrt((nr) * (nr - 1)) * 1j / 4 + return ( + output_x / beta**2, + output_y / beta**2, + ) # attention complex numbers!!!! 
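Both shapelet classes recover the maximum order from the length of the flat coefficient vector (_get_num_n above for the Cartesian basis, _get_num_l below for the polar one). For the Cartesian case this is the inverse of the triangular-number count n_coeffs = N * (N + 1) / 2; a minimal sketch with a hypothetical helper name:

import math

def num_orders_cartesian(n_coeffs):
    # same expression as CartShapelets._get_num_n: invert n_coeffs = N * (N + 1) / 2
    return int(round((math.sqrt(8 * n_coeffs + 1) - 1) / 2.0 + 0.499))

# complete coefficient sets of length 1, 3, 6, 10 cover all pairs n1 + n2 < N for N = 1, 2, 3, 4
for n_coeffs in (1, 3, 6, 10):
    print(n_coeffs, num_orders_cartesian(n_coeffs))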
@staticmethod def _get_num_l(n_coeffs): @@ -231,5 +244,5 @@ def _get_num_l(n_coeffs): :param n_coeffs: number of coeffs :return: number of n_l of order of the shapelets """ - num_l = int(round((math.sqrt(8*n_coeffs + 9)-3)/2 +0.499)) + num_l = int(round((math.sqrt(8 * n_coeffs + 9) - 3) / 2 + 0.499)) return num_l diff --git a/lenstronomy/LensModel/Profiles/shear.py b/lenstronomy/LensModel/Profiles/shear.py index 75a607fc4..08598970d 100644 --- a/lenstronomy/LensModel/Profiles/shear.py +++ b/lenstronomy/LensModel/Profiles/shear.py @@ -1,20 +1,19 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.LensModel.Profiles.convergence import Convergence import numpy as np -__all__ = ['Shear', 'ShearGammaPsi', 'ShearReduced'] +__all__ = ["Shear", "ShearGammaPsi", "ShearReduced"] class Shear(LensProfileBase): - """ - class for external shear gamma1, gamma2 expression - """ - param_names = ['gamma1', 'gamma2', 'ra_0', 'dec_0'] - lower_limit_default = {'gamma1': -0.5, 'gamma2': -0.5, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'gamma1': 0.5, 'gamma2': 0.5, 'ra_0': 100, 'dec_0': 100} + """Class for external shear gamma1, gamma2 expression.""" + + param_names = ["gamma1", "gamma2", "ra_0", "dec_0"] + lower_limit_default = {"gamma1": -0.5, "gamma2": -0.5, "ra_0": -100, "dec_0": -100} + upper_limit_default = {"gamma1": 0.5, "gamma2": 0.5, "ra_0": 100, "dec_0": 100} def function(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): """ @@ -29,7 +28,7 @@ def function(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): """ x_ = x - ra_0 y_ = y - dec_0 - f_ = 1/2. * (gamma1 * x_ * x_ + 2 * gamma2 * x_ * y_ - gamma1 * y_ * y_) + f_ = 1 / 2.0 * (gamma1 * x_ * x_ + 2 * gamma2 * x_ * y_ - gamma1 * y_ * y_) return f_ def derivatives(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): @@ -79,9 +78,15 @@ class to model a shear field with shear strength and direction. The translation \\gamma_2 = \\gamma_{ext} \\sin(2 \\phi_{ext}) """ - param_names = ['gamma_ext', 'psi_ext', 'ra_0', 'dec_0'] - lower_limit_default = {'gamma_ext': 0, 'psi_ext': -np.pi, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'gamma_ext': 1, 'psi_ext': np.pi, 'ra_0': 100, 'dec_0': 100} + + param_names = ["gamma_ext", "psi_ext", "ra_0", "dec_0"] + lower_limit_default = { + "gamma_ext": 0, + "psi_ext": -np.pi, + "ra_0": -100, + "dec_0": -100, + } + upper_limit_default = {"gamma_ext": 1, "psi_ext": np.pi, "ra_0": 100, "dec_0": 100} def __init__(self): self._shear_e1e2 = Shear() @@ -100,8 +105,8 @@ def function(x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0): :return: """ # change to polar coordinate - r, phi = param_util.cart2polar(x-ra_0, y-dec_0) - f_ = 1. / 2 * gamma_ext * r ** 2 * np.cos(2 * (phi - psi_ext)) + r, phi = param_util.cart2polar(x - ra_0, y - dec_0) + f_ = 1.0 / 2 * gamma_ext * r**2 * np.cos(2 * (phi - psi_ext)) return f_ def derivatives(self, x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0): @@ -115,10 +120,9 @@ def hessian(self, x, y, gamma_ext, psi_ext, ra_0=0, dec_0=0): class ShearReduced(LensProfileBase): - """ - reduced shear distortions :math:`\\gamma' = \\gamma / (1-\\kappa)`. - This distortion keeps the magnification as unity and, thus, does not change the size of apparent objects. - To keep the magnification at unity, it requires + """Reduced shear distortions :math:`\\gamma' = \\gamma / (1-\\kappa)`. 
This + distortion keeps the magnification as unity and, thus, does not change the size of + apparent objects. To keep the magnification at unity, it requires. .. math:: (1-\\kappa)^2) - \\gamma_1^2 - \\gamma_2^ = 1 @@ -126,9 +130,10 @@ class ShearReduced(LensProfileBase): Thus, for given pair of reduced shear :math:`(\\gamma'_1, \\gamma'_2)`, an additional convergence term is calculated and added to the lensing distortions. """ - param_names = ['gamma1', 'gamma2', 'ra_0', 'dec_0'] - lower_limit_default = {'gamma1': -0.5, 'gamma2': -0.5, 'ra_0': -100, 'dec_0': -100} - upper_limit_default = {'gamma1': 0.5, 'gamma2': 0.5, 'ra_0': 100, 'dec_0': 100} + + param_names = ["gamma1", "gamma2", "ra_0", "dec_0"] + lower_limit_default = {"gamma1": -0.5, "gamma2": -0.5, "ra_0": -100, "dec_0": -100} + upper_limit_default = {"gamma1": 0.5, "gamma2": 0.5, "ra_0": 100, "dec_0": 100} def __init__(self): self._shear = Shear() @@ -137,16 +142,15 @@ def __init__(self): @staticmethod def _kappa_reduced(gamma1, gamma2): - """ - compute convergence such that magnification is unity + """Compute convergence such that magnification is unity. :param gamma1: reduced shear :param gamma2: reduced shear :return: kappa """ - kappa = 1 - 1. / np.sqrt(1 - gamma1**2 - gamma2**2) - gamma1_ = (1-kappa) * gamma1 - gamma2_ = (1-kappa) * gamma2 + kappa = 1 - 1.0 / np.sqrt(1 - gamma1**2 - gamma2**2) + gamma1_ = (1 - kappa) * gamma1 + gamma2_ = (1 - kappa) * gamma2 return kappa, gamma1_, gamma2_ def function(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): @@ -177,7 +181,9 @@ def derivatives(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): :return: deflection angles """ kappa, gamma1_, gamma2_ = self._kappa_reduced(gamma1, gamma2) - f_x_shear, f_y_shear = self._shear.derivatives(x, y, gamma1_, gamma2_, ra_0, dec_0) + f_x_shear, f_y_shear = self._shear.derivatives( + x, y, gamma1_, gamma2_, ra_0, dec_0 + ) f_x_kappa, f_y_kappa = self._convergence.derivatives(x, y, kappa, ra_0, dec_0) return f_x_shear + f_x_kappa, f_y_shear + f_y_kappa @@ -193,8 +199,12 @@ def hessian(self, x, y, gamma1, gamma2, ra_0=0, dec_0=0): :return: f_xx, f_xy, f_yx, f_yy """ kappa, gamma1_, gamma2_ = self._kappa_reduced(gamma1, gamma2) - f_xx_g, f_xy_g, f_yx_g, f_yy_g = self._shear.hessian(x, y, gamma1_, gamma2_, ra_0, dec_0) - f_xx_k, f_xy_k, f_yx_k, f_yy_k = self._convergence.hessian(x, y, kappa, ra_0, dec_0) + f_xx_g, f_xy_g, f_yx_g, f_yy_g = self._shear.hessian( + x, y, gamma1_, gamma2_, ra_0, dec_0 + ) + f_xx_k, f_xy_k, f_yx_k, f_yy_k = self._convergence.hessian( + x, y, kappa, ra_0, dec_0 + ) f_xx = f_xx_g + f_xx_k f_yy = f_yy_g + f_yy_k f_xy = f_xy_g + f_xy_k diff --git a/lenstronomy/LensModel/Profiles/sie.py b/lenstronomy/LensModel/Profiles/sie.py index ef2cc3932..f8b97e811 100644 --- a/lenstronomy/LensModel/Profiles/sie.py +++ b/lenstronomy/LensModel/Profiles/sie.py @@ -1,12 +1,11 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np -__all__ = ['SIE'] +__all__ = ["SIE"] class SIE(LensProfileBase): - """ - class for singular isothermal ellipsoid (SIS with ellipticity) + """Class for singular isothermal ellipsoid (SIS with ellipticity) .. math:: \\kappa(x, y) = \\frac{1}{2} \\left(\\frac{\\theta_{E}}{\\sqrt{q x^2 + y^2/q}} \\right) @@ -29,11 +28,23 @@ class for singular isothermal ellipsoid (SIS with ellipticity) .. math:: \\left(\\frac{\\theta'_{\\rm E}}{\\theta_{\\rm E}}\\right)^{2} = \\frac{2q}{1+q^2}. 
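# Illustrative sketch (not part of the diff above): the reduced-shear construction in
# ShearReduced._kappa_reduced keeps the magnification at unity because, with
# kappa = 1 - 1/sqrt(1 - g1**2 - g2**2) and gamma_i = (1 - kappa) * g_i, the Jacobian
# determinant (1 - kappa)**2 - gamma_1**2 - gamma_2**2 equals exactly 1; this is the
# relation the ".. math::" block in the ShearReduced docstring expresses. A quick
# numerical check, independent of lenstronomy:
import numpy as np

g1, g2 = 0.12, -0.05  # reduced shear components, |g| < 1 assumed
kappa = 1.0 - 1.0 / np.sqrt(1.0 - g1**2 - g2**2)
gamma1, gamma2 = (1.0 - kappa) * g1, (1.0 - kappa) * g2
det_A = (1.0 - kappa) ** 2 - gamma1**2 - gamma2**2  # inverse magnification
assert np.isclose(det_A, 1.0)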
- """ - param_names = ['theta_E', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self, NIE=True): """ @@ -43,9 +54,11 @@ def __init__(self, NIE=True): self._nie = NIE if NIE: from lenstronomy.LensModel.Profiles.nie import NIE + self.profile = NIE() else: from lenstronomy.LensModel.Profiles.epl import EPL + self.profile = EPL() self._s_scale = 0.0000000001 self._gamma = 2 @@ -64,9 +77,13 @@ def function(self, x, y, theta_E, e1, e2, center_x=0, center_y=0): :return: """ if self._nie: - return self.profile.function(x, y, theta_E, e1, e2, self._s_scale, center_x, center_y) + return self.profile.function( + x, y, theta_E, e1, e2, self._s_scale, center_x, center_y + ) else: - return self.profile.function(x, y, theta_E, self._gamma, e1, e2, center_x, center_y) + return self.profile.function( + x, y, theta_E, self._gamma, e1, e2, center_x, center_y + ) def derivatives(self, x, y, theta_E, e1, e2, center_x=0, center_y=0): """ @@ -81,9 +98,13 @@ def derivatives(self, x, y, theta_E, e1, e2, center_x=0, center_y=0): :return: """ if self._nie: - return self.profile.derivatives(x, y, theta_E, e1, e2, self._s_scale, center_x, center_y) + return self.profile.derivatives( + x, y, theta_E, e1, e2, self._s_scale, center_x, center_y + ) else: - return self.profile.derivatives(x, y, theta_E, self._gamma, e1, e2, center_x, center_y) + return self.profile.derivatives( + x, y, theta_E, self._gamma, e1, e2, center_x, center_y + ) def hessian(self, x, y, theta_E, e1, e2, center_x=0, center_y=0): """ @@ -98,14 +119,18 @@ def hessian(self, x, y, theta_E, e1, e2, center_x=0, center_y=0): :return: """ if self._nie: - return self.profile.hessian(x, y, theta_E, e1, e2, self._s_scale, center_x, center_y) + return self.profile.hessian( + x, y, theta_E, e1, e2, self._s_scale, center_x, center_y + ) else: - return self.profile.hessian(x, y, theta_E, self._gamma, e1, e2, center_x, center_y) + return self.profile.hessian( + x, y, theta_E, self._gamma, e1, e2, center_x, center_y + ) @staticmethod def theta2rho(theta_E): - """ - converts projected density parameter (in units of deflection) into 3d density parameter + """Converts projected density parameter (in units of deflection) into 3d density + parameter. :param theta_E: :return: @@ -116,8 +141,7 @@ def theta2rho(theta_E): @staticmethod def mass_3d(r, rho0, e1=0, e2=0): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: radius in angular units :param rho0: density at angle=1 @@ -127,8 +151,8 @@ def mass_3d(r, rho0, e1=0, e2=0): return mass_3d def mass_3d_lens(self, r, theta_E, e1=0, e2=0): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units. 
:param r: radius in angular units :param theta_E: Einstein radius @@ -138,8 +162,7 @@ def mass_3d_lens(self, r, theta_E, e1=0, e2=0): return self.mass_3d(r, rho0) def mass_2d(self, r, rho0, e1=0, e2=0): - """ - mass enclosed projected 2d sphere of radius r + """Mass enclosed projected 2d sphere of radius r. :param r: :param rho0: @@ -147,7 +170,7 @@ def mass_2d(self, r, rho0, e1=0, e2=0): :param e2: :return: """ - alpha = 2 * rho0 * np.pi ** 2 + alpha = 2 * rho0 * np.pi**2 mass_2d = alpha * r return mass_2d @@ -164,8 +187,7 @@ def mass_2d_lens(self, r, theta_E, e1=0, e2=0): return self.mass_2d(r, rho0) def grav_pot(self, x, y, rho0, e1=0, e2=0, center_x=0, center_y=0): - """ - gravitational potential (modulo 4 pi G and rho0 in appropriate units) + """Gravitational potential (modulo 4 pi G and rho0 in appropriate units) :param x: :param y: @@ -180,13 +202,13 @@ def grav_pot(self, x, y, rho0, e1=0, e2=0, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) mass_3d = self.mass_3d(r, rho0) - pot = mass_3d/r + pot = mass_3d / r return pot def density_lens(self, r, theta_E, e1=0, e2=0): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: radius in angles :param theta_E: Einstein radius @@ -199,8 +221,7 @@ def density_lens(self, r, theta_E, e1=0, e2=0): @staticmethod def density(r, rho0, e1=0, e2=0): - """ - computes the density + """Computes the density. :param r: radius in angles :param rho0: density at angle=1 @@ -211,8 +232,7 @@ def density(r, rho0, e1=0, e2=0): @staticmethod def density_2d(x, y, rho0, e1=0, e2=0, center_x=0, center_y=0): - """ - projected density + """Projected density. :param x: :param y: diff --git a/lenstronomy/LensModel/Profiles/sis.py b/lenstronomy/LensModel/Profiles/sis.py index 4709b381c..6ab7db7af 100644 --- a/lenstronomy/LensModel/Profiles/sis.py +++ b/lenstronomy/LensModel/Profiles/sis.py @@ -1,40 +1,36 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SIS'] +__all__ = ["SIS"] class SIS(LensProfileBase): - """ - this class contains the function and the derivatives of the Singular Isothermal Sphere - + """This class contains the function and the derivatives of the Singular Isothermal + Sphere. .. 
math:: \\kappa(x, y) = \\frac{1}{2} \\left(\\frac{\\theta_{E}}{\\sqrt{x^2 + y^2}} \\right) with :math:`\\theta_{E}` is the Einstein radius, - - """ - param_names = ['theta_E', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'center_x': 100, 'center_y': 100} + + param_names = ["theta_E", "center_x", "center_y"] + lower_limit_default = {"theta_E": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"theta_E": 100, "center_x": 100, "center_y": 100} def function(self, x, y, theta_E, center_x=0, center_y=0): x_shift = x - center_x y_shift = y - center_y - f_ = theta_E * np.sqrt(x_shift*x_shift + y_shift*y_shift) + f_ = theta_E * np.sqrt(x_shift * x_shift + y_shift * y_shift) return f_ def derivatives(self, x, y, theta_E, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" x_shift = x - center_x y_shift = y - center_y - R = np.sqrt(x_shift*x_shift + y_shift*y_shift) + R = np.sqrt(x_shift * x_shift + y_shift * y_shift) if isinstance(R, int) or isinstance(R, float): a = theta_E / max(0.000001, R) else: @@ -47,30 +43,28 @@ def derivatives(self, x, y, theta_E, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, theta_E, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" x_shift = x - center_x y_shift = y - center_y - R = (x_shift*x_shift + y_shift*y_shift)**(3./2) + R = (x_shift * x_shift + y_shift * y_shift) ** (3.0 / 2) if isinstance(R, int) or isinstance(R, float): prefac = theta_E / max(0.000001, R) else: prefac = np.empty_like(R) r = R[R > 0] # in the SIS regime - prefac[R == 0] = 0. + prefac[R == 0] = 0.0 prefac[R > 0] = theta_E / r - f_xx = y_shift*y_shift * prefac - f_yy = x_shift*x_shift * prefac - f_xy = -x_shift*y_shift * prefac + f_xx = y_shift * y_shift * prefac + f_yy = x_shift * x_shift * prefac + f_xy = -x_shift * y_shift * prefac return f_xx, f_xy, f_xy, f_yy @staticmethod def rho2theta(rho0): - """ - converts 3d density into 2d projected density parameter - :param rho0: + """Converts 3d density into 2d projected density parameter :param rho0: + :return: """ theta_E = np.pi * 2 * rho0 @@ -78,29 +72,22 @@ def rho2theta(rho0): @staticmethod def theta2rho(theta_E): - """ - converts projected density parameter (in units of deflection) into 3d density parameter - :param theta_E: Einstein radius - :return: - """ + """Converts projected density parameter (in units of deflection) into 3d density + parameter :param theta_E: Einstein radius :return:""" fac1 = np.pi * 2 rho0 = theta_E / fac1 return rho0 @staticmethod def mass_3d(r, rho0): - """ - mass enclosed a 3d sphere or radius r - :param r: radius in angular units - :param rho0: density at angle=1 - :return: mass in angular units - """ + """Mass enclosed a 3d sphere or radius r :param r: radius in angular units + :param rho0: density at angle=1 :return: mass in angular units.""" mass_3d = 4 * np.pi * rho0 * r return mass_3d def mass_3d_lens(self, r, theta_E): - """ - mass enclosed a 3d sphere or radius r given a lens parameterization with angular units + """Mass enclosed a 3d sphere or radius r given a lens parameterization with + angular units. 
:param r: radius in angular units :param theta_E: Einstein radius @@ -111,14 +98,13 @@ def mass_3d_lens(self, r, theta_E): @staticmethod def mass_2d(r, rho0): - """ - mass enclosed projected 2d sphere of radius r - :param r: + """Mass enclosed projected 2d sphere of radius r :param r: + :param rho0: :return: """ - alpha = 2 * rho0 * np.pi ** 2 - mass_2d = alpha*r + alpha = 2 * rho0 * np.pi**2 + mass_2d = alpha * r return mass_2d def mass_2d_lens(self, r, theta_E): @@ -132,9 +118,9 @@ def mass_2d_lens(self, r, theta_E): return self.mass_2d(r, rho0) def grav_pot(self, x, y, rho0, center_x=0, center_y=0): - """ - gravitational potential (modulo 4 pi G and rho0 in appropriate units) - :param x: + """Gravitational potential (modulo 4 pi G and rho0 in appropriate units) :param + x: + :param y: :param rho0: :param center_x: @@ -145,24 +131,20 @@ def grav_pot(self, x, y, rho0, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) mass_3d = self.mass_3d(r, rho0) - pot = mass_3d/r + pot = mass_3d / r return pot @staticmethod def density(r, rho0): - """ - computes the density - :param r: radius in angles - :param rho0: density at angle=1 - :return: density at r - """ + """Computes the density :param r: radius in angles :param rho0: density at + angle=1 :return: density at r.""" rho = rho0 / r**2 return rho def density_lens(self, r, theta_E): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in projected in units of angles (i.e. arc seconds) results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in projected in units of angles (i.e. arc seconds) results in the + convergence quantity. :param r: 3d radius :param theta_E: Einstein radius @@ -173,9 +155,8 @@ def density_lens(self, r, theta_E): @staticmethod def density_2d(x, y, rho0, center_x=0, center_y=0): - """ - projected density - :param x: + """Projected density :param x: + :param y: :param rho0: :param center_x: diff --git a/lenstronomy/LensModel/Profiles/sis_truncate.py b/lenstronomy/LensModel/Profiles/sis_truncate.py index 82e45bb84..e47bb8c01 100644 --- a/lenstronomy/LensModel/Profiles/sis_truncate.py +++ b/lenstronomy/LensModel/Profiles/sis_truncate.py @@ -1,42 +1,56 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SIS_truncate'] +__all__ = ["SIS_truncate"] class SIS_truncate(LensProfileBase): - """ - this class contains the function and the derivatives of the Singular Isothermal Sphere - """ - param_names = ['theta_E', 'r_trunc', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'r_trunc': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'r_trunc': 100, 'center_x': 100, 'center_y': 100} + """This class contains the function and the derivatives of the Singular Isothermal + Sphere.""" + + param_names = ["theta_E", "r_trunc", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "r_trunc": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "r_trunc": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, theta_E, r_trunc, center_x=0, center_y=0): x_shift = x - center_x y_shift = y - center_y - r = np.sqrt(x_shift*x_shift + y_shift*y_shift) + r = np.sqrt(x_shift * x_shift + y_shift * y_shift) if isinstance(r, int) or isinstance(r, float): if r < r_trunc: f_ = theta_E * r - elif r < 2*r_trunc: - 
f_ = theta_E * r_trunc + 1. / 2 * theta_E * (3 - r / r_trunc) * (r - r_trunc) + elif r < 2 * r_trunc: + f_ = theta_E * r_trunc + 1.0 / 2 * theta_E * (3 - r / r_trunc) * ( + r - r_trunc + ) else: - f_ = 3./2 * theta_E * r_trunc + f_ = 3.0 / 2 * theta_E * r_trunc else: f_ = np.zeros_like(r) f_[r < r_trunc] = theta_E * r[r < r_trunc] - r_ = r[(r < 2*r_trunc) & (r > r_trunc)] - f_[(r < 2*r_trunc) & (r > r_trunc)] = theta_E * r_trunc + 1. / 2 * theta_E * (3 - r_ / r_trunc) * (r_ - r_trunc) - f_[r > 2*r_trunc] = 3./2 * theta_E * r_trunc + r_ = r[(r < 2 * r_trunc) & (r > r_trunc)] + f_[ + (r < 2 * r_trunc) & (r > r_trunc) + ] = theta_E * r_trunc + 1.0 / 2 * theta_E * (3 - r_ / r_trunc) * ( + r_ - r_trunc + ) + f_[r > 2 * r_trunc] = 3.0 / 2 * theta_E * r_trunc return f_ def derivatives(self, x, y, theta_E, r_trunc, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function - """ + """Returns df/dx and df/dy of the function.""" x_shift = x - center_x y_shift = y - center_y @@ -47,18 +61,17 @@ def derivatives(self, x, y, theta_E, r_trunc, center_x=0, center_y=0): return f_x, f_y def hessian(self, x, y, theta_E, r_trunc, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 - """ + """Returns Hessian matrix of function d^2f/dx^2, d^2/dxdy, d^2/dydx, + d^f/dy^2.""" x_shift = x - center_x y_shift = y - center_y dphi_dr = self._dphi_dr(x_shift, y_shift, theta_E, r_trunc) d2phi_dr2 = self._d2phi_dr2(x_shift, y_shift, theta_E, r_trunc) dr_dx, dr_dy = self._dr_dx(x, y) d2r_dx2, d2r_dy2, d2r_dxy = self._d2r_dx2(x_shift, y_shift) - f_xx = d2r_dx2*dphi_dr + dr_dx**2*d2phi_dr2 - f_yy = d2r_dy2*dphi_dr + dr_dy**2*d2phi_dr2 - f_xy = d2r_dxy*dphi_dr + dr_dx*dr_dy*d2phi_dr2 + f_xx = d2r_dx2 * dphi_dr + dr_dx**2 * d2phi_dr2 + f_yy = d2r_dy2 * dphi_dr + dr_dy**2 * d2phi_dr2 + f_xy = d2r_dxy * dphi_dr + dr_dx * dr_dy * d2phi_dr2 return f_xx, f_xy, f_xy, f_yy def _dphi_dr(self, x, y, theta_E, r_trunc): @@ -69,52 +82,50 @@ def _dphi_dr(self, x, y, theta_E, r_trunc): :param r_trunc: :return: """ - r = np.sqrt(x*x + y*y) + r = np.sqrt(x * x + y * y) if isinstance(r, int) or isinstance(r, float): if r == 0: a = 0 elif r < r_trunc: a = theta_E - elif r < 2*r_trunc: + elif r < 2 * r_trunc: a = theta_E * (2 - r / r_trunc) else: a = 0 else: a = np.zeros_like(r) a[(r < r_trunc) & (r > 0)] = theta_E - r_ = r[(r < 2*r_trunc) & (r >= r_trunc)] - a[(r < 2*r_trunc) & (r >= r_trunc)] = theta_E * (2 - r_ / r_trunc) - a[r >= 2*r_trunc] = 0 + r_ = r[(r < 2 * r_trunc) & (r >= r_trunc)] + a[(r < 2 * r_trunc) & (r >= r_trunc)] = theta_E * (2 - r_ / r_trunc) + a[r >= 2 * r_trunc] = 0 return a def _d2phi_dr2(self, x, y, theta_E, r_trunc): - """ - second derivative of the potential in radial direction - :param x: + """Second derivative of the potential in radial direction :param x: + :param y: :param theta_E: :param r_trunc: :return: """ - r = np.sqrt(x*x + y*y) + r = np.sqrt(x * x + y * y) if isinstance(r, int) or isinstance(r, float): if r < r_trunc: a = 0 - elif r < 2*r_trunc: + elif r < 2 * r_trunc: a = -theta_E / r_trunc else: a = 0 else: a = np.zeros_like(r) a[r < r_trunc] = 0 - a[(r < 2*r_trunc) & (r > r_trunc)] = -theta_E / r_trunc - a[r > 2*r_trunc] = 0 + a[(r < 2 * r_trunc) & (r > r_trunc)] = -theta_E / r_trunc + a[r > 2 * r_trunc] = 0 return a def _dr_dx(self, x, y): - """ - derivative of dr/dx, dr/dy - :param x: + """Derivative of dr/dx, dr/dy :param x: + :param y: :return: """ @@ -124,14 +135,13 @@ def _dr_dx(self, x, y): if r == 0: r = 1 else: - r[r == 0] = 1 - 
return x/r, y/r + r[r == 0] = 1 + return x / r, y / r @staticmethod def _d2r_dx2(x, y): - """ - second derivative - :param x: + """Second derivative :param x: + :param y: :return: """ @@ -140,5 +150,5 @@ def _d2r_dx2(x, y): if r == 0: r = 1 else: - r[r == 0] = 1 - return y**2/r**3, x**2/r**3, -x*y/r**3 + r[r == 0] = 1 + return y**2 / r**3, x**2 / r**3, -x * y / r**3 diff --git a/lenstronomy/LensModel/Profiles/spemd.py b/lenstronomy/LensModel/Profiles/spemd.py index 1dfe8deb3..c6d6003ff 100644 --- a/lenstronomy/LensModel/Profiles/spemd.py +++ b/lenstronomy/LensModel/Profiles/spemd.py @@ -1,17 +1,17 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SPEMD'] +__all__ = ["SPEMD"] class SPEMD(LensProfileBase): - """ - class for smooth power law ellipse mass density profile (SPEMD). This class effectively performs the FASTELL calculations - by Renan Barkana. The parameters are changed and represent a spherically averaged Einstein radius an a logarithmic - 3D mass profile slope. + """Class for smooth power law ellipse mass density profile (SPEMD). This class + effectively performs the FASTELL calculations by Renan Barkana. The parameters are + changed and represent a spherically averaged Einstein radius an a logarithmic 3D + mass profile slope. The SPEMD mass profile is defined as follow: @@ -48,31 +48,52 @@ class for smooth power law ellipse mass density profile (SPEMD). This class effe .. math:: s2_{fastell} = s_{lenstronomy}^2 * q - """ - param_names = ['theta_E', 'gamma', 'e1', 'e2', 's_scale', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 0, 'e1': -0.5, 'e2': -0.5, 's_scale': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 100, 'e1': 0.5, 'e2': 0.5, 's_scale': 100, 'center_x': 100, 'center_y': 100} - def __init__(self, suppress_fastell=False): - """ + param_names = ["theta_E", "gamma", "e1", "e2", "s_scale", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 0, + "e1": -0.5, + "e2": -0.5, + "s_scale": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 100, + "e1": 0.5, + "e2": 0.5, + "s_scale": 100, + "center_x": 100, + "center_y": 100, + } - """ + def __init__(self, suppress_fastell=False): + """""" try: from fastell4py import fastell4py + self._fastell4py_bool = True self.fastell4py = fastell4py except: self._fastell4py_bool = False if suppress_fastell: - ImportWarning("module fastell4py not installed. You can get it from here: " - "https://github.com/sibirrer/fastell4py " - "Make sure you have a fortran compiler such that the installation works properly.") - Warning("SPEMD model outputs are replaced by zeros as fastell4py package is not installed!") + ImportWarning( + "module fastell4py not installed. You can get it from here: " + "https://github.com/sibirrer/fastell4py " + "Make sure you have a fortran compiler such that the installation works properly." + ) + Warning( + "SPEMD model outputs are replaced by zeros as fastell4py package is not installed!" + ) else: - raise ImportError("module fastell4py not installed. You can get it from here: " - "https://github.com/sibirrer/fastell4py " - "Make sure you have a fortran compiler such that the installation works properly.") + raise ImportError( + "module fastell4py not installed. 
You can get it from here: " + "https://github.com/sibirrer/fastell4py " + "Make sure you have a fortran compiler such that the installation works properly." + ) super(SPEMD, self).__init__() def function(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0): @@ -89,8 +110,9 @@ def function(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0 :param center_y: y-position of lens center :return: lensing potential """ - x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x, - center_y) + x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform( + x, y, theta_E, gamma, e1, e2, s_scale, center_x, center_y + ) compute_bool = self._parameter_constraints(q_fastell, gam, s2, q) if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool: potential = self.fastell4py.ellipphi(x1, x2, q_fastell, gam, arat=q, s2=s2) @@ -102,7 +124,9 @@ def function(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0 potential = np.zeros_like(x1) return potential - def derivatives(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0): + def derivatives( + self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0 + ): """ :param x: x-coordinate (angle) @@ -116,18 +140,21 @@ def derivatives(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_ :param center_y: y-position of lens center :return: deflection angles alpha_x, alpha_y """ - x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x, - center_y) + x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform( + x, y, theta_E, gamma, e1, e2, s_scale, center_x, center_y + ) compute_bool = self._parameter_constraints(q_fastell, gam, s2, q) if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool: - f_x_prim, f_y_prim = self.fastell4py.fastelldefl(x1, x2, q_fastell, gam, arat=q, s2=s2) + f_x_prim, f_y_prim = self.fastell4py.fastelldefl( + x1, x2, q_fastell, gam, arat=q, s2=s2 + ) else: f_x_prim, f_y_prim = np.zeros_like(x1), np.zeros_like(x1) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - f_x = cos_phi*f_x_prim - sin_phi*f_y_prim - f_y = sin_phi*f_x_prim + cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0): @@ -144,38 +171,54 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0) :param center_y: y-position of lens center :return: Hessian components f_xx, f_xy, f_yx, f_yy """ - x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform(x, y, theta_E, gamma, e1, e2, s_scale, center_x, center_y) + x1, x2, q_fastell, gam, s2, q, phi_G = self.param_transform( + x, y, theta_E, gamma, e1, e2, s_scale, center_x, center_y + ) compute_bool = self._parameter_constraints(q_fastell, gam, s2, q) if self._fastell4py_bool and self.is_not_empty(x1, x2) and compute_bool: - f_x_prim, f_y_prim, f_xx_prim, f_yy_prim, f_xy_prim = self.fastell4py.fastellmag(x1, x2, q_fastell, gam, - arat=q, s2=s2) + ( + f_x_prim, + f_y_prim, + f_xx_prim, + f_yy_prim, + f_xy_prim, + ) = self.fastell4py.fastellmag(x1, x2, q_fastell, gam, arat=q, s2=s2) n = len(np.atleast_1d(x)) if n <= 1: if np.shape(x) == (): - f_xx_prim, f_yy_prim, f_xy_prim = np.array(f_xx_prim[0]), np.array(f_yy_prim[0]), np.array( - f_xy_prim[0]) + f_xx_prim, f_yy_prim, f_xy_prim = ( + np.array(f_xx_prim[0]), + np.array(f_yy_prim[0]), + 
np.array(f_xy_prim[0]), + ) else: - f_xx_prim, f_yy_prim, f_xy_prim = np.zeros_like(x1), np.zeros_like(x1), np.zeros_like(x1) - kappa = (f_xx_prim + f_yy_prim)/2 - gamma1_value = (f_xx_prim - f_yy_prim)/2 + f_xx_prim, f_yy_prim, f_xy_prim = ( + np.zeros_like(x1), + np.zeros_like(x1), + np.zeros_like(x1), + ) + kappa = (f_xx_prim + f_yy_prim) / 2 + gamma1_value = (f_xx_prim - f_yy_prim) / 2 gamma2_value = f_xy_prim - gamma1 = np.cos(2*phi_G)*gamma1_value-np.sin(2*phi_G)*gamma2_value - gamma2 = +np.sin(2*phi_G)*gamma1_value+np.cos(2*phi_G)*gamma2_value + gamma1 = np.cos(2 * phi_G) * gamma1_value - np.sin(2 * phi_G) * gamma2_value + gamma2 = +np.sin(2 * phi_G) * gamma1_value + np.cos(2 * phi_G) * gamma2_value f_xx = kappa + gamma1 f_yy = kappa - gamma1 f_xy = gamma2 return f_xx, f_xy, f_xy, f_yy - def param_transform(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0): - """ - transforms parameters in the format of fastell4py + def param_transform( + self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, center_y=0 + ): + """Transforms parameters in the format of fastell4py. :param x: x-coordinate (angle) :param y: y-coordinate (angle) :param theta_E: Einstein radius (angle), pay attention to specific definition! - :param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to isothermal + :param gamma: logarithmic slope of the power-law profile. gamma=2 corresponds to + isothermal :param e1: eccentricity component :param e2: eccentricity component :param s_scale: smoothing scale in the center of the profile @@ -198,8 +241,8 @@ def param_transform(self, x, y, theta_E, gamma, e1, e2, s_scale, center_x=0, cen @staticmethod def convert_params(theta_E, gamma, q, s_scale): - """ - converts parameter definitions into quantities used by the FASTELL fortran library + """Converts parameter definitions into quantities used by the FASTELL fortran + library. :param theta_E: Einstein radius :param gamma: 3D power-law slope of mass profile @@ -207,17 +250,17 @@ def convert_params(theta_E, gamma, q, s_scale): :param s_scale: float, smoothing scale in the core :return: pre-factors to SPEMP profile for FASTELL """ - gam = (gamma-1)/2. - q_fastell = (3-gamma)/2. * (theta_E ** 2 / q) ** gam - s2 = s_scale ** 2 * q + gam = (gamma - 1) / 2.0 + q_fastell = (3 - gamma) / 2.0 * (theta_E**2 / q) ** gam + s2 = s_scale**2 * q return q_fastell, gam, s2 @staticmethod def is_not_empty(x1, x2): - """ - Check if float or not an empty array + """Check if float or not an empty array. - :return: True if x1 and x2 are either floats/ints or an non-empty array, False if e.g. objects are [] + :return: True if x1 and x2 are either floats/ints or an non-empty array, False + if e.g. objects are [] :rtype: bool """ assert type(x1) == type(x2) @@ -232,8 +275,7 @@ def is_not_empty(x1, x2): @staticmethod def _parameter_constraints(q_fastell, gam, s2, q): - """ - sets bounds to parameters due to numerical stability + """Sets bounds to parameters due to numerical stability. 
FASTELL has the following definitons: The parameters are position (x1,x2), overall factor @@ -249,7 +291,13 @@ def _parameter_constraints(q_fastell, gam, s2, q): :param s2: square of smoothing scale of the core :return: bool of whether or not to let the fastell provide to be evaluated or instead return zero(s) """ - if q_fastell <= 0 or q > 1 or q < 0.01 or gam > 0.999 or gam < 0.001 or \ - not np.isfinite(q_fastell): + if ( + q_fastell <= 0 + or q > 1 + or q < 0.01 + or gam > 0.999 + or gam < 0.001 + or not np.isfinite(q_fastell) + ): return False return True diff --git a/lenstronomy/LensModel/Profiles/spep.py b/lenstronomy/LensModel/Profiles/spep.py index 4d579cd7a..765c00f6d 100644 --- a/lenstronomy/LensModel/Profiles/spep.py +++ b/lenstronomy/LensModel/Profiles/spep.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -7,16 +7,29 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.LensModel.Profiles.spp import SPP -__all__ = ['SPEP'] +__all__ = ["SPEP"] class SPEP(LensProfileBase): - """ - class for Softened power-law elliptical potential (SPEP) - """ - param_names = ['theta_E', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + """Class for Softened power-law elliptical potential (SPEP)""" + + param_names = ["theta_E", "gamma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.spp = SPP() @@ -42,53 +55,52 @@ def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): theta_E *= q x_shift = x - center_x y_shift = y - center_y - E = theta_E / (((3 - gamma) / 2.) ** (1. / (1 - gamma)) * np.sqrt(q)) - #E = phi_E - eta = -gamma+3 - xt1 = np.cos(phi_G)*x_shift+np.sin(phi_G)*y_shift - xt2 = -np.sin(phi_G)*x_shift+np.cos(phi_G)*y_shift - p2 = xt1**2+xt2**2/q**2 - s2 = 0. 
# softening - return 2 * E**2/eta**2 * ((p2 + s2)/E**2)**(eta/2) + E = theta_E / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) + # E = phi_E + eta = -gamma + 3 + xt1 = np.cos(phi_G) * x_shift + np.sin(phi_G) * y_shift + xt2 = -np.sin(phi_G) * x_shift + np.cos(phi_G) * y_shift + p2 = xt1**2 + xt2**2 / q**2 + s2 = 0.0 # softening + return 2 * E**2 / eta**2 * ((p2 + s2) / E**2) ** (eta / 2) def derivatives(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): - phi_G, q = param_util.ellipticity2phi_q(e1, e2) gamma, q = self._param_bounds(gamma, q) phi_E_new = theta_E * q x_shift = x - center_x y_shift = y - center_y - E = phi_E_new / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q)) + E = phi_E_new / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) # E = phi_E - eta = float(-gamma+3) + eta = float(-gamma + 3) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) - xt1=cos_phi*x_shift+sin_phi*y_shift - xt2=-sin_phi*x_shift+cos_phi*y_shift - xt2difq2 = xt2/(q*q) - P2=xt1*xt1+xt2*xt2difq2 + xt1 = cos_phi * x_shift + sin_phi * y_shift + xt2 = -sin_phi * x_shift + cos_phi * y_shift + xt2difq2 = xt2 / (q * q) + P2 = xt1 * xt1 + xt2 * xt2difq2 if isinstance(P2, int) or isinstance(P2, float): - a = max(0.000001,P2) + a = max(0.000001, P2) else: - a=np.empty_like(P2) - p2 = P2[P2 > 0] #in the SIS regime + a = np.empty_like(P2) + p2 = P2[P2 > 0] # in the SIS regime a[P2 == 0] = 0.000001 a[P2 > 0] = p2 - fac = 1./eta*(a/(E*E))**(eta/2-1)*2 - f_x_prim = fac*xt1 - f_y_prim = fac*xt2difq2 + fac = 1.0 / eta * (a / (E * E)) ** (eta / 2 - 1) * 2 + f_x_prim = fac * xt1 + f_y_prim = fac * xt2difq2 - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): phi_G, q = param_util.ellipticity2phi_q(e1, e2) gamma, q = self._param_bounds(gamma, q) phi_E_new = theta_E * q - #x_shift = x - center_x - #y_shift = y - center_y + # x_shift = x - center_x + # y_shift = y - center_y # shift x_ = x - center_x @@ -96,31 +108,58 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): # rotate x__, y__ = util.rotate(x_, y_, phi_G) - E = phi_E_new / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q)) + E = phi_E_new / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) if E <= 0: - return np.zeros_like(x), np.zeros_like(x), np.zeros_like(x), np.zeros_like(x) + return ( + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + ) # E = phi_E - eta = float(-gamma+3) - #xt1 = np.cos(phi_G)*x_shift+np.sin(phi_G)*y_shift - #xt2 = -np.sin(phi_G)*x_shift+np.cos(phi_G)*y_shift + eta = float(-gamma + 3) + # xt1 = np.cos(phi_G)*x_shift+np.sin(phi_G)*y_shift + # xt2 = -np.sin(phi_G)*x_shift+np.cos(phi_G)*y_shift xt1, xt2 = x__, y__ - P2 = xt1**2+xt2**2/q**2 + P2 = xt1**2 + xt2**2 / q**2 if isinstance(P2, int) or isinstance(P2, float): a = max(0.000001, P2) else: - a=np.empty_like(P2) - p2 = P2[P2>0] #in the SIS regime - a[P2==0] = 0.000001 - a[P2>0] = p2 - s2 = 0. 
# softening - - kappa=1./eta*(a/E**2)**(eta/2-1)*((eta-2)*(xt1**2+xt2**2/q**4)/a+(1+1/q**2)) - gamma1_value=1./eta*(a/E**2)**(eta/2-1)*(1-1/q**2+(eta/2-1)*(2*xt1**2-2*xt2**2/q**4)/a) - gamma2_value=4*xt1*xt2/q**2*(1./2-1/eta)*(a/E**2)**(eta/2-2)/E**2 - - gamma1 = np.cos(2*phi_G)*gamma1_value-np.sin(2*phi_G)*gamma2_value - gamma2 = +np.sin(2*phi_G)*gamma1_value+np.cos(2*phi_G)*gamma2_value + a = np.empty_like(P2) + p2 = P2[P2 > 0] # in the SIS regime + a[P2 == 0] = 0.000001 + a[P2 > 0] = p2 + s2 = 0.0 # softening + + kappa = ( + 1.0 + / eta + * (a / E**2) ** (eta / 2 - 1) + * ((eta - 2) * (xt1**2 + xt2**2 / q**4) / a + (1 + 1 / q**2)) + ) + gamma1_value = ( + 1.0 + / eta + * (a / E**2) ** (eta / 2 - 1) + * ( + 1 + - 1 / q**2 + + (eta / 2 - 1) * (2 * xt1**2 - 2 * xt2**2 / q**4) / a + ) + ) + gamma2_value = ( + 4 + * xt1 + * xt2 + / q**2 + * (1.0 / 2 - 1 / eta) + * (a / E**2) ** (eta / 2 - 2) + / E**2 + ) + + gamma1 = np.cos(2 * phi_G) * gamma1_value - np.sin(2 * phi_G) * gamma2_value + gamma2 = +np.sin(2 * phi_G) * gamma1_value + np.cos(2 * phi_G) * gamma2_value f_xx = kappa + gamma1 f_yy = kappa - gamma1 @@ -128,8 +167,7 @@ def hessian(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def mass_3d_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the spherical power-law mass enclosed (with SPP routine) + """Computes the spherical power-law mass enclosed (with SPP routine) :param r: radius within the mass is computed :param theta_E: Einstein radius @@ -141,9 +179,9 @@ def mass_3d_lens(self, r, theta_E, gamma, e1=None, e2=None): return self.spp.mass_3d_lens(r, theta_E, gamma) def density_lens(self, r, theta_E, gamma, e1=None, e2=None): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: radius within the mass is computed :param theta_E: Einstein radius @@ -156,8 +194,7 @@ def density_lens(self, r, theta_E, gamma, e1=None, e2=None): @staticmethod def _param_bounds(gamma, q): - """ - bounds parameters + """Bounds parameters. :param gamma: :param q: diff --git a/lenstronomy/LensModel/Profiles/splcore.py b/lenstronomy/LensModel/Profiles/splcore.py index 593eecba6..7e03eac4e 100644 --- a/lenstronomy/LensModel/Profiles/splcore.py +++ b/lenstronomy/LensModel/Profiles/splcore.py @@ -1,17 +1,16 @@ -__author__ = 'dangilman' +__author__ = "dangilman" import numpy as np from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from scipy.special import hyp2f1 from scipy.special import gamma as gamma_func -__all__ = ['SPLCORE'] +__all__ = ["SPLCORE"] class SPLCORE(LensProfileBase): - """ - This lens profile corresponds to a spherical power law (SPL) mass distribution with logarithmic slope gamma and - a 3D core radius r_core + """This lens profile corresponds to a spherical power law (SPL) mass distribution + with logarithmic slope gamma and a 3D core radius r_core. .. 
math:: @@ -28,13 +27,24 @@ class SPLCORE(LensProfileBase): This class is defined for all gamma > 1 """ - param_names = ['sigma0', 'center_x', 'center_y', 'r_core', 'gamma'] - lower_limit_default = {'sigma0': 0, 'center_x': -100, 'center_y': -100, 'r_core': 1e-6, 'gamma': 1.+1e-6} - upper_limit_default = {'sigma0': 1e+12, 'center_x': 100, 'center_y': 100, 'r_core': 100, 'gamma': 5.} + param_names = ["sigma0", "center_x", "center_y", "r_core", "gamma"] + lower_limit_default = { + "sigma0": 0, + "center_x": -100, + "center_y": -100, + "r_core": 1e-6, + "gamma": 1.0 + 1e-6, + } + upper_limit_default = { + "sigma0": 1e12, + "center_x": 100, + "center_y": 100, + "r_core": 100, + "gamma": 5.0, + } def function(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): - - raise Exception('potential not implemented for this class') + raise Exception("potential not implemented for this class") def derivatives(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): """ @@ -54,8 +64,8 @@ def derivatives(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): r = self._safe_r_division(r, r_core) alpha_r = self.alpha(r, sigma0, r_core, gamma) - cos = x_/r - sin = y_/r + cos = x_ / r + sin = y_ / r return alpha_r * cos, alpha_r * sin def hessian(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): @@ -79,9 +89,9 @@ def hessian(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): rho0 = self._sigma2rho0(sigma0, r_core) kappa = self.density_2d(x_, y_, rho0, r_core, gamma) - gamma_tot = m2d / r ** 2 - kappa - sin_2phi = -2*x_*y_/r**2 - cos_2phi = (y_**2 - x_**2)/r**2 + gamma_tot = m2d / r**2 - kappa + sin_2phi = -2 * x_ * y_ / r**2 + cos_2phi = (y_**2 - x_**2) / r**2 gamma1 = cos_2phi * gamma_tot gamma2 = sin_2phi * gamma_tot @@ -92,9 +102,7 @@ def hessian(self, x, y, sigma0, r_core, gamma, center_x=0, center_y=0): return f_xx, f_xy, f_xy, f_yy def alpha(self, r, sigma0, r_core, gamma): - - """ - Returns the deflection angle at r + """Returns the deflection angle at r. :param r: radius [arcsec] :param sigma0: convergence at r=0 @@ -107,8 +115,7 @@ def alpha(self, r, sigma0, r_core, gamma): @staticmethod def density(r, rho0, r_core, gamma): - """ - Returns the 3D density at r + """Returns the 3D density at r. :param r: radius [arcsec] :param rho0: convergence at r=0 @@ -116,11 +123,10 @@ def density(r, rho0, r_core, gamma): :param gamma: logarithmic slope at r -> infinity :return: density at r """ - return rho0 * r_core ** gamma / (r_core**2 + r**2) ** (gamma/2) + return rho0 * r_core**gamma / (r_core**2 + r**2) ** (gamma / 2) def density_lens(self, r, sigma0, r_core, gamma): - """ - Returns the 3D density at r + """Returns the 3D density at r. :param r: radius [arcsec] :param sigma0: convergence at r=0 @@ -129,11 +135,10 @@ def density_lens(self, r, sigma0, r_core, gamma): :return: density at r """ rho0 = self._sigma2rho0(sigma0, r_core) - return rho0 * r_core ** gamma / (r_core**2 + r**2) ** (gamma/2) + return rho0 * r_core**gamma / (r_core**2 + r**2) ** (gamma / 2) def _density_2d_r(self, r, rho0, r_core, gamma): - """ - Returns the convergence at radius r after applying _safe_r_division + """Returns the convergence at radius r after applying _safe_r_division. 
:param r: position [arcsec] :param rho0: convergence at r=0 @@ -143,18 +148,22 @@ def _density_2d_r(self, r, rho0, r_core, gamma): """ sigma0 = self._rho02sigma(rho0, r_core) if gamma == 3: - return 2 * r_core ** 2 * sigma0 / (r ** 2 + r_core ** 2) + return 2 * r_core**2 * sigma0 / (r**2 + r_core**2) elif gamma == 2: - return np.pi * r_core * sigma0 / (r_core ** 2 + r ** 2) ** 0.5 + return np.pi * r_core * sigma0 / (r_core**2 + r**2) ** 0.5 else: x = r / r_core exponent = (1 - gamma) / 2 - return sigma0 * np.sqrt(np.pi) * gamma_func(0.5 * (gamma - 1)) / gamma_func(0.5 * gamma) * ( - 1 + x ** 2) ** exponent + return ( + sigma0 + * np.sqrt(np.pi) + * gamma_func(0.5 * (gamma - 1)) + / gamma_func(0.5 * gamma) + * (1 + x**2) ** exponent + ) def density_2d(self, x, y, rho0, r_core, gamma): - """ - Returns the convergence at radius r + """Returns the convergence at radius r. :param x: x position [arcsec] :param y: y position [arcsec] @@ -169,21 +178,20 @@ def density_2d(self, x, y, rho0, r_core, gamma): return self._density_2d_r(r, rho0, r_core, gamma) def mass_3d(self, r, rho0, r_core, gamma): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: radius [arcsec] - :param rho0: density at r = 0 in units [rho_0_physical / sigma_crit] (which should be equal to [arcsec]) - where rho_0_physical is a physical density normalization and sigma_crit is the critical density for lensing + :param rho0: density at r = 0 in units [rho_0_physical / sigma_crit] (which + should be equal to [arcsec]) where rho_0_physical is a physical density + normalization and sigma_crit is the critical density for lensing :param r_core: core radius [arcsec] :param gamma: logarithmic slope at r -> infinity :return: mass inside radius r """ - return 4 * np.pi * r_core ** 3 * rho0 * self._g(r/r_core, gamma) + return 4 * np.pi * r_core**3 * rho0 * self._g(r / r_core, gamma) def mass_3d_lens(self, r, sigma0, r_core, gamma): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: radius [arcsec] :param sigma0: convergence at r = 0 @@ -195,25 +203,24 @@ def mass_3d_lens(self, r, sigma0, r_core, gamma): return self.mass_3d(r, rho0, r_core, gamma) def mass_2d(self, r, rho0, r_core, gamma): - """ - mass enclosed projected 2d disk of radius r + """Mass enclosed projected 2d disk of radius r. :param r: radius [arcsec] - :param rho0: density at r = 0 in units [rho_0_physical / sigma_crit] (which should be equal to [1/arcsec]) - where rho_0_physical is a physical density normalization and sigma_crit is the critical density for lensing + :param rho0: density at r = 0 in units [rho_0_physical / sigma_crit] (which + should be equal to [1/arcsec]) where rho_0_physical is a physical density + normalization and sigma_crit is the critical density for lensing :param r_core: core radius [arcsec] :param gamma: logarithmic slope at r -> infinity :return: projected mass inside disk of radius r """ - return 4 * np.pi * r_core ** 3 * rho0 * self._f(r/r_core, gamma) + return 4 * np.pi * r_core**3 * rho0 * self._f(r / r_core, gamma) def mass_2d_lens(self, r, sigma0, r_core, gamma): - """ - mass enclosed projected 2d disk of radius r + """Mass enclosed projected 2d disk of radius r. 
:param r: radius [arcsec] - :param sigma0: convergence at r = 0 - where rho_0_physical is a physical density normalization and sigma_crit is the critical density for lensing + :param sigma0: convergence at r = 0 where rho_0_physical is a physical density + normalization and sigma_crit is the critical density for lensing :param r_core: core radius [arcsec] :param gamma: logarithmic slope at r -> infinity :return: projected mass inside disk of radius r @@ -223,8 +230,7 @@ def mass_2d_lens(self, r, sigma0, r_core, gamma): @staticmethod def _safe_r_division(r, r_core, x_min=1e-6): - """ - Avoids accidental division by 0 + """Avoids accidental division by 0. :param r: radius in arcsec :param r_core: core radius in arcsec @@ -239,8 +245,7 @@ def _safe_r_division(r, r_core, x_min=1e-6): @staticmethod def _sigma2rho0(sigma0, r_core): - """ - Converts the convergence normalization to the 3d normalization + """Converts the convergence normalization to the 3d normalization. :param sigma0: convergence at r=0 :param r_core: core radius [arcsec] @@ -250,8 +255,7 @@ def _sigma2rho0(sigma0, r_core): @staticmethod def _rho02sigma(rho0, r_core): - """ - Converts the convergence normalization to the 3d normalization + """Converts the convergence normalization to the 3d normalization. :param rho0: convergence at r=0 :param r_core: core radius [arcsec] @@ -261,8 +265,7 @@ def _rho02sigma(rho0, r_core): @staticmethod def _f(x, gamma): - """ - Returns the solution of the 2D mass integral defined such that + """Returns the solution of the 2D mass integral defined such that. .. math:: @@ -274,20 +277,19 @@ def _f(x, gamma): :return: a number """ if gamma == 3: - return 0.5 * np.log(x ** 2 + 1) + return 0.5 * np.log(x**2 + 1) elif gamma == 2: - return np.pi/2 * ((x**2 + 1)**0.5 - 1) + return np.pi / 2 * ((x**2 + 1) ** 0.5 - 1) else: gamma_func_term = gamma_func(0.5 * (gamma - 1)) / gamma_func(0.5 * gamma) prefactor = np.sqrt(np.pi) * gamma_func_term / (2 * (gamma - 3)) - term = (1 - (1 + x ** 2) ** ((3 - gamma) / 2)) + term = 1 - (1 + x**2) ** ((3 - gamma) / 2) return prefactor * term @staticmethod def _g(x, gamma): - """ - Returns the solution of the 3D mass integral defined such that - Returns the solution of the 2D mass integral defined such that + """Returns the solution of the 3D mass integral defined such that Returns the + solution of the 2D mass integral defined such that. .. 
math:: m_{\\rm{3D}}\\left(R\\right) = 4 \\pi r_{\\rm{core}}^3 @@ -299,11 +301,12 @@ def _g(x, gamma): """ if gamma == 3: - return np.arcsinh(x) - x / (1 + x ** 2) ** 0.5 + return np.arcsinh(x) - x / (1 + x**2) ** 0.5 elif gamma == 2: return x - np.arctan(x) else: prefactor = 1 / ((gamma - 3) * (gamma - 1)) / x - term = hyp2f1(-0.5, gamma / 2, 0.5, -x ** 2) - (1 + x ** 2) ** ((2 - gamma) / 2) * ( - 1 + x ** 2 * (gamma - 1)) + term = hyp2f1(-0.5, gamma / 2, 0.5, -(x**2)) - (1 + x**2) ** ( + (2 - gamma) / 2 + ) * (1 + x**2 * (gamma - 1)) return prefactor * term diff --git a/lenstronomy/LensModel/Profiles/spp.py b/lenstronomy/LensModel/Profiles/spp.py index 9927ea929..113ca724e 100644 --- a/lenstronomy/LensModel/Profiles/spp.py +++ b/lenstronomy/LensModel/Profiles/spp.py @@ -1,20 +1,29 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import scipy.special as special from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['SPP'] +__all__ = ["SPP"] class SPP(LensProfileBase): - """ - class for circular power-law mass distribution - """ - param_names = ['theta_E', 'gamma', 'center_x', 'center_y'] - lower_limit_default = {'theta_E': 0, 'gamma': 1.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'theta_E': 100, 'gamma': 2.5, 'center_x': 100, 'center_y': 100} + """Class for circular power-law mass distribution.""" + + param_names = ["theta_E", "gamma", "center_x", "center_y"] + lower_limit_default = { + "theta_E": 0, + "gamma": 1.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "theta_E": 100, + "gamma": 2.5, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, theta_E, gamma, center_x=0, center_y=0): """ @@ -33,50 +42,61 @@ def function(self, x, y, theta_E, gamma, center_x=0, center_y=0): x_ = x - center_x y_ = y - center_y - E = theta_E / ((3. - gamma) / 2.) ** (1. / (1. - gamma)) + E = theta_E / ((3.0 - gamma) / 2.0) ** (1.0 / (1.0 - gamma)) # E = phi_E_spp - eta= -gamma + 3 + eta = -gamma + 3 - p2 = x_**2+y_**2 - s2 = 0. # softening - return 2 * E**2/eta**2 * ((p2 + s2)/E**2)**(eta/2) - - def derivatives(self, x, y, theta_E, gamma, center_x=0., center_y=0.): + p2 = x_**2 + y_**2 + s2 = 0.0 # softening + return 2 * E**2 / eta**2 * ((p2 + s2) / E**2) ** (eta / 2) + def derivatives(self, x, y, theta_E, gamma, center_x=0.0, center_y=0.0): gamma = self._gamma_limit(gamma) xt1 = x - center_x xt2 = y - center_y - r2 = xt1*xt1+xt2*xt2 + r2 = xt1 * xt1 + xt2 * xt2 a = np.maximum(r2, 0.000001) r = np.sqrt(a) - alpha = theta_E * (r2/theta_E**2) ** (1 - gamma/2.) + alpha = theta_E * (r2 / theta_E**2) ** (1 - gamma / 2.0) fac = alpha / r - f_x = fac*xt1 - f_y = fac*xt2 + f_x = fac * xt1 + f_y = fac * xt2 return f_x, f_y - def hessian(self, x, y, theta_E, gamma, center_x=0., center_y=0.): + def hessian(self, x, y, theta_E, gamma, center_x=0.0, center_y=0.0): gamma = self._gamma_limit(gamma) xt1 = x - center_x xt2 = y - center_y - E = theta_E / ((3. - gamma) / 2.) ** (1. / (1. - gamma)) + E = theta_E / ((3.0 - gamma) / 2.0) ** (1.0 / (1.0 - gamma)) # E = phi_E_spp - eta = -gamma + 3. 
+ eta = -gamma + 3.0 - P2 = xt1**2+xt2**2 + P2 = xt1**2 + xt2**2 if isinstance(P2, int) or isinstance(P2, float): a = max(0.000001, P2) else: a = np.empty_like(P2) - p2 = P2[P2 > 0] #in the SIS regime + p2 = P2[P2 > 0] # in the SIS regime a[P2 == 0] = 0.000001 a[P2 > 0] = p2 - kappa = 1./eta*(a/E**2)**(eta/2-1)*((eta-2)*(xt1**2+xt2**2)/a+(1+1)) - gamma1 = 1./eta*(a/E**2)**(eta/2-1)*((eta/2-1)*(2*xt1**2-2*xt2**2)/a) - gamma2 = 4*xt1*xt2*(1./2-1/eta)*(a/E**2)**(eta/2-2)/E**2 + kappa = ( + 1.0 + / eta + * (a / E**2) ** (eta / 2 - 1) + * ((eta - 2) * (xt1**2 + xt2**2) / a + (1 + 1)) + ) + gamma1 = ( + 1.0 + / eta + * (a / E**2) ** (eta / 2 - 1) + * ((eta / 2 - 1) * (2 * xt1**2 - 2 * xt2**2) / a) + ) + gamma2 = ( + 4 * xt1 * xt2 * (1.0 / 2 - 1 / eta) * (a / E**2) ** (eta / 2 - 2) / E**2 + ) f_xx = kappa + gamma1 f_yy = kappa - gamma1 @@ -85,44 +105,55 @@ def hessian(self, x, y, theta_E, gamma, center_x=0., center_y=0.): @staticmethod def rho2theta(rho0, gamma): - """ - converts 3d density into 2d projected density parameter + """Converts 3d density into 2d projected density parameter. :param rho0: :param gamma: :return: """ - fac = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma) * rho0 - - #fac = theta_E**(gamma - 1) - theta_E = fac**(1. / (gamma - 1)) + fac = ( + np.sqrt(np.pi) + * special.gamma(1.0 / 2 * (-1 + gamma)) + / special.gamma(gamma / 2.0) + * 2 + / (3 - gamma) + * rho0 + ) + + # fac = theta_E**(gamma - 1) + theta_E = fac ** (1.0 / (gamma - 1)) return theta_E @staticmethod def theta2rho(theta_E, gamma): - """ - converts projected density parameter (in units of deflection) into 3d density parameter + """Converts projected density parameter (in units of deflection) into 3d density + parameter. :param theta_E: :param gamma: :return: """ - fac1 = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) * 2 / (3 - gamma) - fac2 = theta_E**(gamma - 1) + fac1 = ( + np.sqrt(np.pi) + * special.gamma(1.0 / 2 * (-1 + gamma)) + / special.gamma(gamma / 2.0) + * 2 + / (3 - gamma) + ) + fac2 = theta_E ** (gamma - 1) rho0 = fac2 / fac1 return rho0 @staticmethod def mass_3d(r, rho0, gamma): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: :param rho0: :param gamma: :return: """ - mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3) + mass_3d = 4 * np.pi * rho0 / (-gamma + 3) * r ** (-gamma + 3) return mass_3d def mass_3d_lens(self, r, theta_E, gamma): @@ -137,16 +168,23 @@ def mass_3d_lens(self, r, theta_E, gamma): return self.mass_3d(r, rho0, gamma) def mass_2d(self, r, rho0, gamma): - """ - mass enclosed projected 2d sphere of radius r + """Mass enclosed projected 2d sphere of radius r. :param r: :param rho0: :param gamma: :return: """ - alpha = np.sqrt(np.pi) * special.gamma(1. / 2 * (-1 + gamma)) / special.gamma(gamma / 2.) 
* r ** (2 - gamma)/(3 - gamma) * 2 * rho0 - mass_2d = alpha*r * np.pi + alpha = ( + np.sqrt(np.pi) + * special.gamma(1.0 / 2 * (-1 + gamma)) + / special.gamma(gamma / 2.0) + * r ** (2 - gamma) + / (3 - gamma) + * 2 + * rho0 + ) + mass_2d = alpha * r * np.pi return mass_2d def mass_2d_lens(self, r, theta_E, gamma): @@ -161,8 +199,7 @@ def mass_2d_lens(self, r, theta_E, gamma): return self.mass_2d(r, rho0, gamma) def grav_pot(self, x, y, rho0, gamma, center_x=0, center_y=0): - """ - gravitational potential (modulo 4 pi G and rho0 in appropriate units) + """Gravitational potential (modulo 4 pi G and rho0 in appropriate units) :param x: :param y: @@ -176,13 +213,12 @@ def grav_pot(self, x, y, rho0, gamma, center_x=0, center_y=0): y_ = y - center_y r = np.sqrt(x_**2 + y_**2) mass_3d = self.mass_3d(r, rho0, gamma) - pot = mass_3d/r + pot = mass_3d / r return pot @staticmethod def density(r, rho0, gamma): - """ - computes the density + """Computes the density. :param r: :param rho0: @@ -193,18 +229,17 @@ def density(r, rho0, gamma): return rho def density_lens(self, r, theta_E, gamma): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in projected in units of angles (i.e. arc seconds) results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. + The integral in projected in units of angles (i.e. arc seconds) results in the + convergence quantity. """ rho0 = self.theta2rho(theta_E, gamma) return self.density(r, rho0, gamma) @staticmethod def density_2d(x, y, rho0, gamma, center_x=0, center_y=0): - """ - projected density + """Projected density. :param x: :param y: @@ -217,13 +252,18 @@ def density_2d(x, y, rho0, gamma, center_x=0, center_y=0): x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) - sigma = np.sqrt(np.pi) * special.gamma(1./2*(-1+gamma))/special.gamma(gamma/2.) * r**(1-gamma) * rho0 + sigma = ( + np.sqrt(np.pi) + * special.gamma(1.0 / 2 * (-1 + gamma)) + / special.gamma(gamma / 2.0) + * r ** (1 - gamma) + * rho0 + ) return sigma @staticmethod def _gamma_limit(gamma): - """ - limits the power-law slope to certain bounds + """Limits the power-law slope to certain bounds. :param gamma: power-law slope :return: bounded power-law slopte diff --git a/lenstronomy/LensModel/Profiles/synthesis.py b/lenstronomy/LensModel/Profiles/synthesis.py index 6ef293a2f..38913b5c2 100644 --- a/lenstronomy/LensModel/Profiles/synthesis.py +++ b/lenstronomy/LensModel/Profiles/synthesis.py @@ -1,4 +1,4 @@ -__author__ = 'mgomer' +__author__ = "mgomer" from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase import numpy as np @@ -8,10 +8,9 @@ from lenstronomy.LensModel.lens_model import LensModel - class SynthesisProfile(LensProfileBase): - """ - A general class which describes a linear sum of many simple profiles to approximate a target profile + """A general class which describes a linear sum of many simple profiles to + approximate a target profile. Example: Mimic an NFW profile with many CSE profiles. In this case, you could use LensModel(['SYNTHESIS'],kwargs_synthesis=kwargs_synthesis) with kwargs_synthesis={'target_lens_model': 'NFW', @@ -20,10 +19,13 @@ class SynthesisProfile(LensProfileBase): 'lin_fit_hyperparams':{'lower_log_bound':-6, 'upper_log_bound':3, 'num_r_evals':100, 'sigma':0.01} (default values) } where kwargs_list would be a list of input CSE kwargs (where the amplitude will be re-adjusted). 
- """ - profile_name = 'SYNTHESIS' - def __init__(self, target_lens_model, component_lens_model, kwargs_list, lin_fit_hyperparams): + + profile_name = "SYNTHESIS" + + def __init__( + self, target_lens_model, component_lens_model, kwargs_list, lin_fit_hyperparams + ): """ :param target_lens_model: name of target profile :param component_lens_model: name of component profile @@ -34,11 +36,10 @@ def __init__(self, target_lens_model, component_lens_model, kwargs_list, lin_fit super(SynthesisProfile, self).__init__() self.target_class = LensModel([target_lens_model]) self.component_class = LensModel([component_lens_model]) - self.kwargs_list=kwargs_list - self.lin_fit_hyperparams=lin_fit_hyperparams + self.kwargs_list = kwargs_list + self.lin_fit_hyperparams = lin_fit_hyperparams self.check_num_evals() - def linear_weight_mle_fit(self, kwargs_target, kwargs_list): if self._static is True: return self._linear_weights @@ -46,30 +47,38 @@ def linear_weight_mle_fit(self, kwargs_target, kwargs_list): return self._linear_weight_mle_fit(kwargs_target, kwargs_list) def _linear_weight_mle_fit(self, kwargs_target, kwargs_list): - """ - Fits a linear fit of the amplitudes for each component to minimize a chi2. + """Fits a linear fit of the amplitudes for each component to minimize a chi2. :param kwargs_target: kwargs of target profile to be approximated - :param kwargs_list: list of kwargs of component profile, length of list corresponds to number of components used to fit. - The normalization (must be nonzero) will be effectively overridden by the linear weights + :param kwargs_list: list of kwargs of component profile, length of list + corresponds to number of components used to fit. The normalization (must be + nonzero) will be effectively overridden by the linear weights """ self.set_limits(kwargs_list, self.lin_fit_hyperparams) - kwargs_target_centered=[self.circular_centered_kwargs(kwargs_target[0])] - Y = self.target_class.kappa(x=self.r_eval_list, y=np.zeros_like(self.r_eval_list), kwargs=kwargs_target_centered) - M = np.zeros((len(self.r_eval_list),self.num_components)) - C = np.diag(Y * self.sigma) #covariance matrix between components - for j in range(self.num_components): # M[i,j] is the jth component 1D kappa evaluated at r[i]. + kwargs_target_centered = [self.circular_centered_kwargs(kwargs_target[0])] + Y = self.target_class.kappa( + x=self.r_eval_list, + y=np.zeros_like(self.r_eval_list), + kwargs=kwargs_target_centered, + ) + M = np.zeros((len(self.r_eval_list), self.num_components)) + C = np.diag(Y * self.sigma) # covariance matrix between components + for j in range( + self.num_components + ): # M[i,j] is the jth component 1D kappa evaluated at r[i]. kwargs = self.circular_centered_kwargs(kwargs_list[j]) - M[:, j] = self.component_class.kappa(x=self.r_eval_list, y=0, kwargs=[kwargs]) - MTinvC=np.matmul(M.T,np.linalg.inv(C)) - first_term=np.linalg.inv(np.matmul(MTinvC,M)) - second_term=np.matmul(MTinvC,Y) - return np.matmul(first_term,second_term) + M[:, j] = self.component_class.kappa( + x=self.r_eval_list, y=0, kwargs=[kwargs] + ) + MTinvC = np.matmul(M.T, np.linalg.inv(C)) + first_term = np.linalg.inv(np.matmul(MTinvC, M)) + second_term = np.matmul(MTinvC, Y) + return np.matmul(first_term, second_term) def set_static(self, linear_weights): - """ - Sets weights to be static self values. Useful to call e.g. function many times with the same kwargs. - If kwargs_target or kwargs_list change, need to rerun linear fit by using set_dynamic. + """Sets weights to be static self values. 
Useful to call e.g. function many + times with the same kwargs. If kwargs_target or kwargs_list change, need to + rerun linear fit by using set_dynamic. :param linear_weights: output of LinearWeightMLEFit :return: self weights set @@ -79,25 +88,24 @@ def set_static(self, linear_weights): def set_dynamic(self): self._static = False - if hasattr(self, '_linear_weights'): + if hasattr(self, "_linear_weights"): del self._linear_weights - def circular_centered_kwargs(self,kwargs): + def circular_centered_kwargs(self, kwargs): """ :param kwargs: kwargs to remove center and ellipticity for linear fit. These are re-added when functions are called """ - kwargs_new=copy.deepcopy(kwargs) - if 'e1' in kwargs_new: - kwargs_new['e1']=0 - if 'e2' in kwargs_new: - kwargs_new['e2']=0 - if 'center_x' in kwargs_new: - kwargs_new['center_x']=0 - if 'center_y' in kwargs_new: - kwargs_new['center_y']=0 + kwargs_new = copy.deepcopy(kwargs) + if "e1" in kwargs_new: + kwargs_new["e1"] = 0 + if "e2" in kwargs_new: + kwargs_new["e2"] = 0 + if "center_x" in kwargs_new: + kwargs_new["center_x"] = 0 + if "center_y" in kwargs_new: + kwargs_new["center_y"] = 0 return kwargs_new - def set_limits(self, kwargs_list, lin_fit_hyperparams): """ :param kwargs_list: list of kwargs of component profile @@ -107,50 +115,54 @@ def set_limits(self, kwargs_list, lin_fit_hyperparams): 'num_r_evals': number of locations to evaluate fit to minimize chi2, must be larger than the number of components 'sigma': used to evaluate chi2. default is 1% """ - self.num_components=len(kwargs_list) - if 'lower_log_bound' not in lin_fit_hyperparams: + self.num_components = len(kwargs_list) + if "lower_log_bound" not in lin_fit_hyperparams: self.lower_log_bound = -6 else: - self.lower_log_bound = lin_fit_hyperparams['lower_log_bound'] - if 'upper_log_bound' not in lin_fit_hyperparams: + self.lower_log_bound = lin_fit_hyperparams["lower_log_bound"] + if "upper_log_bound" not in lin_fit_hyperparams: self.upper_log_bound = 3 else: - self.upper_log_bound = lin_fit_hyperparams['upper_log_bound'] - if 'num_r_evals' not in lin_fit_hyperparams: - self.num_r_evals = 100 + self.upper_log_bound = lin_fit_hyperparams["upper_log_bound"] + if "num_r_evals" not in lin_fit_hyperparams: + self.num_r_evals = 100 else: - self.num_r_evals = lin_fit_hyperparams['num_r_evals'] - if 'sigma' not in lin_fit_hyperparams: + self.num_r_evals = lin_fit_hyperparams["num_r_evals"] + if "sigma" not in lin_fit_hyperparams: self.sigma = 0.01 else: - self.sigma = lin_fit_hyperparams['sigma'] - self.r_eval_list=np.logspace(self.lower_log_bound,self.upper_log_bound,self.num_r_evals) + self.sigma = lin_fit_hyperparams["sigma"] + self.r_eval_list = np.logspace( + self.lower_log_bound, self.upper_log_bound, self.num_r_evals + ) def function(self, x, y, **kwargs_target): - """ - returns lensing potential + """Returns lensing potential. :param x: angular position (normally in units of arc seconds) - :param y: angular position (normally in units of arc seconds) - :kwargs_target: kwargs of target profile to be approximated + :param y: angular position (normally in units of arc seconds) :kwargs_target: + kwargs of target profile to be approximated """ weight_list = self.linear_weight_mle_fit([kwargs_target], self.kwargs_list) f_ = np.zeros_like(x) - f_innermost = 0 #for some profiles, minimum potential can go below zero. Add a constant here to make zero the minimum + f_innermost = 0 # for some profiles, minimum potential can go below zero. 
Add a constant here to make zero the minimum for kwargs, weight in zip(self.kwargs_list, weight_list): - f_ += weight * self.component_class.potential(x, y, [kwargs])#-potential_offset) - f_innermost+=weight * self.component_class.potential([10**self.lower_log_bound], [0], [kwargs]) + f_ += weight * self.component_class.potential( + x, y, [kwargs] + ) # -potential_offset) + f_innermost += weight * self.component_class.potential( + [10**self.lower_log_bound], [0], [kwargs] + ) return f_ - f_innermost def derivatives(self, x, y, **kwargs_target): - """ - returns df/dx and df/dy of the function which are the deflection angles + """Returns df/dx and df/dy of the function which are the deflection angles. :param x: angular position (normally in units of arc seconds) - :param y: angular position (normally in units of arc seconds) - :kwargs_target: kwargs of target profile to be approximated + :param y: angular position (normally in units of arc seconds) :kwargs_target: + kwargs of target profile to be approximated """ - weight_list = self.linear_weight_mle_fit([kwargs_target],self.kwargs_list) + weight_list = self.linear_weight_mle_fit([kwargs_target], self.kwargs_list) f_x, f_y = np.zeros_like(x), np.zeros_like(y) for kwargs, weight in zip(self.kwargs_list, weight_list): f_x_, f_y_ = weight * np.array(self.component_class.alpha(x, y, [kwargs])) @@ -159,17 +171,23 @@ def derivatives(self, x, y, **kwargs_target): return f_x, f_y def hessian(self, x, y, **kwargs_target): - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy. :param x: angular position (normally in units of arc seconds) - :param y: angular position (normally in units of arc seconds) - :kwargs_target: kwargs of target profile to be approximated + :param y: angular position (normally in units of arc seconds) :kwargs_target: + kwargs of target profile to be approximated """ weight_list = self.linear_weight_mle_fit([kwargs_target], self.kwargs_list) - f_xx, f_xy, f_yx, f_yy = np.zeros_like(x), np.zeros_like(x), np.zeros_like(x), np.zeros_like(x) + f_xx, f_xy, f_yx, f_yy = ( + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + ) for kwargs, weight in zip(self.kwargs_list, weight_list): - f_xx_i, f_xy_i, f_yx_i, f_yy_i = weight * np.array(self.component_class.hessian(x, y, [kwargs])) + f_xx_i, f_xy_i, f_yx_i, f_yy_i = weight * np.array( + self.component_class.hessian(x, y, [kwargs]) + ) f_xx += f_xx_i f_xy += f_xy_i f_yx += f_yx_i @@ -177,10 +195,12 @@ def hessian(self, x, y, **kwargs_target): return f_xx, f_xy, f_yx, f_yy def check_num_evals(self): - """ - Confirm that the number of evaluations is more than the number of components. Still not guaranteed to prevent overfitting - """ - num_comp=len(self.kwargs_list) - if num_comp >= self.lin_fit_hyperparams['num_r_evals']: - raise ValueError('There must be more num_r_evals than components or the profile will be overfit') + """Confirm that the number of evaluations is more than the number of components. 
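The amplitude fit above is a generalized least-squares solve, w = (M^T C^-1 M)^-1 M^T C^-1 Y, which is why num_r_evals must exceed the number of components: otherwise M^T C^-1 M is singular. A standalone numpy sketch of that step with a hypothetical design matrix:

```python
import numpy as np

rng = np.random.default_rng(0)
n_eval, n_comp = 100, 5            # more evaluation radii than components
M = rng.random((n_eval, n_comp))   # M[i, j]: j-th component kappa at radius r[i]
w_true = np.array([0.5, 1.0, 0.2, 0.0, 2.0])
Y = M @ w_true                     # target kappa evaluated at the same radii
C = np.diag(0.01 * Y + 1e-12)      # diagonal covariance, ~1% of the signal

MT_Cinv = M.T @ np.linalg.inv(C)
weights = np.linalg.inv(MT_Cinv @ M) @ (MT_Cinv @ Y)
print(np.allclose(weights, w_true))  # True: the noiseless fit recovers the amplitudes
```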
+ Still not guaranteed to prevent overfitting + """ + num_comp = len(self.kwargs_list) + if num_comp >= self.lin_fit_hyperparams["num_r_evals"]: + raise ValueError( + "There must be more num_r_evals than components or the profile will be overfit" + ) diff --git a/lenstronomy/LensModel/Profiles/tnfw.py b/lenstronomy/LensModel/Profiles/tnfw.py index 3aea301ca..cabd4fd71 100644 --- a/lenstronomy/LensModel/Profiles/tnfw.py +++ b/lenstronomy/LensModel/Profiles/tnfw.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to compute the truncated Navaro-Frank-White function (Baltz et al 2009)in mass/kappa space # the potential therefore is its integral @@ -7,12 +7,12 @@ from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase from lenstronomy.LensModel.Profiles.nfw import NFW -__all__ = ['TNFW'] +__all__ = ["TNFW"] class TNFW(LensProfileBase): - """ - this class contains functions concerning the truncated NFW profile with a truncation function (r_trunc^2)*(r^2+r_trunc^2) + """This class contains functions concerning the truncated NFW profile with a + truncation function (r_trunc^2)*(r^2+r_trunc^2) density equation is: @@ -20,17 +20,27 @@ class TNFW(LensProfileBase): \\rho(r) = \\frac{r_\\text{trunc}^2}{r^2+r_\\text{trunc}^2}\\frac{\\rho_0(\\alpha_{R_s})}{r/R_s(1+r/R_s)^2} relation are: R_200 = c * Rs - """ - profile_name = 'TNFW' - param_names = ['Rs', 'alpha_Rs', 'r_trunc', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'r_trunc': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'r_trunc': 100, 'center_x': 100, 'center_y': 100} - def __init__(self): - """ + profile_name = "TNFW" + param_names = ["Rs", "alpha_Rs", "r_trunc", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "r_trunc": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "r_trunc": 100, + "center_x": 100, + "center_y": 100, + } - """ + def __init__(self): + """""" self._s = 0.001 super(LensProfileBase, self).__init__() @@ -49,26 +59,24 @@ def function(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): rho0_input = self.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, self._s * Rs) f_ = self.nfwPot(R, Rs, rho0_input, r_trunc) return f_ def _L(self, x, tau): - """ - Logarithm that appears frequently + """Logarithm that appears frequently. :param x: r/Rs :param tau: t/Rs :return: """ x = np.maximum(x, self._s) - return np.log(x * (tau + np.sqrt(tau ** 2 + x ** 2)) ** -1) + return np.log(x * (tau + np.sqrt(tau**2 + x**2)) ** -1) def F(self, x): - """ - Classic NFW function in terms of arctanh and arctan + """Classic NFW function in terms of arctanh and arctan. 
:param x: r/Rs :return: @@ -80,8 +88,12 @@ def F(self, x): inds1 = np.where(x < 1) inds2 = np.where(x > 1) inds3 = np.where(x == 1) - nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5) - nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5) + nfwvals[inds1] = (1 - x[inds1] ** 2) ** -0.5 * np.arctanh( + (1 - x[inds1] ** 2) ** 0.5 + ) + nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -0.5 * np.arctan( + (x[inds2] ** 2 - 1) ** 0.5 + ) nfwvals[inds3] = 1 return nfwvals @@ -92,14 +104,13 @@ def F(self, x): elif x == 0: return 0 elif x < 1: - return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5) + return (1 - x**2) ** -0.5 * np.arctanh((1 - x**2) ** 0.5) else: - return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5) + return (x**2 - 1) ** -0.5 * np.arctan((x**2 - 1) ** 0.5) def derivatives(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): - - """ - returns df/dx and df/dy of the function (integral of TNFW), which are the deflection angles + """Returns df/dx and df/dy of the function (integral of TNFW), which are the + deflection angles. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -113,15 +124,13 @@ def derivatives(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): rho0_input = self.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, self._s * Rs) f_x, f_y = self.nfwAlpha(R, Rs, rho0_input, r_trunc, x_, y_) return f_x, f_y def hessian(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): - - """ - returns d^2f/dx^2, d^2f/dxdy, d^2f/dydx, d^2f/dy^2 of the TNFW potential f + """Returns d^2f/dx^2, d^2f/dxdy, d^2f/dydx, d^2f/dy^2 of the TNFW potential f. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -136,7 +145,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): rho0_input = self.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) + R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, self._s * Rs) kappa = self.density_2d(x_, y_, Rs, rho0_input, r_trunc) @@ -148,8 +157,7 @@ def hessian(self, x, y, Rs, alpha_Rs, r_trunc, center_x=0, center_y=0): @staticmethod def density(r, Rs, rho0, r_trunc): - """ - three dimensional truncated NFW profile + """Three dimensional truncated NFW profile. 
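As a cross-check of the piecewise F(x) above, the same branches written for a scalar argument; F is continuous at x = 1, where both the arctanh and arctan branches tend to 1:

```python
import numpy as np

def nfw_F(x):
    """Classic NFW auxiliary function, scalar version of the branches above."""
    if x == 0:
        return 0.0
    elif x == 1:
        return 1.0
    elif x < 1:
        return np.arctanh(np.sqrt(1 - x**2)) / np.sqrt(1 - x**2)
    else:
        return np.arctan(np.sqrt(x**2 - 1)) / np.sqrt(x**2 - 1)

print(nfw_F(0.999), nfw_F(1.0), nfw_F(1.001))  # all close to 1
```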
:param r: radius of interest :type r: float/numpy array @@ -159,11 +167,14 @@ def density(r, Rs, rho0, r_trunc): :type r_trunc: float > 0 :return: rho(r) density """ - return (r_trunc ** 2 * (r_trunc ** 2 + r ** 2) ** -1) * rho0 / (r / Rs * (1 + r / Rs) ** 2) + return ( + (r_trunc**2 * (r_trunc**2 + r**2) ** -1) + * rho0 + / (r / Rs * (1 + r / Rs) ** 2) + ) def density_2d(self, x, y, Rs, rho0, r_trunc, center_x=0, center_y=0): - """ - projected two dimensional NFW profile (kappa*Sigma_crit) + """Projected two dimensional NFW profile (kappa*Sigma_crit) :param R: projected radius of interest :type R: float/numpy array @@ -177,15 +188,14 @@ def density_2d(self, x, y, Rs, rho0, r_trunc, center_x=0, center_y=0): """ x_ = x - center_x y_ = y - center_y - R = np.sqrt(x_ ** 2 + y_ ** 2) - x = R * Rs ** -1 - tau = float(r_trunc) * Rs ** -1 + R = np.sqrt(x_**2 + y_**2) + x = R * Rs**-1 + tau = float(r_trunc) * Rs**-1 Fx = self._F(x, tau) return 2 * rho0 * Rs * Fx def mass_3d(self, r, Rs, rho0, r_trunc): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: 3d radius :param Rs: scale radius @@ -194,19 +204,27 @@ def mass_3d(self, r, Rs, rho0, r_trunc): :return: M(0 """ - t2 = tau ** 2 - X = np.maximum(X, self._s ) + t2 = tau**2 + X = np.maximum(X, self._s) _F = self.F(X) - a = t2*(t2+1)**-2 + a = t2 * (t2 + 1) ** -2 if isinstance(X, np.ndarray): - #b = (t2 + 1) * (X ** 2 - 1) ** -1 * (1 - _F) + # b = (t2 + 1) * (X ** 2 - 1) ** -1 * (1 - _F) b = np.ones_like(X) - b[X == 1] = (t2+1) * 1./3 + b[X == 1] = (t2 + 1) * 1.0 / 3 b[X != 1] = (t2 + 1) * (X[X != 1] ** 2 - 1) ** -1 * (1 - _F[X != 1]) elif isinstance(X, float) or isinstance(X, int): if X == 1: - b = (t2+1)* 1./3 + b = (t2 + 1) * 1.0 / 3 else: - b = (t2+1)*(X**2-1)**-1*(1-_F) + b = (t2 + 1) * (X**2 - 1) ** -1 * (1 - _F) else: - raise ValueError("The variable type is not compatible with the function, please use float," - " int or ndarray's.") - - c = 2*_F - d = -np.pi*(t2+X**2)**-0.5 - e = (t2-1)*(tau*(t2+X**2)**0.5)**-1*self._L(X, tau) + raise ValueError( + "The variable type is not compatible with the function, please use float," + " int or ndarray's." + ) + + c = 2 * _F + d = -np.pi * (t2 + X**2) ** -0.5 + e = (t2 - 1) * (tau * (t2 + X**2) ** 0.5) ** -1 * self._L(X, tau) result = a * (b + c + d + e) return result def _g(self, x, tau): - """ - analytic solution of integral for NFW profile to compute deflection angel and gamma + """Analytic solution of integral for NFW profile to compute deflection angel and + gamma. :param x: R/Rs :type x: float >0 """ x = np.maximum(x, self._s) - return tau ** 2 * (tau ** 2 + 1) ** -2 * ( - (tau ** 2 + 1 + 2 * (x ** 2 - 1)) * self.F(x) + tau * np.pi + (tau ** 2 - 1) * np.log(tau) + - np.sqrt(tau ** 2 + x ** 2) * (-np.pi + self._L(x, tau) * (tau ** 2 - 1) * tau ** -1)) + return ( + tau**2 + * (tau**2 + 1) ** -2 + * ( + (tau**2 + 1 + 2 * (x**2 - 1)) * self.F(x) + + tau * np.pi + + (tau**2 - 1) * np.log(tau) + + np.sqrt(tau**2 + x**2) + * (-np.pi + self._L(x, tau) * (tau**2 - 1) * tau**-1) + ) + ) @staticmethod def _cos_function(x): - if isinstance(x, np.ndarray) or isinstance(x, list): out = np.empty_like(x) inds1 = np.where(x < 1) @@ -355,14 +377,12 @@ def _cos_function(x): out = np.arccos(1 / x) ** 2 else: - raise Exception('x data type %s not recognized.' % x) + raise Exception("x data type %s not recognized." % x) return out def _h(self, x, tau): - - """ - expression for the integral to compute potential + """Expression for the integral to compute potential. 
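The truncated density above is the plain NFW profile multiplied by the factor r_trunc^2 / (r^2 + r_trunc^2), so it is essentially untruncated well inside r_trunc and steepens by two powers of r outside it. A standalone sketch:

```python
import numpy as np

def tnfw_density(r, Rs, rho0, r_trunc):
    # NFW density times the truncation factor r_trunc^2 / (r^2 + r_trunc^2)
    rho_nfw = rho0 / ((r / Rs) * (1 + r / Rs) ** 2)
    return rho_nfw * r_trunc**2 / (r_trunc**2 + r**2)

Rs, rho0, r_trunc = 1.0, 1.0, 20.0
r = np.array([0.1, 1.0, 100.0])
print(r_trunc**2 / (r_trunc**2 + r**2))     # ~1 inside r_trunc, ~(r_trunc/r)^2 outside
print(tnfw_density(r, Rs, rho0, r_trunc))
```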
:param x: R/Rs :param tau: r_trunc/Rs @@ -370,33 +390,32 @@ def _h(self, x, tau): """ x = np.maximum(x, self._s) - u = x ** 2 - t2 = tau ** 2 + u = x**2 + t2 = tau**2 Lx = self._L(x, tau) Fx = self.F(x) return (t2 + 1) ** -2 * ( - 2 * t2 * np.pi * (tau - (t2 + u) ** .5 + tau * np.log(tau + (t2 + u) ** .5)) - + - 2 * (t2 - 1) * tau * (t2 + u) ** .5 * Lx - + - t2 * (t2 - 1) * Lx ** 2 - + - 4 * t2 * (u - 1) * Fx - + - t2 * (t2 - 1) * self._cos_function(x) - + - t2 * ((t2 - 1) * np.log(tau) - t2 - 1) * np.log(u) - - - t2 * ( - (t2 - 1) * np.log(tau) * np.log(4 * tau) + 2 * np.log(0.5 * tau) - 2 * tau * ( - tau - np.pi) * np.log( - tau * 2))) + 2 + * t2 + * np.pi + * (tau - (t2 + u) ** 0.5 + tau * np.log(tau + (t2 + u) ** 0.5)) + + 2 * (t2 - 1) * tau * (t2 + u) ** 0.5 * Lx + + t2 * (t2 - 1) * Lx**2 + + 4 * t2 * (u - 1) * Fx + + t2 * (t2 - 1) * self._cos_function(x) + + t2 * ((t2 - 1) * np.log(tau) - t2 - 1) * np.log(u) + - t2 + * ( + (t2 - 1) * np.log(tau) * np.log(4 * tau) + + 2 * np.log(0.5 * tau) + - 2 * tau * (tau - np.pi) * np.log(tau * 2) + ) + ) @staticmethod def alpha2rho0(alpha_Rs, Rs): - """ - convert angle at Rs into rho0; neglects the truncation + """Convert angle at Rs into rho0; neglects the truncation. :param alpha_Rs: deflection angle at RS :param Rs: scale radius @@ -406,8 +425,7 @@ def alpha2rho0(alpha_Rs, Rs): @staticmethod def rho02alpha(rho0, Rs): - """ - convert rho0 to angle at Rs; neglects the truncation + """Convert rho0 to angle at Rs; neglects the truncation. :param rho0: density normalization (characteristic density) :param Rs: scale radius diff --git a/lenstronomy/LensModel/Profiles/tnfw_ellipse.py b/lenstronomy/LensModel/Profiles/tnfw_ellipse.py index cd3721b83..99e2196d7 100644 --- a/lenstronomy/LensModel/Profiles/tnfw_ellipse.py +++ b/lenstronomy/LensModel/Profiles/tnfw_ellipse.py @@ -1,38 +1,52 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.Profiles.tnfw import TNFW import lenstronomy.Util.param_util as param_util from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['TNFW_ELLIPSE'] +__all__ = ["TNFW_ELLIPSE"] class TNFW_ELLIPSE(LensProfileBase): - """ - this class contains functions concerning the truncated NFW profile with an ellipticity defined in the potential - parameterization of alpha_Rs, Rs and r_trunc is the same as for the spherical NFW profile + """This class contains functions concerning the truncated NFW profile with an + ellipticity defined in the potential parameterization of alpha_Rs, Rs and r_trunc is + the same as for the spherical NFW profile. 
from Glose & Kneib: https://cds.cern.ch/record/529584/files/0112138.pdf relation are: R_200 = c * Rs """ - profile_name = 'TNFW_ELLIPSE' - param_names = ['Rs', 'alpha_Rs', 'r_trunc', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'Rs': 0, 'alpha_Rs': 0, 'r_trunc': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'Rs': 100, 'alpha_Rs': 10, 'r_trunc': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} - def __init__(self): - """ + profile_name = "TNFW_ELLIPSE" + param_names = ["Rs", "alpha_Rs", "r_trunc", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "Rs": 0, + "alpha_Rs": 0, + "r_trunc": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "Rs": 100, + "alpha_Rs": 10, + "r_trunc": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } - """ + def __init__(self): + """""" self.tnfw = TNFW() self._diff = 0.0000000001 super(TNFW_ELLIPSE, self).__init__() def function(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y=0): - """ - returns elliptically distorted NFW lensing potential + """Returns elliptically distorted NFW lensing potential. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -45,19 +59,20 @@ def function(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y=0): :param center_y: center of halo (in angular units) :return: lensing potential """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) R_ = np.sqrt(x_**2 + y_**2) rho0_input = self.tnfw.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) Rs = np.maximum(Rs, 0.0000001) - #if Rs < 0.0000001: + # if Rs < 0.0000001: # Rs = 0.0000001 f_ = self.tnfw.nfwPot(R_, Rs, rho0_input, r_trunc) return f_ def derivatives(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y=0): - """ - returns df/dx and df/dy of the function, calculated as an elliptically distorted deflection angle of the - spherical NFW profile + """Returns df/dx and df/dy of the function, calculated as an elliptically + distorted deflection angle of the spherical NFW profile. 
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -70,28 +85,30 @@ def derivatives(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y= :param center_y: center of halo (in angular units) :return: deflection in x-direction, deflection in y-direction """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) phi_G, q = param_util.ellipticity2phi_q(e1, e2) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) e = param_util.q2e(q) # e = abs(1 - q) - R_ = np.sqrt(x_ ** 2 + y_ ** 2) + R_ = np.sqrt(x_**2 + y_**2) rho0_input = self.tnfw.alpha2rho0(alpha_Rs=alpha_Rs, Rs=Rs) Rs = np.maximum(Rs, 0.0000001) - #if Rs < 0.0000001: + # if Rs < 0.0000001: # Rs = 0.0000001 f_x_prim, f_y_prim = self.tnfw.nfwAlpha(R_, Rs, rho0_input, r_trunc, x_, y_) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) - f_x = cos_phi*f_x_prim-sin_phi*f_y_prim - f_y = sin_phi*f_x_prim+cos_phi*f_y_prim + f_x = cos_phi * f_x_prim - sin_phi * f_y_prim + f_y = sin_phi * f_x_prim + cos_phi * f_y_prim return f_x, f_y def hessian(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y=0): - """ - returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy - the calculation is performed as a numerical differential from the deflection field. Analytical relations are possible + """Returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy the + calculation is performed as a numerical differential from the deflection field. + Analytical relations are possible. :param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -104,15 +121,21 @@ def hessian(self, x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x=0, center_y=0): :param center_y: center of halo (in angular units) :return: d^2f/dx^2, d^2/dxdy, d^2/dydx, d^f/dy^2 """ - alpha_ra, alpha_dec = self.derivatives(x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y) + alpha_ra, alpha_dec = self.derivatives( + x, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y + ) diff = self._diff - alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y) - alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y) - - f_xx = (alpha_ra_dx - alpha_ra)/diff - f_xy = (alpha_ra_dy - alpha_ra)/diff - f_yx = (alpha_dec_dx - alpha_dec)/diff - f_yy = (alpha_dec_dy - alpha_dec)/diff + alpha_ra_dx, alpha_dec_dx = self.derivatives( + x + diff, y, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y + ) + alpha_ra_dy, alpha_dec_dy = self.derivatives( + x, y + diff, Rs, alpha_Rs, r_trunc, e1, e2, center_x, center_y + ) + + f_xx = (alpha_ra_dx - alpha_ra) / diff + f_xy = (alpha_ra_dy - alpha_ra) / diff + f_yx = (alpha_dec_dx - alpha_dec) / diff + f_yy = (alpha_dec_dy - alpha_dec) / diff return f_xx, f_xy, f_yx, f_yy @@ -130,9 +153,9 @@ def mass_3d_lens(self, r, Rs, alpha_Rs, r_trunc, e1=1, e2=0): return self.tnfw.mass_3d_lens(r, Rs, alpha_Rs, r_trunc) def density_lens(self, r, Rs, alpha_Rs, r_trunc, e1=1, e2=0): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. 
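The Hessian above is built from one-sided finite differences of the deflection field. The same pattern in isolation, for any deflection function alpha(x, y) -> (f_x, f_y); the point-mass deflection at the end is only a toy check:

```python
import numpy as np

def numerical_hessian(alpha, x, y, diff=1e-10):
    """Forward-difference Hessian of the lensing potential from its deflection field."""
    f_x, f_y = alpha(x, y)
    f_x_dx, f_y_dx = alpha(x + diff, y)
    f_x_dy, f_y_dy = alpha(x, y + diff)
    f_xx = (f_x_dx - f_x) / diff
    f_xy = (f_x_dy - f_x) / diff
    f_yx = (f_y_dx - f_y) / diff
    f_yy = (f_y_dy - f_y) / diff
    return f_xx, f_xy, f_yx, f_yy

alpha_point = lambda x, y: (x / (x**2 + y**2), y / (x**2 + y**2))  # point mass, theta_E = 1
print(numerical_hessian(alpha_point, np.array([1.2]), np.array([0.4])))
```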
The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: 3d radios :param Rs: turn-over radius of NFW profile diff --git a/lenstronomy/LensModel/Profiles/uldm.py b/lenstronomy/LensModel/Profiles/uldm.py index f47d8d1eb..1454ec545 100644 --- a/lenstronomy/LensModel/Profiles/uldm.py +++ b/lenstronomy/LensModel/Profiles/uldm.py @@ -1,11 +1,11 @@ -__author__ = 'lucateo' +__author__ = "lucateo" # this file contains a class to compute the Ultra Light Dark Matter soliton profile import numpy as np from scipy.special import gamma, hyp2f1 from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase -__all__ = ['Uldm'] +__all__ = ["Uldm"] class Uldm(LensProfileBase): @@ -18,14 +18,14 @@ class Uldm(LensProfileBase): \\rho = \\rho_0 (1 + a(\\theta/\\theta_c)^2)^{-\\beta} where :math:`\\theta_c` is the core radius, corresponding to the radius where the - density drops by half its central value, :math: `\\beta` is the slope (called just slope + density drops by half its central value, :math: `\\beta` is the slope (called just slope in the parameters of this model), :math: `\\rho_0 = \\kappa_0 \\Sigma_c/D_lens`, and :math: `a` is a parameter, dependent on :math: `\\beta`, chosen such that :math: `\\theta_c` indeed corresponds to the radius where the density drops by half (simple math gives :math: `a = 0.5^{-1/\\beta} - 1` ). For an ULDM soliton profile without contributions to background potential, it - turns out that :math: `\\beta = 8, a = 0.091`. We allow :math: `\\beta` to be - different from 8 to model solitons which feel the influence of background + turns out that :math: `\\beta = 8, a = 0.091`. We allow :math: `\\beta` to be + different from 8 to model solitons which feel the influence of background potential (see 2105.10873) The profile has, as parameters: @@ -33,23 +33,37 @@ class Uldm(LensProfileBase): - theta_c: core radius (in arcseconds) - slope: exponent entering the profile, default value is 8 """ + _s = 0.000001 # numerical limit for minimal radius - param_names = ['kappa_0', 'theta_c', 'slope', 'center_x', 'center_y'] - lower_limit_default = {'kappa_0': 0, 'theta_c': 0, 'slope': 3.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'kappa_0': 1., 'theta_c': 100, 'slope': 10, 'center_x': 100, 'center_y': 100} + param_names = ["kappa_0", "theta_c", "slope", "center_x", "center_y"] + lower_limit_default = { + "kappa_0": 0, + "theta_c": 0, + "slope": 3.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "kappa_0": 1.0, + "theta_c": 100, + "slope": 10, + "center_x": 100, + "center_y": 100, + } @staticmethod def rhotilde(kappa_0, theta_c, slope=8): - """ - Computes the central density in angular units + """Computes the central density in angular units. 
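The soliton profile quoted in the class docstring can be written down directly; a sketch of rho(r) = rho_0 (1 + a (r/theta_c)^2)^(-slope) with a = 0.5^(-1/slope) - 1, checking that the density at theta_c is half the central value and that a is about 0.091 for the default slope of 8:

```python
import numpy as np

def uldm_density(r, rho0, theta_c, slope=8):
    # a is chosen so the density drops to half its central value at r = theta_c
    a = 0.5 ** (-1.0 / slope) - 1
    return rho0 / (1 + a * (r / theta_c) ** 2) ** slope

rho0, theta_c = 1.0, 2.0
print(uldm_density(0.0, rho0, theta_c))      # 1.0, the central value
print(uldm_density(theta_c, rho0, theta_c))  # 0.5, by construction
print(0.5 ** (-1.0 / 8) - 1)                 # ~0.091 for slope = 8
```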
:param kappa_0: central convergence of profile :param theta_c: core radius (in arcsec) :param slope: exponent entering the profile :return: central density in 1/arcsec """ - a_factor_sqrt = np.sqrt(0.5**(-1/slope) - 1) - num_factor = gamma(slope) / gamma(slope - 1/2) * a_factor_sqrt / np.sqrt(np.pi) + a_factor_sqrt = np.sqrt(0.5 ** (-1 / slope) - 1) + num_factor = ( + gamma(slope) / gamma(slope - 1 / 2) * a_factor_sqrt / np.sqrt(np.pi) + ) return kappa_0 * num_factor / theta_c def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): @@ -67,21 +81,39 @@ def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): x_ = x - center_x y_ = y - center_y - r = np.sqrt(x_** 2 + y_** 2) + r = np.sqrt(x_**2 + y_**2) r = np.maximum(r, self._s) - a_factor_sqrt = np.sqrt( (0.5)**(-1./slope) -1) + a_factor_sqrt = np.sqrt((0.5) ** (-1.0 / slope) - 1) if np.isscalar(r) == True: - hypgeom = float(kappa_0 /2 * r**2 * - hyp3f2(1, 1, slope - 0.5, 2, 2, -(a_factor_sqrt * r /theta_c )**2)) + hypgeom = float( + kappa_0 + / 2 + * r**2 + * hyp3f2(1, 1, slope - 0.5, 2, 2, -((a_factor_sqrt * r / theta_c) ** 2)) + ) else: - hypgeom = np.array([ kappa_0 /2. * r_i**2. * - hyp3f2(1, 1, slope - 0.5, 2, 2, -(a_factor_sqrt * r_i / theta_c)**2.) for r_i in r], dtype=float) + hypgeom = np.array( + [ + kappa_0 + / 2.0 + * r_i**2.0 + * hyp3f2( + 1, + 1, + slope - 0.5, + 2, + 2, + -((a_factor_sqrt * r_i / theta_c) ** 2.0), + ) + for r_i in r + ], + dtype=float, + ) return hypgeom @staticmethod def alpha_radial(r, kappa_0, theta_c, slope=8): - """ - returns the radial part of the deflection angle + """Returns the radial part of the deflection angle. :param kappa_0: central convergence of profile :param theta_c: core radius (in arcsec) @@ -89,14 +121,14 @@ def alpha_radial(r, kappa_0, theta_c, slope=8): :param r: radius where the deflection angle is computed :return: radial deflection angle """ - a_factor = 0.5**(-1./slope) - 1 - prefactor = 2./(2*slope - 3) * kappa_0 * theta_c**2 / a_factor - denominator_factor = (1 + a_factor * r**2/theta_c**2)**(slope - 3./2) - return prefactor/r * (1 - 1/denominator_factor) + a_factor = 0.5 ** (-1.0 / slope) - 1 + prefactor = 2.0 / (2 * slope - 3) * kappa_0 * theta_c**2 / a_factor + denominator_factor = (1 + a_factor * r**2 / theta_c**2) ** (slope - 3.0 / 2) + return prefactor / r * (1 - 1 / denominator_factor) def derivatives(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): - """ - returns df/dx and df/dy of the function (lensing potential), which are the deflection angles + """Returns df/dx and df/dy of the function (lensing potential), which are the + deflection angles. 
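alpha_radial above is the magnitude of the deflection; the Cartesian components returned by derivatives presumably follow by projecting it along the radial unit vector. In the sketch below, the radial formula mirrors the code above, while the projection onto x and y is an assumption for illustration:

```python
import numpy as np

def alpha_radial(r, kappa_0, theta_c, slope=8):
    a = 0.5 ** (-1.0 / slope) - 1
    prefactor = 2.0 / (2 * slope - 3) * kappa_0 * theta_c**2 / a
    denom = (1 + a * r**2 / theta_c**2) ** (slope - 1.5)
    return prefactor / r * (1 - 1 / denom)

def uldm_derivatives(x, y, kappa_0, theta_c, slope=8, center_x=0.0, center_y=0.0):
    x_, y_ = x - center_x, y - center_y
    r = np.maximum(np.sqrt(x_**2 + y_**2), 1e-6)
    alpha = alpha_radial(r, kappa_0, theta_c, slope)
    return alpha * x_ / r, alpha * y_ / r  # assumed decomposition into f_x, f_y

f_x, f_y = uldm_derivatives(np.array([1.0]), np.array([0.5]), kappa_0=0.2, theta_c=3.0)
```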
:param x: angular position (normally in units of arc seconds) :param y: angular position (normally in units of arc seconds) @@ -130,21 +162,25 @@ def hessian(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): y_ = y - center_y R = np.sqrt(x_**2 + y_**2) R = np.maximum(R, 0.00000001) - a_factor = 0.5**(-1./slope) - 1 - prefactor = 2./(2*slope -3) * kappa_0 * theta_c**2 / a_factor + a_factor = 0.5 ** (-1.0 / slope) - 1 + prefactor = 2.0 / (2 * slope - 3) * kappa_0 * theta_c**2 / a_factor # denominator factor - denominator = 1 + a_factor * R**2/theta_c**2 - factor1 = (2*slope - 3) * a_factor * denominator**(1./2 - slope) / (theta_c**2 * R**2) - factor2 = 1/R**4 * (1 - denominator**(3./2 - slope)) + denominator = 1 + a_factor * R**2 / theta_c**2 + factor1 = ( + (2 * slope - 3) + * a_factor + * denominator ** (1.0 / 2 - slope) + / (theta_c**2 * R**2) + ) + factor2 = 1 / R**4 * (1 - denominator ** (3.0 / 2 - slope)) f_xx = prefactor * (factor1 * x_**2 + factor2 * (y_**2 - x_**2)) f_yy = prefactor * (factor1 * y_**2 + factor2 * (x_**2 - y_**2)) - f_xy = prefactor * (factor1 * x_ * y_ - factor2 * 2*x_*y_) + f_xy = prefactor * (factor1 * x_ * y_ - factor2 * 2 * x_ * y_) return f_xx, f_xy, f_xy, f_yy def density(self, R, kappa_0, theta_c, slope=8): - """ - three dimensional ULDM profile in angular units - (rho0_physical = rho0_angular Sigma_crit / D_lens) + """Three dimensional ULDM profile in angular units (rho0_physical = rho0_angular + Sigma_crit / D_lens) :param R: radius of interest :param kappa_0: central convergence of profile @@ -153,14 +189,13 @@ def density(self, R, kappa_0, theta_c, slope=8): :return: rho(R) density in angular units """ rhotilde = self.rhotilde(kappa_0, theta_c, slope) - a_factor = 0.5**(-1./slope) - 1 - return rhotilde/(1 + a_factor * (R/theta_c)**2)**slope + a_factor = 0.5 ** (-1.0 / slope) - 1 + return rhotilde / (1 + a_factor * (R / theta_c) ** 2) ** slope def density_lens(self, r, kappa_0, theta_c, slope=8): - """ - computes the density at 3d radius r given lens model parameterization. - The integral in the LOS projection of this quantity results in the - convergence quantity. + """Computes the density at 3d radius r given lens model parameterization. The + integral in the LOS projection of this quantity results in the convergence + quantity. :param r: 3d radius :param kappa_0: central convergence of profile @@ -172,8 +207,7 @@ def density_lens(self, r, kappa_0, theta_c, slope=8): @staticmethod def kappa_r(R, kappa_0, theta_c, slope=8): - """ - convergence of the cored density profile. This routine is also for testing + """Convergence of the cored density profile. This routine is also for testing. :param R: radius (angular scale) :param kappa_0: convergence in the core @@ -181,13 +215,12 @@ def kappa_r(R, kappa_0, theta_c, slope=8): :param slope: exponent entering the profile :return: convergence at r """ - a_factor = (0.5)**(-1./slope) -1 - return kappa_0 * (1 + a_factor * (R/theta_c)**2)**(1./2 - slope) + a_factor = (0.5) ** (-1.0 / slope) - 1 + return kappa_0 * (1 + a_factor * (R / theta_c) ** 2) ** (1.0 / 2 - slope) def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): - """ - projected two dimensional ULDM profile (convergence * Sigma_crit), but - given our units convention for rho0, it is basically the convergence + """Projected two dimensional ULDM profile (convergence * Sigma_crit), but given + our units convention for rho0, it is basically the convergence. 
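The cored convergence above in isolation; at R = 0 it returns kappa_0, and density_2d is just this quantity evaluated at the projected radius:

```python
import numpy as np

def uldm_kappa_r(R, kappa_0, theta_c, slope=8):
    a = 0.5 ** (-1.0 / slope) - 1
    return kappa_0 * (1 + a * (R / theta_c) ** 2) ** (0.5 - slope)

print(uldm_kappa_r(0.0, kappa_0=0.15, theta_c=2.0))  # 0.15, the central convergence
print(uldm_kappa_r(2.0, kappa_0=0.15, theta_c=2.0))  # ~0.078 at one core radius (slope 8)
```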
:param x: x-coordinate :param y: y-coordinate @@ -202,18 +235,16 @@ def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8): return self.kappa_r(R, kappa_0, theta_c, slope) def _mass_integral(self, x, slope=8): - """ - Returns the analytic result of the integral appearing in mass expression + """Returns the analytic result of the integral appearing in mass expression. :param slope: exponent entering the profile :return: integral result """ - hypF = np.real(hyp2f1(3./2, slope, 5./2, - x**2)) - return 1./3 * x**3 * hypF + hypF = np.real(hyp2f1(3.0 / 2, slope, 5.0 / 2, -(x**2))) + return 1.0 / 3 * x**3 * hypF def mass_3d(self, R, kappa_0, theta_c, slope=8): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param R: radius in arcseconds :param kappa_0: central convergence of profile @@ -222,15 +253,16 @@ def mass_3d(self, R, kappa_0, theta_c, slope=8): :return: mass of soliton in angular units """ rhotilde = self.rhotilde(kappa_0, theta_c, slope) - a_factor = 0.5**(-1./slope) - 1 - prefactor = 4. * np.pi * rhotilde * theta_c**3 / (a_factor)**(1.5) - m_3d = prefactor * (self._mass_integral(R/theta_c * np.sqrt(a_factor), slope) - - self._mass_integral(0, slope) ) + a_factor = 0.5 ** (-1.0 / slope) - 1 + prefactor = 4.0 * np.pi * rhotilde * theta_c**3 / (a_factor) ** (1.5) + m_3d = prefactor * ( + self._mass_integral(R / theta_c * np.sqrt(a_factor), slope) + - self._mass_integral(0, slope) + ) return m_3d def mass_3d_lens(self, r, kappa_0, theta_c, slope=8): - """ - mass enclosed a 3d sphere or radius r + """Mass enclosed a 3d sphere or radius r. :param r: radius over which the mass is computed :param kappa_0: central convergence of profile @@ -242,8 +274,7 @@ def mass_3d_lens(self, r, kappa_0, theta_c, slope=8): return m_3d def mass_2d(self, R, kappa_0, theta_c, slope=8): - """ - mass enclosed a 2d sphere or radius r + """Mass enclosed a 2d sphere or radius r. :param R: radius over which the mass is computed :param kappa_0: central convergence of profile diff --git a/lenstronomy/LensModel/QuadOptimizer/multi_plane_fast.py b/lenstronomy/LensModel/QuadOptimizer/multi_plane_fast.py index 738f41bab..21acf4cfb 100644 --- a/lenstronomy/LensModel/QuadOptimizer/multi_plane_fast.py +++ b/lenstronomy/LensModel/QuadOptimizer/multi_plane_fast.py @@ -1,23 +1,32 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from lenstronomy.LensModel.lens_model import LensModel import numpy as np class MultiplaneFast(object): + """This class accelerates ray tracing computations in multi plane lensing for + quadruple image lenses by only computing the deflection from objects in front of the + main deflector at z_lens one time. + The first ray tracing computation through the foreground is saved and re-used, but + it will always have the same shape as the initial x_image, y_image arrays. """ - This class accelerates ray tracing computations in multi plane lensing for quadruple image lenses by only - computing the deflection from objects in front of the main deflector at z_lens one time. The first ray tracing - computation through the foreground is saved and re-used, but it will always have the same shape as the initial - x_image, y_image arrays. 
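The speed-up described in the MultiplaneFast docstring comes from caching the parameter-independent foreground ray tracing and reusing it in every likelihood call. A generic sketch of that caching pattern only, not the actual implementation (which uses ray_shooting_partial):

```python
class CachedForeground:
    """Illustrative cache for an expensive, parameter-independent computation."""

    def __init__(self, x_image, y_image):
        self._x_image, self._y_image = x_image, y_image
        self._foreground_rays = None

    def _trace_foreground(self):
        print("tracing foreground (expensive, runs once)")
        return self._x_image, self._y_image  # placeholder for the traced rays

    def foreground(self):
        if self._foreground_rays is None:
            self._foreground_rays = self._trace_foreground()
        return self._foreground_rays

cache = CachedForeground([1.0, -0.8, 0.3, 0.2], [0.5, 0.4, -1.1, 0.9])
cache.foreground()  # computes and stores
cache.foreground()  # reuses the stored rays without re-tracing
```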
- - """ - - def __init__(self, x_image, y_image, z_lens, z_source, lens_model_list, redshift_list, - astropy_instance, param_class, foreground_rays, - tol_source=1e-5, numerical_alpha_class=None): + def __init__( + self, + x_image, + y_image, + z_lens, + z_source, + lens_model_list, + redshift_list, + astropy_instance, + param_class, + foreground_rays, + tol_source=1e-5, + numerical_alpha_class=None, + ): """ :param x_image: x_image to fit @@ -34,21 +43,40 @@ def __init__(self, x_image, y_image, z_lens, z_source, lens_model_list, redshift :param numerical_alpha_class: class for computing numerically tabulated deflection angles """ - self.lensModel = LensModel(lens_model_list, z_lens, z_source, redshift_list, astropy_instance, - multi_plane=True, numerical_alpha_class=numerical_alpha_class) - - lensmodel_list_to_vary = lens_model_list[0:param_class.to_vary_index] - redshift_list_to_vary = redshift_list[0:param_class.to_vary_index] - lensmodel_list_fixed = lens_model_list[param_class.to_vary_index:] - redshift_list_fixed = redshift_list[param_class.to_vary_index:] - - self.lens_model_to_vary = LensModel(lensmodel_list_to_vary, z_lens, z_source, redshift_list_to_vary, - cosmo=astropy_instance, multi_plane=True, - numerical_alpha_class=numerical_alpha_class) - - self.lens_model_fixed = LensModel(lensmodel_list_fixed, z_lens, z_source, redshift_list_fixed, - cosmo=astropy_instance, multi_plane=True, - numerical_alpha_class=numerical_alpha_class) + self.lensModel = LensModel( + lens_model_list, + z_lens, + z_source, + redshift_list, + astropy_instance, + multi_plane=True, + numerical_alpha_class=numerical_alpha_class, + ) + + lensmodel_list_to_vary = lens_model_list[0 : param_class.to_vary_index] + redshift_list_to_vary = redshift_list[0 : param_class.to_vary_index] + lensmodel_list_fixed = lens_model_list[param_class.to_vary_index :] + redshift_list_fixed = redshift_list[param_class.to_vary_index :] + + self.lens_model_to_vary = LensModel( + lensmodel_list_to_vary, + z_lens, + z_source, + redshift_list_to_vary, + cosmo=astropy_instance, + multi_plane=True, + numerical_alpha_class=numerical_alpha_class, + ) + + self.lens_model_fixed = LensModel( + lensmodel_list_fixed, + z_lens, + z_source, + redshift_list_fixed, + cosmo=astropy_instance, + multi_plane=True, + numerical_alpha_class=numerical_alpha_class, + ) self._z_lens = z_lens @@ -63,7 +91,6 @@ def __init__(self, x_image, y_image, z_lens, z_source, lens_model_list, redshift self._foreground_rays = foreground_rays def chi_square(self, args_lens, *args, **kwargs): - """ :param args_lens: array of lens model parameters being optimized, computed from kwargs_lens in a specified @@ -78,7 +105,6 @@ def chi_square(self, args_lens, *args, **kwargs): return source_plane_penlty + param_penalty def logL(self, args_lens, *args, **kwargs): - """ :param args_lens: array of lens model parameters being optimized, computed from kwargs_lens in a specified @@ -90,7 +116,6 @@ def logL(self, args_lens, *args, **kwargs): return -0.5 * chi_square def source_plane_chi_square(self, args_lens, *args, **kwargs): - """ :param args_lens: array of lens model parameters being optimized, computed from kwargs_lens in a specified @@ -100,27 +125,35 @@ def source_plane_chi_square(self, args_lens, *args, **kwargs): betax, betay = self.ray_shooting_fast(args_lens) - dx_source = ((betax[0] - betax[1]) ** 2 + (betax[0] - betax[2]) ** 2 + ( - betax[0] - betax[3]) ** 2 + ( - betax[1] - betax[2]) ** 2 + - (betax[1] - betax[3]) ** 2 + (betax[2] - betax[3]) ** 2) - dy_source = 
((betay[0] - betay[1]) ** 2 + (betay[0] - betay[2]) ** 2 + ( - betay[0] - betay[3]) ** 2 + ( - betay[1] - betay[2]) ** 2 + - (betay[1] - betay[3]) ** 2 + (betay[2] - betay[3]) ** 2) - - chi_square = 0.5 * (dx_source + dy_source) / self._tol_source ** 2 + dx_source = ( + (betax[0] - betax[1]) ** 2 + + (betax[0] - betax[2]) ** 2 + + (betax[0] - betax[3]) ** 2 + + (betax[1] - betax[2]) ** 2 + + (betax[1] - betax[3]) ** 2 + + (betax[2] - betax[3]) ** 2 + ) + dy_source = ( + (betay[0] - betay[1]) ** 2 + + (betay[0] - betay[2]) ** 2 + + (betay[0] - betay[3]) ** 2 + + (betay[1] - betay[2]) ** 2 + + (betay[1] - betay[3]) ** 2 + + (betay[2] - betay[3]) ** 2 + ) + + chi_square = 0.5 * (dx_source + dy_source) / self._tol_source**2 return chi_square def ray_shooting_fast(self, args_lens): + """Performs a ray tracing computation through observed coordinates on the sky + (self._x_image, self._y_image) to the source plane, returning the final + coordinates of each ray on the source plane. - """ - Performs a ray tracing computation through observed coordinates on the sky (self._x_image, self._y_image) - to the source plane, returning the final coordinates of each ray on the source plane - - :param args_lens: An array of parameters being optimized. The array is computed from a set of key word arguments - by an instance of ParamClass (see documentation in QuadOptimizer.param_manager) + :param args_lens: An array of parameters being optimized. The array is computed + from a set of key word arguments by an instance of ParamClass (see + documentation in QuadOptimizer.param_manager) :return: the xy coordinate of each ray traced back to the source plane """ @@ -132,36 +165,69 @@ def ray_shooting_fast(self, args_lens): index = self._param_class.to_vary_index kwargs_lens = kw[0:index] # evaluate main deflector deflection angles - x, y, alpha_x, alpha_y = self.lens_model_to_vary.lens_model.ray_shooting_partial( - x, y, alpha_x, alpha_y, self._z_lens, self._z_lens, kwargs_lens, include_z_start=True) + ( + x, + y, + alpha_x, + alpha_y, + ) = self.lens_model_to_vary.lens_model.ray_shooting_partial( + x, + y, + alpha_x, + alpha_y, + self._z_lens, + self._z_lens, + kwargs_lens, + include_z_start=True, + ) # ray trace through background halos kwargs_lens = kw[index:] x, y, _, _ = self.lens_model_fixed.lens_model.ray_shooting_partial( - x, y, alpha_x, alpha_y, self._z_lens, self._z_source, kwargs_lens, check_convention=False) + x, + y, + alpha_x, + alpha_y, + self._z_lens, + self._z_source, + kwargs_lens, + check_convention=False, + ) beta_x, beta_y = self.lens_model_fixed.lens_model.co_moving2angle_source(x, y) return beta_x, beta_y def _ray_shooting_fast_foreground(self): - - """ - Does the ray tracing through the foreground halos only once - """ + """Does the ray tracing through the foreground halos only once.""" if self._foreground_rays is None: - # These do not depend on the kwargs being optimized for kw = self._param_class.kwargs_lens index = self._param_class.to_vary_index kwargs_lens = kw[index:] x0, y0 = np.zeros_like(self._x_image), np.zeros_like(self._y_image) - x, y, alpha_x, alpha_y = self.lens_model_fixed.lens_model.ray_shooting_partial( - x0, y0, self._x_image, self._y_image, z_start=0., - z_stop=self._z_lens, kwargs_lens=kwargs_lens) + ( + x, + y, + alpha_x, + alpha_y, + ) = self.lens_model_fixed.lens_model.ray_shooting_partial( + x0, + y0, + self._x_image, + self._y_image, + z_start=0.0, + z_stop=self._z_lens, + kwargs_lens=kwargs_lens, + ) self._foreground_rays = (x, y, alpha_x, alpha_y) - return 
self._foreground_rays[0], self._foreground_rays[1], self._foreground_rays[2], self._foreground_rays[3] + return ( + self._foreground_rays[0], + self._foreground_rays[1], + self._foreground_rays[2], + self._foreground_rays[3], + ) diff --git a/lenstronomy/LensModel/QuadOptimizer/optimizer.py b/lenstronomy/LensModel/QuadOptimizer/optimizer.py index 088f6c624..c39bbb885 100644 --- a/lenstronomy/LensModel/QuadOptimizer/optimizer.py +++ b/lenstronomy/LensModel/QuadOptimizer/optimizer.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from scipy.optimize import minimize import numpy as np @@ -6,23 +6,37 @@ from lenstronomy.LensModel.QuadOptimizer.multi_plane_fast import MultiplaneFast from lenstronomy.Sampling.Pool.pool import choose_pool -__all__ = ['Optimizer'] +__all__ = ["Optimizer"] class Optimizer(object): - """ - class which executes the optimization routines. Currently implemented as a particle swarm optimization followed by - a downhill simplex routine. + """Class which executes the optimization routines. Currently implemented as a + particle swarm optimization followed by a downhill simplex routine. - Particle swarm optimizer is modified from the CosmoHammer particle swarm routine with different convergence criteria implemented. + Particle swarm optimizer is modified from the CosmoHammer particle swarm routine + with different convergence criteria implemented. """ - def __init__(self, x_image, y_image, lens_model_list, redshift_list, z_lens, z_source, - parameter_class, astropy_instance=None, numerical_alpha_class=None, - particle_swarm=True, re_optimize=False, re_optimize_scale=1., - pso_convergence_mean=50000, foreground_rays=None, - tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400): - + def __init__( + self, + x_image, + y_image, + lens_model_list, + redshift_list, + z_lens, + z_source, + parameter_class, + astropy_instance=None, + numerical_alpha_class=None, + particle_swarm=True, + re_optimize=False, + re_optimize_scale=1.0, + pso_convergence_mean=50000, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ): """ :param x_image: x_image to fit (should be length 4) @@ -45,9 +59,19 @@ def __init__(self, x_image, y_image, lens_model_list, redshift_list, z_lens, z_s :param simplex_n_iterations: number of iterations per dimension for the downhill simplex optimization """ - self.fast_rayshooting = MultiplaneFast(x_image, y_image, z_lens, z_source, - lens_model_list, redshift_list, astropy_instance, parameter_class, - foreground_rays, tol_source, numerical_alpha_class) + self.fast_rayshooting = MultiplaneFast( + x_image, + y_image, + z_lens, + z_source, + lens_model_list, + redshift_list, + astropy_instance, + parameter_class, + foreground_rays, + tol_source, + numerical_alpha_class, + ) self._tol_source = tol_source @@ -65,7 +89,6 @@ def __init__(self, x_image, y_image, lens_model_list, redshift_list, z_lens, z_s self._re_optimize_scale = re_optimize_scale def optimize(self, n_particles=50, n_iterations=250, verbose=False, threadCount=1): - """ :param n_particles: number of PSO particles, will be ignored if self._particle_swarm is False @@ -76,7 +99,6 @@ def optimize(self, n_particles=50, n_iterations=250, verbose=False, threadCount= """ if self._particle_swarm: - if threadCount > 1: pool = choose_pool(mpi=False, processes=threadCount) else: @@ -90,57 +112,73 @@ def optimize(self, n_particles=50, n_iterations=250, verbose=False, threadCount= kwargs_lens_final, source_penalty = self._fit_amoeba(kwargs, verbose) 
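The source-plane penalty used throughout the optimizer is the sum of squared pairwise separations of the four back-traced image positions, scaled by tol_source. The same quantity in isolation:

```python
import numpy as np
from itertools import combinations

def source_plane_chi_square(beta_x, beta_y, tol_source=1e-5):
    # 6 pairwise differences for a quadruply imaged source
    dx = sum((beta_x[i] - beta_x[j]) ** 2 for i, j in combinations(range(4), 2))
    dy = sum((beta_y[i] - beta_y[j]) ** 2 for i, j in combinations(range(4), 2))
    return 0.5 * (dx + dy) / tol_source**2

# rays landing within ~1e-5 of each other give a chi^2 of order unity
beta_x = np.array([0.100000, 0.100010, 0.099995, 0.100005])
beta_y = np.array([0.200000, 0.199990, 0.200012, 0.200003])
print(source_plane_chi_square(beta_x, beta_y))
```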
args_lens_final = self._param_class.kwargs_to_args(kwargs_lens_final) - source_x_array, source_y_array = self.fast_rayshooting.ray_shooting_fast(args_lens_final) + source_x_array, source_y_array = self.fast_rayshooting.ray_shooting_fast( + args_lens_final + ) source_x, source_y = np.mean(source_x_array), np.mean(source_y_array) if verbose: - print('optimization done.') - print('Recovered source position: ', (source_x_array, source_y_array)) + print("optimization done.") + print("Recovered source position: ", (source_x_array, source_y_array)) return kwargs_lens_final, [source_x, source_y] def _fit_pso(self, n_particles, n_iterations, pool, verbose): + """Executes the PSO.""" - """ - Executes the PSO - """ - - low_bounds, high_bounds = self._param_class.bounds(self._re_optimize, self._re_optimize_scale) + low_bounds, high_bounds = self._param_class.bounds( + self._re_optimize, self._re_optimize_scale + ) - pso = ParticleSwarmOptimizer(self.fast_rayshooting.logL, low_bounds, high_bounds, n_particles, - pool, args=[self._tol_source]) + pso = ParticleSwarmOptimizer( + self.fast_rayshooting.logL, + low_bounds, + high_bounds, + n_particles, + pool, + args=[self._tol_source], + ) - best, info = pso.optimize(n_iterations, verbose, early_stop_tolerance=self._pso_convergence_mean) + best, info = pso.optimize( + n_iterations, verbose, early_stop_tolerance=self._pso_convergence_mean + ) if verbose: - print('PSO done... ') - print('source plane chi^2: ', self.fast_rayshooting.source_plane_chi_square(best)) - print('total chi^2: ', self.fast_rayshooting.chi_square(best)) + print("PSO done... ") + print( + "source plane chi^2: ", + self.fast_rayshooting.source_plane_chi_square(best), + ) + print("total chi^2: ", self.fast_rayshooting.chi_square(best)) kwargs = self._param_class.args_to_kwargs(best) return kwargs def _fit_amoeba(self, kwargs, verbose): - - """ - Executes the downhill simplex - """ + """Executes the downhill simplex.""" args_init = self._param_class.kwargs_to_args(kwargs) - options = {'adaptive': True, 'fatol': self._tol_simplex_func, - 'maxiter': self._simplex_n_iterations * len(args_init)} + options = { + "adaptive": True, + "fatol": self._tol_simplex_func, + "maxiter": self._simplex_n_iterations * len(args_init), + } - method = 'Nelder-Mead' + method = "Nelder-Mead" if verbose: - print('starting amoeba... ') + print("starting amoeba... ") - opt = minimize(self.fast_rayshooting.chi_square, x0=args_init, - method=method, options=options) + opt = minimize( + self.fast_rayshooting.chi_square, + x0=args_init, + method=method, + options=options, + ) - kwargs = self._param_class.args_to_kwargs(opt['x']) - source_penalty = opt['fun'] + kwargs = self._param_class.args_to_kwargs(opt["x"]) + source_penalty = opt["fun"] return kwargs, source_penalty diff --git a/lenstronomy/LensModel/QuadOptimizer/param_manager.py b/lenstronomy/LensModel/QuadOptimizer/param_manager.py index c64911d5c..cd42af261 100644 --- a/lenstronomy/LensModel/QuadOptimizer/param_manager.py +++ b/lenstronomy/LensModel/QuadOptimizer/param_manager.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from lenstronomy.Util.param_util import shear_cartesian2polar, shear_polar2cartesian from lenstronomy.Util.param_util import ellipticity2phi_q @@ -45,14 +45,13 @@ class PowerLawParamManager(object): + """Base class for handling the translation between key word arguments and parameter + arrays for EPL mass models. 
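The downhill-simplex step above calls scipy's Nelder-Mead with an adaptive simplex, a function-value tolerance, and an iteration budget proportional to the number of parameters. The same call pattern on a toy objective standing in for chi_square:

```python
import numpy as np
from scipy.optimize import minimize

def toy_chi_square(args):
    # stand-in for fast_rayshooting.chi_square; minimum at (1.0, 0.2, -0.3)
    return np.sum((np.asarray(args) - np.array([1.0, 0.2, -0.3])) ** 2)

args_init = np.zeros(3)
options = {"adaptive": True, "fatol": 1e-3, "maxiter": 400 * len(args_init)}
opt = minimize(toy_chi_square, x0=args_init, method="Nelder-Mead", options=options)
print(opt["x"], opt["fun"])
```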
- """ - Base class for handling the translation between key word arguments and parameter arrays for - EPL mass models. This class is intended for use in modeling galaxy-scale lenses + This class is intended for use in modeling galaxy-scale lenses """ def __init__(self, kwargs_lens_init): - """ :param kwargs_lens_init: the initial kwargs_lens before optimizing @@ -61,18 +60,17 @@ def __init__(self, kwargs_lens_init): self.kwargs_lens = kwargs_lens_init def param_chi_square_penalty(self, args): - - return 0. + return 0.0 @property def to_vary_index(self): + """The number of lens models being varied in this routine. This is set to 2 + because the first three lens models are EPL and SHEAR, and their parameters are + being optimized. - """ - The number of lens models being varied in this routine. This is set to 2 because the first three lens models - are EPL and SHEAR, and their parameters are being optimized. - - The kwargs_list is split at to to_vary_index with indicies < to_vary_index accessed in this class, - and lens models with indicies > to_vary_index kept fixed. + The kwargs_list is split at to to_vary_index with indicies < to_vary_index + accessed in this class, and lens models with indicies > to_vary_index kept + fixed. Note that this requires a specific ordering of lens_model_list :return: @@ -80,10 +78,8 @@ def to_vary_index(self): return 2 - def bounds(self, re_optimize, scale=1.): - - """ - Sets the low/high parameter bounds for the particle swarm optimization + def bounds(self, re_optimize, scale=1.0): + """Sets the low/high parameter bounds for the particle swarm optimization. NOTE: The low/high values specified here are intended for galaxy-scale lenses. If you want to use this for a different size system you should create a new ParamClass with different settings @@ -107,50 +103,63 @@ def bounds(self, re_optimize, scale=1.): e_shift = 0.1 g_shift = 0.025 - shifts = np.array([thetaE_shift, center_shift, center_shift, e_shift, e_shift, g_shift, g_shift]) + shifts = np.array( + [ + thetaE_shift, + center_shift, + center_shift, + e_shift, + e_shift, + g_shift, + g_shift, + ] + ) low = np.array(args) - shifts * scale high = np.array(args) + shifts * scale return low, high @staticmethod def kwargs_to_args(kwargs): - """ :param kwargs: keyword arguments corresponding to the lens model parameters being optimized :return: array of lens model parameters """ - thetaE = kwargs[0]['theta_E'] - center_x = kwargs[0]['center_x'] - center_y = kwargs[0]['center_y'] - e1 = kwargs[0]['e1'] - e2 = kwargs[0]['e2'] - g1 = kwargs[1]['gamma1'] - g2 = kwargs[1]['gamma2'] + thetaE = kwargs[0]["theta_E"] + center_x = kwargs[0]["center_x"] + center_y = kwargs[0]["center_y"] + e1 = kwargs[0]["e1"] + e2 = kwargs[0]["e2"] + g1 = kwargs[1]["gamma1"] + g2 = kwargs[1]["gamma2"] args = (thetaE, center_x, center_y, e1, e2, g1, g2) return args class PowerLawFreeShear(PowerLawParamManager): + """This class implements a fit of EPL + external shear with every parameter except + the power law slope allowed to vary.""" - """ - This class implements a fit of EPL + external shear with every parameter except the power law slope allowed to vary - """ def args_to_kwargs(self, args): - """ :param args: array of lens model parameters :return: dictionary of lens model parameters """ - gamma = self.kwargs_lens[0]['gamma'] - kwargs_epl = {'theta_E': args[0], 'center_x': args[1], 'center_y': args[2], - 'e1': args[3], 'e2': args[4], 'gamma': gamma} + gamma = self.kwargs_lens[0]["gamma"] + kwargs_epl = { + "theta_E": args[0], + 
"center_x": args[1], + "center_y": args[2], + "e1": args[3], + "e2": args[4], + "gamma": gamma, + } - kwargs_shear = {'gamma1': args[5], 'gamma2': args[6]} + kwargs_shear = {"gamma1": args[5], "gamma2": args[6]} self.kwargs_lens[0] = kwargs_epl self.kwargs_lens[1] = kwargs_shear @@ -159,15 +168,14 @@ def args_to_kwargs(self, args): class PowerLawFixedShear(PowerLawParamManager): + """This class implements a fit of EPL + external shear with every parameter except + the power law slope AND the shear strength allowed to vary. - """ - This class implements a fit of EPL + external shear with every parameter except the power law slope AND the - shear strength allowed to vary. The user should specify shear_strengh in the args_param_class keyword when - creating the Optimizer class + The user should specify shear_strengh in the args_param_class keyword when creating + the Optimizer class """ def __init__(self, kwargs_lens_init, shear_strength): - """ :param kwargs_lens_init: the initial kwargs_lens before optimizing @@ -178,7 +186,6 @@ def __init__(self, kwargs_lens_init, shear_strength): super(PowerLawFixedShear, self).__init__(kwargs_lens_init) def args_to_kwargs(self, args): - """ :param args: array of lens model parameters @@ -186,14 +193,20 @@ def args_to_kwargs(self, args): """ (thetaE, center_x, center_y, e1, e2, g1, g2) = args - gamma = self.kwargs_lens[0]['gamma'] + gamma = self.kwargs_lens[0]["gamma"] - kwargs_epl = {'theta_E': thetaE, 'center_x': center_x, 'center_y': center_y, - 'e1': e1, 'e2': e2, 'gamma': gamma} + kwargs_epl = { + "theta_E": thetaE, + "center_x": center_x, + "center_y": center_y, + "e1": e1, + "e2": e2, + "gamma": gamma, + } phi, _ = shear_cartesian2polar(g1, g2) gamma1, gamma2 = shear_polar2cartesian(phi, self._shear_strength) - kwargs_shear = {'gamma1': gamma1, 'gamma2': gamma2} + kwargs_shear = {"gamma1": gamma1, "gamma2": gamma2} self.kwargs_lens[0] = kwargs_epl self.kwargs_lens[1] = kwargs_shear @@ -202,23 +215,22 @@ def args_to_kwargs(self, args): class PowerLawFreeShearMultipole(PowerLawParamManager): + """This class implements a fit of EPL + external shear + a multipole term with every + parameter except the power law slope and multipole moment free to vary. - """ - This class implements a fit of EPL + external shear + a multipole term with every parameter except the - power law slope and multipole moment free to vary. The mass centroid and orientation of the multipole term are - fixed to that of the EPL profile - + The mass centroid and orientation of the multipole term are fixed to that of the EPL + profile """ @property def to_vary_index(self): + """The number of lens models being varied in this routine. This is set to 3 + because the first three lens models are EPL, SHEAR, and MULTIPOLE, and their + parameters are being optimized. - """ - The number of lens models being varied in this routine. This is set to 3 because the first three lens models - are EPL, SHEAR, and MULTIPOLE, and their parameters are being optimized. - - The kwargs_list is split at to to_vary_index with indicies < to_vary_index accessed in this class, - and lens models with indicies > to_vary_index kept fixed. + The kwargs_list is split at to to_vary_index with indicies < to_vary_index + accessed in this class, and lens models with indicies > to_vary_index kept + fixed. 
Note that this requires a specific ordering of lens_model_list :return: @@ -227,42 +239,49 @@ def to_vary_index(self): return 3 def args_to_kwargs(self, args): - (thetaE, center_x, center_y, e1, e2, g1, g2) = args - gamma = self.kwargs_lens[0]['gamma'] + gamma = self.kwargs_lens[0]["gamma"] - kwargs_epl = {'theta_E': thetaE, 'center_x': center_x, 'center_y': center_y, - 'e1': e1, 'e2': e2, 'gamma': gamma} - kwargs_shear = {'gamma1': g1, 'gamma2': g2} + kwargs_epl = { + "theta_E": thetaE, + "center_x": center_x, + "center_y": center_y, + "e1": e1, + "e2": e2, + "gamma": gamma, + } + kwargs_shear = {"gamma1": g1, "gamma2": g2} self.kwargs_lens[0] = kwargs_epl self.kwargs_lens[1] = kwargs_shear - self.kwargs_lens[2]['center_x'] = center_x - self.kwargs_lens[2]['center_y'] = center_y + self.kwargs_lens[2]["center_x"] = center_x + self.kwargs_lens[2]["center_y"] = center_y phi, _ = ellipticity2phi_q(e1, e2) - self.kwargs_lens[2]['phi_m'] = phi + self.kwargs_lens[2]["phi_m"] = phi return self.kwargs_lens class PowerLawFixedShearMultipole(PowerLawFixedShear): - """ - This class implements a fit of EPL + external shear + a multipole term with every parameter except the - power law slope, shear strength, and multipole moment free to vary. The mass centroid and orientation of the - multipole term are fixed to that of the EPL profile + """This class implements a fit of EPL + external shear + a multipole term with every + parameter except the power law slope, shear strength, and multipole moment free to + vary. + + The mass centroid and orientation of the multipole term are fixed to that of the EPL + profile """ @property def to_vary_index(self): + """The number of lens models being varied in this routine. This is set to 3 + because the first three lens models are EPL, SHEAR, and MULTIPOLE, and their + parameters are being optimized. - """ - The number of lens models being varied in this routine. This is set to 3 because the first three lens models - are EPL, SHEAR, and MULTIPOLE, and their parameters are being optimized. - - The kwargs_list is split at to to_vary_index with indicies < to_vary_index accessed in this class, - and lens models with indicies > to_vary_index kept fixed. + The kwargs_list is split at to to_vary_index with indicies < to_vary_index + accessed in this class, and lens models with indicies > to_vary_index kept + fixed. 
Note that this requires a specific ordering of lens_model_list :return: @@ -271,23 +290,28 @@ def to_vary_index(self): return 3 def args_to_kwargs(self, args): - (thetaE, center_x, center_y, e1, e2, g1, g2) = args - gamma = self.kwargs_lens[0]['gamma'] + gamma = self.kwargs_lens[0]["gamma"] - kwargs_epl = {'theta_E': thetaE, 'center_x': center_x, 'center_y': center_y, - 'e1': e1, 'e2': e2, 'gamma': gamma} + kwargs_epl = { + "theta_E": thetaE, + "center_x": center_x, + "center_y": center_y, + "e1": e1, + "e2": e2, + "gamma": gamma, + } phi, _ = shear_cartesian2polar(g1, g2) gamma1, gamma2 = shear_polar2cartesian(phi, self._shear_strength) - kwargs_shear = {'gamma1': gamma1, 'gamma2': gamma2} + kwargs_shear = {"gamma1": gamma1, "gamma2": gamma2} self.kwargs_lens[0] = kwargs_epl self.kwargs_lens[1] = kwargs_shear - self.kwargs_lens[2]['center_x'] = center_x - self.kwargs_lens[2]['center_y'] = center_y + self.kwargs_lens[2]["center_x"] = center_x + self.kwargs_lens[2]["center_y"] = center_y phi, _ = ellipticity2phi_q(e1, e2) - self.kwargs_lens[2]['phi_m'] = phi + self.kwargs_lens[2]["phi_m"] = phi return self.kwargs_lens diff --git a/lenstronomy/LensModel/Solver/epl_shear_solver.py b/lenstronomy/LensModel/Solver/epl_shear_solver.py index 73089677f..7861514ad 100644 --- a/lenstronomy/LensModel/Solver/epl_shear_solver.py +++ b/lenstronomy/LensModel/Solver/epl_shear_solver.py @@ -1,94 +1,143 @@ -__author__ = 'ewoudwempe', 'sibirrer' +__author__ = "ewoudwempe", "sibirrer" import numpy as np -from lenstronomy.LensModel.Util.epl_util import min_approx, pol_to_cart, cart_to_pol, cdot, ps, rotmat, solvequadeq, brentq_inline +from lenstronomy.LensModel.Util.epl_util import ( + min_approx, + pol_to_cart, + cart_to_pol, + cdot, + ps, + rotmat, + solvequadeq, + brentq_inline, +) from lenstronomy.LensModel.Util.epl_util import pol_to_ell, ell_to_pol, geomlinspace from lenstronomy.Util.image_util import findOverlap from lenstronomy.LensModel.Profiles.epl_numba import alpha, omega from lenstronomy.Util.numba_util import jit -from lenstronomy.Util.param_util import ellipticity2phi_q, shear_cartesian2polar, shear_polar2cartesian +from lenstronomy.Util.param_util import ( + ellipticity2phi_q, + shear_cartesian2polar, + shear_polar2cartesian, +) from lenstronomy.LensModel.Profiles.shear import Shear @jit() def _alpha_epl_shear(x, y, b, q, t=1, gamma1=0, gamma2=0, Omega=None): - """The complex deflection of EPL+SHEAR""" - return alpha(x, y, b, q, t=t, Omega=Omega) + (gamma1 * x + gamma2*y) + 1j * (gamma2*x - gamma1*y) + """The complex deflection of EPL+SHEAR.""" + return ( + alpha(x, y, b, q, t=t, Omega=Omega) + + (gamma1 * x + gamma2 * y) + + 1j * (gamma2 * x - gamma1 * y) + ) @jit() def _one_dim_lens_eq_calcs(args, phi): - """Calculates intermediate quantities that are needed for several of the subsequent functions""" + """Calculates intermediate quantities that are needed for several of the subsequent + functions.""" b, t, y1, y2, q, gamma1, gamma2 = args - y = (y1+y2*1j) - - rhat = 1/(1-gamma1**2-gamma2**2)*( - ((1+gamma1)*np.cos(phi)+gamma2*np.sin(phi))+1j*(gamma2*np.cos(phi)+(1-gamma1)*np.sin(phi))) - thetahat = 1/(1-gamma1**2-gamma2**2)*( - ((1+gamma1)*np.sin(phi)-gamma2*np.cos(phi))+1j*(gamma2*np.sin(phi)-(1-gamma1)*np.cos(phi))) + y = y1 + y2 * 1j + + rhat = ( + 1 + / (1 - gamma1**2 - gamma2**2) + * ( + ((1 + gamma1) * np.cos(phi) + gamma2 * np.sin(phi)) + + 1j * (gamma2 * np.cos(phi) + (1 - gamma1) * np.sin(phi)) + ) + ) + thetahat = ( + 1 + / (1 - gamma1**2 - gamma2**2) + * ( + ((1 + gamma1) * 
np.sin(phi) - gamma2 * np.cos(phi)) + + 1j * (gamma2 * np.sin(phi) - (1 - gamma1) * np.cos(phi)) + ) + ) frac_Roverrsh, phiell = pol_to_ell(1, phi, q) Omega = omega(phiell, t, q) - const = 2*b/(1+q) - if abs(t-1) > 1e-4: - b_over_r_pow_tm1 = -cdot(y, thetahat)/(const*cdot(Omega, thetahat)) - R = b*np.abs(b_over_r_pow_tm1)**(1/(1-t))*np.sign(b_over_r_pow_tm1) + const = 2 * b / (1 + q) + if abs(t - 1) > 1e-4: + b_over_r_pow_tm1 = -cdot(y, thetahat) / (const * cdot(Omega, thetahat)) + R = b * np.abs(b_over_r_pow_tm1) ** (1 / (1 - t)) * np.sign(b_over_r_pow_tm1) else: - Omega_ort = 1j*Omega - x = ((1-gamma1)*np.cos(phi)-gamma2*np.sin(phi))+1j*(-gamma2*np.cos(phi)+(1+gamma1)*np.sin(phi)) - R = cdot(Omega_ort, y)/cdot(Omega_ort, x)*frac_Roverrsh + Omega_ort = 1j * Omega + x = ((1 - gamma1) * np.cos(phi) - gamma2 * np.sin(phi)) + 1j * ( + -gamma2 * np.cos(phi) + (1 + gamma1) * np.sin(phi) + ) + R = cdot(Omega_ort, y) / cdot(Omega_ort, x) * frac_Roverrsh r, theta = ell_to_pol(R, phiell, q) return Omega, const, phiell, q, r, rhat, t, b, thetahat, y @jit() def _one_dim_lens_eq_both(phi, args): - """Calculates and returns simultaneously both the smooth and the not-smooth 1-dimensional lens equation - that needs to be solved""" - Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs(args, phi) + """Calculates and returns simultaneously both the smooth and the not-smooth + 1-dimensional lens equation that needs to be solved.""" + Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs( + args, phi + ) rr, thetaa = ell_to_pol(1, phiell, q) - ip = cdot(y, rhat)*cdot(Omega, thetahat)-cdot(Omega, rhat)*cdot(y, thetahat) + ip = cdot(y, rhat) * cdot(Omega, thetahat) - cdot(Omega, rhat) * cdot(y, thetahat) # The derivations are lost somewhere in my notes... - eq = (rr*b)**(2/t-2)*ps((cdot(y, thetahat)/const), 2/t)*ip**2+ps(ip, 2/t)*cdot(Omega, thetahat)**2 - eq_notsmooth = ps(rr*b, 1-t)*(cdot(y, thetahat)/const)*np.abs(ip)**t+ip*np.abs(cdot(Omega, thetahat))**(+t) + eq = (rr * b) ** (2 / t - 2) * ps( + (cdot(y, thetahat) / const), 2 / t + ) * ip**2 + ps(ip, 2 / t) * cdot(Omega, thetahat) ** 2 + eq_notsmooth = ps(rr * b, 1 - t) * (cdot(y, thetahat) / const) * np.abs( + ip + ) ** t + ip * np.abs(cdot(Omega, thetahat)) ** (+t) return eq, eq_notsmooth @jit() def _getr(phi, args): - """Given an angle phi, get the radius r""" - Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs(args, phi) + """Given an angle phi, get the radius r.""" + Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs( + args, phi + ) return r @jit() def _one_dim_lens_eq(phi, args): """Calculates the smooth 1-dimensional lens equation to solve - to be used by a root-finder""" - Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs(args, phi) + Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs( + args, phi + ) rr, thetaa = ell_to_pol(1, phiell, q) - ip = cdot(y, rhat)*cdot(Omega, thetahat)-cdot(Omega, rhat)*cdot(y, thetahat) - eq = (rr*b)**(2/t-2)*ps((cdot(y, thetahat)/const), 2/t)*ip**2+ps(ip, 2/t)*cdot(Omega, thetahat)**2 + ip = cdot(y, rhat) * cdot(Omega, thetahat) - cdot(Omega, rhat) * cdot(y, thetahat) + eq = (rr * b) ** (2 / t - 2) * ps( + (cdot(y, thetahat) / const), 2 / t + ) * ip**2 + ps(ip, 2 / t) * cdot(Omega, thetahat) ** 2 return eq @jit() def _one_dim_lens_eq_unsmooth(phi, args): """Calculates the not-smooth 1-dimensional lens equation to to solve - to be used by a root-finder. 
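The jitted helpers in this file reduce the EPL+shear lens equation to a one-dimensional root-finding problem in the polar angle phi, solved by scanning a grid for sign changes and refining each bracket with Brent's method. The toy sketch below only illustrates that bracketing pattern with scipy; the function f is a stand-in, not the module's actual lens equation.

# Toy illustration of the bracketing strategy used here: sample on a grid,
# detect sign changes, then refine each bracket with Brent's method
# (scipy.optimize.brentq plays the role of brentq_inline).
import numpy as np
from scipy.optimize import brentq

def f(phi):
    return np.cos(3 * phi) - 0.2  # stand-in for the 1-D lens equation in phi

grid = np.linspace(0.0, np.pi, 200)
values = f(grid)
roots = []
for i in range(len(grid) - 1):
    if values[i] * values[i + 1] <= 0:  # a root is bracketed in this interval
        roots.append(brentq(f, grid[i], grid[i + 1]))
print(roots)  # approximately [0.456, 1.638, 2.551]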
- For some parameters and solutions, numerical issues make solving this one feasible while the other is not.""" - Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs(args, phi) + For some parameters and solutions, numerical issues make solving this one feasible while the other is not. + """ + Omega, const, phiell, q, r, rhat, t, b, thetahat, y = _one_dim_lens_eq_calcs( + args, phi + ) rr, thetaa = ell_to_pol(1, phiell, q) - ip = cdot(y, rhat)*cdot(Omega, thetahat)-cdot(Omega, rhat)*cdot(y, thetahat) - eq_notsmooth = ps(rr*b, 1-t)*(cdot(y, thetahat)/const)*np.abs(ip)**t+ip*np.abs(cdot(Omega, thetahat))**(+t) + ip = cdot(y, rhat) * cdot(Omega, thetahat) - cdot(Omega, rhat) * cdot(y, thetahat) + eq_notsmooth = ps(rr * b, 1 - t) * (cdot(y, thetahat) / const) * np.abs( + ip + ) ** t + ip * np.abs(cdot(Omega, thetahat)) ** (+t) return eq_notsmooth @jit() def _getphi(thpl, args): - """ - Finds all roots to both versions the 1-dimensional lens equation in phi, by doing a grid search for sign changes on - the supplied thpl. In the case of extrema, refine at the relevant location. + """Finds all roots to both versions the 1-dimensional lens equation in phi, by doing + a grid search for sign changes on the supplied thpl. In the case of extrema, refine + at the relevant location. :param thpl: What points to calculate the equation on use for detecting sign changes :param args: Parameters to be passed to the lens equation @@ -97,23 +146,31 @@ def _getphi(thpl, args): y, y_notsmooth = _one_dim_lens_eq_both(thpl, args) num_phi = len(thpl) roots = [] - for i in range(num_phi-1): - if y[i+1]*y[i] <= 0: - roots.append(brentq_inline(_one_dim_lens_eq, thpl[i], thpl[i+1], args=args) % (2*np.pi)) - elif y_notsmooth[i+1]*y_notsmooth[i] <= 0: - roots.append(brentq_inline(_one_dim_lens_eq_unsmooth, thpl[i], thpl[i+1], args=args) % (2*np.pi)) - - for i in range(1, num_phi+1): - y1 = y[i-1] + for i in range(num_phi - 1): + if y[i + 1] * y[i] <= 0: + roots.append( + brentq_inline(_one_dim_lens_eq, thpl[i], thpl[i + 1], args=args) + % (2 * np.pi) + ) + elif y_notsmooth[i + 1] * y_notsmooth[i] <= 0: + roots.append( + brentq_inline( + _one_dim_lens_eq_unsmooth, thpl[i], thpl[i + 1], args=args + ) + % (2 * np.pi) + ) + + for i in range(1, num_phi + 1): + y1 = y[i - 1] y2 = y[i % num_phi] - y3 = y[(i+1) % num_phi] - y1n = y_notsmooth[(i-1) % num_phi] + y3 = y[(i + 1) % num_phi] + y1n = y_notsmooth[(i - 1) % num_phi] y2n = y_notsmooth[i % num_phi] - y3n = y_notsmooth[(i+1) % num_phi] - if (y3-y2)*(y2-y1) <= 0 or (y3n-y2n)*(y2n-y1n) <= 0: - if y3*y2 <= 0 or y1*y2 <= 0: + y3n = y_notsmooth[(i + 1) % num_phi] + if (y3 - y2) * (y2 - y1) <= 0 or (y3n - y2n) * (y2n - y1n) <= 0: + if y3 * y2 <= 0 or y1 * y2 <= 0: continue - if i > num_phi-2: + if i > num_phi - 2: continue else: x1 = thpl[i - 1] @@ -124,38 +181,70 @@ def _getphi(thpl, args): xmin_ns = min_approx(x1, x2, x3, y1n, y2n, y3n) ymin = _one_dim_lens_eq(xmin, args) ymin_ns = _one_dim_lens_eq_unsmooth(xmin_ns, args) - if ymin*y2 <= 0 and x2 <= xmin <= x3: - roots.append(brentq_inline(_one_dim_lens_eq, x2, xmin, args=args) % (2*np.pi)) - roots.append(brentq_inline(_one_dim_lens_eq, xmin, x3, args=args) % (2*np.pi)) - elif ymin*y2 <= 0 and x1 <= xmin <= x2: - roots.append(brentq_inline(_one_dim_lens_eq, x1, xmin, args=args) % (2*np.pi)) - roots.append(brentq_inline(_one_dim_lens_eq, xmin, x2, args=args) % (2*np.pi)) + if ymin * y2 <= 0 and x2 <= xmin <= x3: + roots.append( + brentq_inline(_one_dim_lens_eq, x2, xmin, args=args) % (2 * np.pi) + ) + 
roots.append( + brentq_inline(_one_dim_lens_eq, xmin, x3, args=args) % (2 * np.pi) + ) + elif ymin * y2 <= 0 and x1 <= xmin <= x2: + roots.append( + brentq_inline(_one_dim_lens_eq, x1, xmin, args=args) % (2 * np.pi) + ) + roots.append( + brentq_inline(_one_dim_lens_eq, xmin, x2, args=args) % (2 * np.pi) + ) elif ymin_ns * y2n <= 0 and x2 <= xmin_ns <= x3: - roots.append(brentq_inline(_one_dim_lens_eq_unsmooth, x2, xmin_ns, args=args) % (2*np.pi)) - roots.append(brentq_inline(_one_dim_lens_eq_unsmooth, xmin_ns, x3, args=args) % (2*np.pi)) - elif ymin_ns*y2n <= 0 and x1 <= xmin_ns <= x2: - roots.append(brentq_inline(_one_dim_lens_eq_unsmooth, x1, xmin_ns, args=args) % (2*np.pi)) - roots.append(brentq_inline(_one_dim_lens_eq_unsmooth, xmin_ns, x2, args=args) % (2*np.pi)) + roots.append( + brentq_inline(_one_dim_lens_eq_unsmooth, x2, xmin_ns, args=args) + % (2 * np.pi) + ) + roots.append( + brentq_inline(_one_dim_lens_eq_unsmooth, xmin_ns, x3, args=args) + % (2 * np.pi) + ) + elif ymin_ns * y2n <= 0 and x1 <= xmin_ns <= x2: + roots.append( + brentq_inline(_one_dim_lens_eq_unsmooth, x1, xmin_ns, args=args) + % (2 * np.pi) + ) + roots.append( + brentq_inline(_one_dim_lens_eq_unsmooth, xmin_ns, x2, args=args) + % (2 * np.pi) + ) return np.array(roots) def solvelenseq_majoraxis(args, Nmeas=200, Nmeas_extra=50): - """Solve the lens equation, where the arguments have been properly rotated to the major-axis""" + """Solve the lens equation, where the arguments have been properly rotated to the + major-axis.""" b, t, y1, y2, q, gamma1, gamma2 = args - p1 = np.arctan2(y2*(1-gamma1)+gamma2*y1, y1*(1+gamma1)+gamma2*y2) + p1 = np.arctan2(y2 * (1 - gamma1) + gamma2 * y1, y1 * (1 + gamma1) + gamma2 * y2) int_points = [p1] geom = geomlinspace(1e-4, 0.1, Nmeas_extra) - thpl = np.sort(np.concatenate((np.linspace(0., np.pi, Nmeas), - *[i % np.pi-geom for i in int_points], - *[i % np.pi+geom for i in int_points], - ))) + thpl = np.sort( + np.concatenate( + ( + np.linspace(0.0, np.pi, Nmeas), + *[i % np.pi - geom for i in int_points], + *[i % np.pi + geom for i in int_points], + ) + ) + ) the = _getphi(thpl, (b, t, y1, y2, q, gamma1, gamma2)) thetas = np.concatenate((the, the + np.pi)) Rs = np.array([_getr(theta, (b, t, y1, y2, q, gamma1, gamma2)) for theta in thetas]) stuff = np.array(pol_to_cart(Rs[Rs > 0], thetas[Rs > 0])) - diff = -y1-y2*1j+stuff[0]+stuff[1]*1j-_alpha_epl_shear(stuff[0], stuff[1], b, q, t, gamma1=gamma1, gamma2=gamma2) + diff = ( + -y1 + - y2 * 1j + + stuff[0] + + stuff[1] * 1j + - _alpha_epl_shear(stuff[0], stuff[1], b, q, t, gamma1=gamma1, gamma2=gamma2) + ) goodones = np.abs(diff) < 1e-8 return findOverlap(*stuff[:, goodones], 1e-8) @@ -168,16 +257,18 @@ def _check_center(kwargs_lens): if len(kwargs_lens) > 1: shear = Shear() # calculate shift from the deflector centroid from the shear field - alpha_x, alpha_y = shear.derivatives(kwargs_lens[0]['center_x'], kwargs_lens[0]['center_y'], **kwargs_lens[1]) + alpha_x, alpha_y = shear.derivatives( + kwargs_lens[0]["center_x"], kwargs_lens[0]["center_y"], **kwargs_lens[1] + ) return alpha_x, alpha_y else: return 0, 0 def solve_lenseq_pemd(pos_, kwargs_lens, Nmeas=400, Nmeas_extra=80, **kwargs): - """ - Solves the lens equation using a semi-analytical recipe. - :param pos_: The source plane position (shape (2,)), or the source plane positions (shape (2,N)) for which to solve the lens equation + """Solves the lens equation using a semi-analytical recipe. 
+ + :param pos_: The source plane position (shape (2,)), or the source plane positions (shape (2,N)) for which to solve the lens equation :param kwargs_lens: List of kwargs in lenstronomy style, following ['EPL', 'SHEAR'] format :param Nmeas: resolution with which to sample the angular grid, higher means more reliable lens equation solving. For solving many positions at once, you may want to set this higher. :param Nmeas_extra: resolution with which to additionally sample the angular grid at the low-shear end, higher means more reliable lens equation solving. For solving many positions at once, you may want to set this higher. @@ -187,78 +278,103 @@ def solve_lenseq_pemd(pos_, kwargs_lens, Nmeas=400, Nmeas_extra=80, **kwargs): pos = np.asarray(pos_) if pos.ndim > 1 and pos.shape[-1] != 1: pos = pos[..., None] - t = kwargs_lens[0]['gamma']-1 if 'gamma' in kwargs_lens[0] else 1 + t = kwargs_lens[0]["gamma"] - 1 if "gamma" in kwargs_lens[0] else 1 - theta_ell, q = ellipticity2phi_q(kwargs_lens[0]['e1'], kwargs_lens[0]['e2']) - b = kwargs_lens[0]['theta_E']*np.sqrt(q) + theta_ell, q = ellipticity2phi_q(kwargs_lens[0]["e1"], kwargs_lens[0]["e2"]) + b = kwargs_lens[0]["theta_E"] * np.sqrt(q) if len(kwargs_lens) > 1: - gamma = kwargs_lens[1]['gamma1']+1j*kwargs_lens[1]['gamma2'] + gamma = kwargs_lens[1]["gamma1"] + 1j * kwargs_lens[1]["gamma2"] else: - gamma = 0+0j + gamma = 0 + 0j shift_x, shift_y = _check_center(kwargs_lens) shift = shift_x + 1j * shift_y - cen = kwargs_lens[0]['center_x']+1j*kwargs_lens[0]['center_y'] - p = pos[0]+1j*pos[1] - cen + shift + cen = kwargs_lens[0]["center_x"] + 1j * kwargs_lens[0]["center_y"] + p = pos[0] + 1j * pos[1] - cen + shift - rotfact = np.exp(-1j*theta_ell) + rotfact = np.exp(-1j * theta_ell) gamma *= rotfact**2 p *= rotfact - res = solvelenseq_majoraxis((b, t, p.real, p.imag, q, - gamma.real, gamma.imag), Nmeas=Nmeas, Nmeas_extra=Nmeas_extra) + res = solvelenseq_majoraxis( + (b, t, p.real, p.imag, q, gamma.real, gamma.imag), + Nmeas=Nmeas, + Nmeas_extra=Nmeas_extra, + ) xsol, ysol = res - x = np.array([(xs+1j*ys)/rotfact+cen for xs, ys in zip(xsol, ysol)]) + x = np.array([(xs + 1j * ys) / rotfact + cen for xs, ys in zip(xsol, ysol)]) return x.real, x.imag -def caustics_epl_shear(kwargs_lens, num_th=500, maginf=0, sourceplane=True, return_which=None): - """ - Analytically calculates the caustics of an EPL+shear lens model. - Since for gamma>2, the outer critical curve does not exist, the option to find the curves for a set, finite magnification exists, by supplying maginf, so that the routine finds the curve of this magnification, rather than the true caustic. +def caustics_epl_shear( + kwargs_lens, num_th=500, maginf=0, sourceplane=True, return_which=None +): + """Analytically calculates the caustics of an EPL+shear lens model. Since for + gamma>2, the outer critical curve does not exist, the option to find the curves for + a set, finite magnification exists, by supplying maginf, so that the routine finds + the curve of this magnification, rather than the true caustic. - :param kwargs_lens: List of kwargs in lenstronomy style, following ['EPL', 'SHEAR'] format + :param kwargs_lens: List of kwargs in lenstronomy style, following ['EPL', 'SHEAR'] + format :param num_th: resolution. 
- :param maginf: the outer critical curve for t>1 will be replaced with the curve where the inverse magnification is maginf - :param sourceplane: if True (default), ray-shoot the calculated critical curves to the source plane - :param return_which: options 'quad' (boundary of area within which there are 4 images), 'double' (boundary of area within which there are 2 images), - 'caustic' (the diamond caustic) and 'cut' (the cut, if it exists, that is if t<2, else, if t>2, returns the caustic) and None (in that case: return quad, caustic, cut) + :param maginf: the outer critical curve for t>1 will be replaced with the curve + where the inverse magnification is maginf + :param sourceplane: if True (default), ray-shoot the calculated critical curves to + the source plane + :param return_which: options 'quad' (boundary of area within which there are 4 + images), 'double' (boundary of area within which there are 2 images), 'caustic' + (the diamond caustic) and 'cut' (the cut, if it exists, that is if t<2, else, if + t>2, returns the caustic) and None (in that case: return quad, caustic, cut) :return: (2,N) array if return_which set, else a tuple of (caustic, cut, quad) """ - e1, e2 = kwargs_lens[0]['e1'], kwargs_lens[0]['e2'] + e1, e2 = kwargs_lens[0]["e1"], kwargs_lens[0]["e2"] if len(kwargs_lens) > 1: - gamma1unr, gamma2unr = kwargs_lens[1]['gamma1'], kwargs_lens[1]['gamma2'] + gamma1unr, gamma2unr = kwargs_lens[1]["gamma1"], kwargs_lens[1]["gamma2"] else: gamma1unr, gamma2unr = 0, 0 - t = kwargs_lens[0]['gamma']-1 if 'gamma' in kwargs_lens[0] else 1 + t = kwargs_lens[0]["gamma"] - 1 if "gamma" in kwargs_lens[0] else 1 theta_ell, q = ellipticity2phi_q(e1, e2) theta_gamma, gamma_mag = shear_cartesian2polar(gamma1unr, gamma2unr) - b = np.sqrt(q)*kwargs_lens[0]['theta_E'] - cen = np.expand_dims(np.array([kwargs_lens[0]['center_x'], kwargs_lens[0]['center_y']]), 1) + b = np.sqrt(q) * kwargs_lens[0]["theta_E"] + cen = np.expand_dims( + np.array([kwargs_lens[0]["center_x"], kwargs_lens[0]["center_y"]]), 1 + ) theta_gamma -= theta_ell gamma1, gamma2 = shear_polar2cartesian(theta_gamma, gamma_mag) M = rotmat(-theta_ell) phiell, q = ellipticity2phi_q(e1, e2) - theta = np.linspace(0, 2*np.pi, num_th, endpoint=False) + theta = np.linspace(0, 2 * np.pi, num_th, endpoint=False) r = 1 R, phi = pol_to_ell(1, theta, q) Omega = omega(phi, t, q) aa = 1 - bb = -(2-t) - frac_roverR = r/R - cc = (1-t)*(2-t)*(cdot(np.exp(1j*theta), Omega))/frac_roverR*2/(1+q) - cc -= (1-t)**2*(2/(1+q))**2*np.abs(Omega)**2/frac_roverR**2 + bb = -(2 - t) + frac_roverR = r / R + cc = ( + (1 - t) + * (2 - t) + * (cdot(np.exp(1j * theta), Omega)) + / frac_roverR + * 2 + / (1 + q) + ) + cc -= (1 - t) ** 2 * (2 / (1 + q)) ** 2 * np.abs(Omega) ** 2 / frac_roverR**2 # Shear stuff: - gammaint_fac = (-np.exp(2j*theta)*(2-t)/2+(1-t)*np.exp(1j*theta)*2/(1+q)*Omega/frac_roverR) - gamma = gamma1+1j*gamma2 - aa -= np.abs(gamma)**2 - bb -= 2*cdot(gamma, gammaint_fac) + gammaint_fac = ( + -np.exp(2j * theta) * (2 - t) / 2 + + (1 - t) * np.exp(1j * theta) * 2 / (1 + q) * Omega / frac_roverR + ) + gamma = gamma1 + 1j * gamma2 + aa -= np.abs(gamma) ** 2 + bb -= 2 * cdot(gamma, gammaint_fac) usol = np.array(solvequadeq(cc, bb, aa)).T - xcr_4, ycr_4 = pol_to_cart(b*usol[:, 1]**(-1/t)*frac_roverR, theta) - if t > 1: # If t>1, get the approximate outer caustic instead (where inverse magnification = maginf). 
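As an illustrative aside on this module's two public entry points (not part of the patch): solve_lenseq_pemd returns the image positions for a given source position, and caustics_epl_shear returns the requested caustic curve. The kwargs below follow the documented ['EPL', 'SHEAR'] ordering; all numbers are placeholders.

# Illustrative only: images and diamond caustic for an EPL + external shear model.
from lenstronomy.LensModel.Solver.epl_shear_solver import (
    solve_lenseq_pemd,
    caustics_epl_shear,
)

kwargs_lens = [
    {"theta_E": 1.2, "gamma": 2.1, "e1": 0.1, "e2": -0.05,
     "center_x": 0.0, "center_y": 0.0},
    {"gamma1": 0.03, "gamma2": 0.02},
]

x_img, y_img = solve_lenseq_pemd((0.05, 0.02), kwargs_lens)  # source at (0.05, 0.02)
caustic = caustics_epl_shear(kwargs_lens, num_th=200, return_which="caustic")  # (2, 200) array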
- usol = np.array(solvequadeq(cc, bb, aa-maginf)).T - xcr_cut, ycr_cut = pol_to_cart(b*usol[:, 1]**(-1/t)*frac_roverR, theta) + xcr_4, ycr_4 = pol_to_cart(b * usol[:, 1] ** (-1 / t) * frac_roverR, theta) + if ( + t > 1 + ): # If t>1, get the approximate outer caustic instead (where inverse magnification = maginf). + usol = np.array(solvequadeq(cc, bb, aa - maginf)).T + xcr_cut, ycr_cut = pol_to_cart(b * usol[:, 1] ** (-1 / t) * frac_roverR, theta) else: - usol = np.array(solvequadeq(cc, bb, aa+maginf)).T - xcr_cut, ycr_cut = pol_to_cart(b*usol[:, 0]**(-1/t)*frac_roverR, theta) + usol = np.array(solvequadeq(cc, bb, aa + maginf)).T + xcr_cut, ycr_cut = pol_to_cart(b * usol[:, 0] ** (-1 / t) * frac_roverR, theta) al_cut = _alpha_epl_shear(xcr_cut, ycr_cut, b, q, t, gamma1, gamma2, Omega=Omega) al_4 = _alpha_epl_shear(xcr_4, ycr_4, b, q, t, gamma1, gamma2, Omega=Omega) if sourceplane: @@ -267,23 +383,27 @@ def caustics_epl_shear(kwargs_lens, num_th=500, maginf=0, sourceplane=True, retu else: xca_cut, yca_cut = xcr_cut, ycr_cut xca_4, yca_4 = xcr_4, ycr_4 - if return_which == 'caustic': - return M@(xca_4, yca_4) + cen - if return_which == 'cut': - return M@(xca_cut, yca_cut) + cen + if return_which == "caustic": + return M @ (xca_4, yca_4) + cen + if return_which == "cut": + return M @ (xca_cut, yca_cut) + cen rcut, thcut = cart_to_pol(xca_cut, yca_cut) r, th = cart_to_pol(xca_4, yca_4) - r2 = np.interp(th, thcut, rcut, period=2*np.pi) + r2 = np.interp(th, thcut, rcut, period=2 * np.pi) - if return_which == 'double': + if return_which == "double": r = np.fmax(r, r2) else: # Quad r = np.fmin(r, r2) pos_tosample = np.empty((2, num_th)) pos_tosample[0], pos_tosample[1] = pol_to_cart(r, th) - if return_which in ('double', 'quad'): - return M@pos_tosample + cen - - return M@(xca_4, yca_4) + cen, M@(xca_cut, yca_cut) + cen, M@pos_tosample + cen # Mostly for some backward compatibility + if return_which in ("double", "quad"): + return M @ pos_tosample + cen + + return ( + M @ (xca_4, yca_4) + cen, + M @ (xca_cut, yca_cut) + cen, + M @ pos_tosample + cen, + ) # Mostly for some backward compatibility diff --git a/lenstronomy/LensModel/Solver/lens_equation_solver.py b/lenstronomy/LensModel/Solver/lens_equation_solver.py index 9d3c854fa..26cf0acfa 100644 --- a/lenstronomy/LensModel/Solver/lens_equation_solver.py +++ b/lenstronomy/LensModel/Solver/lens_equation_solver.py @@ -4,32 +4,36 @@ from scipy.optimize import minimize from lenstronomy.LensModel.Solver.epl_shear_solver import solve_lenseq_pemd -__all__ = ['LensEquationSolver'] +__all__ = ["LensEquationSolver"] class LensEquationSolver(object): - """ - class to solve for image positions given lens model and source position - """ - def __init__(self, lensModel): - """ - This class must contain the following definitions (with same syntax as the standard LensModel() class: - def ray_shooting() - def hessian() - def magnification() + """Class to solve for image positions given lens model and source position.""" - :param lensModel: instance of a class according to lenstronomy.LensModel.lens_model + def __init__(self, lensModel): + """This class must contain the following definitions (with same syntax as the + standard LensModel() class: def ray_shooting() def hessian() def magnification() + :param lensModel: instance of a class according to + lenstronomy.LensModel.lens_model """ self.lensModel = lensModel - def image_position_stochastic(self, source_x, source_y, kwargs_lens, search_window=10, - precision_limit=10**(-10), arrival_time_sort=True, x_center=0, 
- y_center=0, num_random=1000): - """ - Solves the lens equation stochastic with the scipy minimization routine on the quadratic distance between - the backwards ray-shooted proposed image position and the source position. - Credits to Giulia Pagano + def image_position_stochastic( + self, + source_x, + source_y, + kwargs_lens, + search_window=10, + precision_limit=10 ** (-10), + arrival_time_sort=True, + x_center=0, + y_center=0, + num_random=1000, + ): + """Solves the lens equation stochastic with the scipy minimization routine on + the quadratic distance between the backwards ray-shooted proposed image position + and the source position. Credits to Giulia Pagano. :param source_x: source position :param source_y: source position @@ -39,18 +43,32 @@ def image_position_stochastic(self, source_x, source_y, kwargs_lens, search_wind :param arrival_time_sort: bool, if True sorts according to arrival time :param x_center: center of search window :param y_center: center of search window - :param num_random: number of random starting points of the non-linear solver in the search window + :param num_random: number of random starting points of the non-linear solver in + the search window :return: x_image, y_image """ kwargs_lens = self.lensModel.set_static(kwargs_lens) x_solve, y_solve = [], [] for i in range(num_random): - x_init = np.random.uniform(-search_window / 2., search_window / 2) + x_center - y_init = np.random.uniform(-search_window / 2., search_window / 2) + y_center + x_init = ( + np.random.uniform(-search_window / 2.0, search_window / 2) + x_center + ) + y_init = ( + np.random.uniform(-search_window / 2.0, search_window / 2) + y_center + ) xinitial = np.array([x_init, y_init]) - result = minimize(self._root, xinitial, args=(kwargs_lens, source_x, source_y), tol=precision_limit ** 2, method='Nelder-Mead') - if self._root(result.x, kwargs_lens, source_x, source_y) < precision_limit**2: + result = minimize( + self._root, + xinitial, + args=(kwargs_lens, source_x, source_y), + tol=precision_limit**2, + method="Nelder-Mead", + ) + if ( + self._root(result.x, kwargs_lens, source_x, source_y) + < precision_limit**2 + ): x_solve.append(result.x[0]) y_solve.append(result.x[1]) @@ -71,22 +89,34 @@ def _root(self, x, kwargs_lens, source_x, source_y): """ x_, y_ = x beta_x, beta_y = self.lensModel.ray_shooting(x_, y_, kwargs_lens) - return (beta_x - source_x)**2 + (beta_y - source_y)**2 - - def candidate_solutions(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distance=0.1, search_window=10, - verbose=False, x_center=0, y_center=0): - """ - finds pixels in the image plane possibly hosting a solution of the lens equation, for the given source position and lens model + return (beta_x - source_x) ** 2 + (beta_y - source_y) ** 2 + + def candidate_solutions( + self, + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=0.1, + search_window=10, + verbose=False, + x_center=0, + y_center=0, + ): + """Finds pixels in the image plane possibly hosting a solution of the lens + equation, for the given source position and lens model. :param sourcePos_x: source position in units of angle :param sourcePos_y: source position in units of angle :param kwargs_lens: lens model parameters as keyword arguments - :param min_distance: minimum separation to consider for two images in units of angle - :param search_window: window size to be considered by the solver. 
Will not find image position outside this window + :param min_distance: minimum separation to consider for two images in units of + angle + :param search_window: window size to be considered by the solver. Will not find + image position outside this window :param verbose: bool, if True, prints some useful information for the user :param x_center: float, center of the window to search for point sources :param y_center: float, center of the window to search for point sources - :returns: (approximate) angular position of (multiple) images ra_pos, dec_pos in units of angles, related ray-traced source displacements and pixel width + :returns: (approximate) angular position of (multiple) images ra_pos, dec_pos in + units of angles, related ray-traced source displacements and pixel width :raises: AttributeError, KeyError """ kwargs_lens = self.lensModel.set_static(kwargs_lens) @@ -102,14 +132,22 @@ def candidate_solutions(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distanc # width of the grid point to a solution of the lens equation x_mins, y_mins, delta_map = util.local_minima_2d(absmapped, x_grid, y_grid) # pixel width - pixel_width = x_grid[1]-x_grid[0] - + pixel_width = x_grid[1] - x_grid[0] + return x_mins, y_mins, delta_map, pixel_width - def image_position_analytical(self, x, y, kwargs_lens, arrival_time_sort=True, magnification_limit=None, **kwargs_solver): - """ - Solves the lens equation. Only supports EPL-like (plus shear) models. Uses a specialized recipe that solves a - one-dimensional lens equation that is easier and more reliable to solve than the usual two-dimensional lens equation. + def image_position_analytical( + self, + x, + y, + kwargs_lens, + arrival_time_sort=True, + magnification_limit=None, + **kwargs_solver, + ): + """Solves the lens equation. Only supports EPL-like (plus shear) models. Uses a + specialized recipe that solves a one-dimensional lens equation that is easier + and more reliable to solve than the usual two-dimensional lens equation. :param x: source position in units of angle, an array of positions is also supported. :param y: source position in units of angle, an array of positions is also supported. @@ -123,9 +161,17 @@ def image_position_analytical(self, x, y, kwargs_lens, arrival_time_sort=True, m setting a a proper magnification_limit is more important. To get similar behaviour, a limit of 1e-1 is acceptable """ lens_model_list = list(self.lensModel.lens_model_list) - if lens_model_list not in (['SIE', 'SHEAR'], ['SIE'], ['EPL_NUMBA', 'SHEAR'], ['EPL_NUMBA'], ['EPL', 'SHEAR'], - ['EPL']): - raise ValueError("Only SIE, EPL, EPL_NUMBA (+shear) supported in the analytical solver for now.") + if lens_model_list not in ( + ["SIE", "SHEAR"], + ["SIE"], + ["EPL_NUMBA", "SHEAR"], + ["EPL_NUMBA"], + ["EPL", "SHEAR"], + ["EPL"], + ): + raise ValueError( + "Only SIE, EPL, EPL_NUMBA (+shear) supported in the analytical solver for now." + ) x_mins, y_mins = solve_lenseq_pemd((x, y), kwargs_lens, **kwargs_solver) if arrival_time_sort: @@ -136,80 +182,147 @@ def image_position_analytical(self, x, y, kwargs_lens, arrival_time_sort=True, m y_mins = y_mins[mag >= magnification_limit] return x_mins, y_mins - def image_position_from_source(self, sourcePos_x, sourcePos_y, kwargs_lens, solver='lenstronomy', **kwargs): - """ - Solves the lens equation, i.e. finds the image positions in the lens plane that are mapped to a given source - position. 
+ def image_position_from_source( + self, sourcePos_x, sourcePos_y, kwargs_lens, solver="lenstronomy", **kwargs + ): + """Solves the lens equation, i.e. finds the image positions in the lens plane + that are mapped to a given source position. :param sourcePos_x: source position in units of angle :param sourcePos_y: source position in units of angle :param kwargs_lens: lens model parameters as keyword arguments - :param solver: which solver to use, can be 'lenstronomy' (default), 'analytical' or 'stochastic'. - :param kwargs: Any additional kwargs are passed to the chosen solver, see the documentation of - image_position_lenstronomy, image_position_analytical and image_position_stochastic - :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units of angle + :param solver: which solver to use, can be 'lenstronomy' (default), 'analytical' + or 'stochastic'. + :param kwargs: Any additional kwargs are passed to the chosen solver, see the + documentation of image_position_lenstronomy, image_position_analytical and + image_position_stochastic + :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units + of angle """ - if solver == 'lenstronomy': - return self.image_position_lenstronomy(sourcePos_x, sourcePos_y, kwargs_lens, **kwargs) - if solver == 'analytical': - return self.image_position_analytical(sourcePos_x, sourcePos_y, kwargs_lens, **kwargs) - if solver == 'stochastic': - return self.image_position_stochastic(sourcePos_x, sourcePos_y, kwargs_lens, **kwargs) + if solver == "lenstronomy": + return self.image_position_lenstronomy( + sourcePos_x, sourcePos_y, kwargs_lens, **kwargs + ) + if solver == "analytical": + return self.image_position_analytical( + sourcePos_x, sourcePos_y, kwargs_lens, **kwargs + ) + if solver == "stochastic": + return self.image_position_stochastic( + sourcePos_x, sourcePos_y, kwargs_lens, **kwargs + ) raise ValueError(f"{solver} is not a valid solver.") - def image_position_lenstronomy(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distance=0.1, search_window=10, - precision_limit=10**(-10), num_iter_max=100, arrival_time_sort=True, - initial_guess_cut=True, verbose=False, x_center=0, y_center=0, num_random=0, - non_linear=False, magnification_limit=None): - """ - Finds image position given source position and lens model. The solver first samples does a grid search in the - lens plane, and the grid points that are closest to the supplied source position are fed to a - specialized gradient-based root finder that finds the exact solutions. Works with all lens models. + def image_position_lenstronomy( + self, + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=0.1, + search_window=10, + precision_limit=10 ** (-10), + num_iter_max=100, + arrival_time_sort=True, + initial_guess_cut=True, + verbose=False, + x_center=0, + y_center=0, + num_random=0, + non_linear=False, + magnification_limit=None, + ): + """Finds image position given source position and lens model. The solver first + samples does a grid search in the lens plane, and the grid points that are + closest to the supplied source position are fed to a specialized gradient-based + root finder that finds the exact solutions. Works with all lens models. 
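Usage sketch for the dispatcher above (an aside, not part of the patch): the same call serves the general-purpose grid-plus-gradient solver and the analytical EPL/SIE (+shear) recipe. Model values are placeholders; the 'analytical' path only accepts the model lists checked in image_position_analytical.

# Illustrative only: solve for the images of a source with both solver back-ends.
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver

lens_model = LensModel(lens_model_list=["EPL", "SHEAR"])
kwargs_lens = [
    {"theta_E": 1.1, "gamma": 2.0, "e1": 0.08, "e2": -0.04,
     "center_x": 0.0, "center_y": 0.0},
    {"gamma1": 0.02, "gamma2": -0.01},
]
solver = LensEquationSolver(lens_model)

# default: grid search followed by gradient-descent refinement
x_img, y_img = solver.image_position_from_source(
    0.03, 0.01, kwargs_lens, min_distance=0.05, search_window=5
)

# specialized semi-analytical recipe (EPL/SIE + shear models only)
x_ana, y_ana = solver.image_position_from_source(
    0.03, 0.01, kwargs_lens, solver="analytical"
)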
:param sourcePos_x: source position in units of angle :param sourcePos_y: source position in units of angle :param kwargs_lens: lens model parameters as keyword arguments - :param min_distance: minimum separation to consider for two images in units of angle - :param search_window: window size to be considered by the solver. Will not find image position outside this window - :param precision_limit: required precision in the lens equation solver (in units of angle in the source plane). - :param num_iter_max: maximum iteration of lens-source mapping conducted by solver to match the required precision - :param arrival_time_sort: bool, if True, sorts image position in arrival time (first arrival photon first listed) - :param initial_guess_cut: bool, if True, cuts initial local minima selected by the grid search based on distance criteria from the source position + :param min_distance: minimum separation to consider for two images in units of + angle + :param search_window: window size to be considered by the solver. Will not find + image position outside this window + :param precision_limit: required precision in the lens equation solver (in units + of angle in the source plane). + :param num_iter_max: maximum iteration of lens-source mapping conducted by + solver to match the required precision + :param arrival_time_sort: bool, if True, sorts image position in arrival time + (first arrival photon first listed) + :param initial_guess_cut: bool, if True, cuts initial local minima selected by + the grid search based on distance criteria from the source position :param verbose: bool, if True, prints some useful information for the user :param x_center: float, center of the window to search for point sources :param y_center: float, center of the window to search for point sources - :param num_random: int, number of random positions within the search window to be added to be starting - positions for the gradient decent solver - :param non_linear: bool, if True applies a non-linear solver not dependent on Hessian computation - :param magnification_limit: None or float, if set will only return image positions that have an - abs(magnification) larger than this number - :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units of angle + :param num_random: int, number of random positions within the search window to + be added to be starting positions for the gradient decent solver + :param non_linear: bool, if True applies a non-linear solver not dependent on + Hessian computation + :param magnification_limit: None or float, if set will only return image + positions that have an abs(magnification) larger than this number + :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units + of angle :raises: AttributeError, KeyError """ # find pixels in the image plane possibly hosting a solution of the lens equation, related source distances and # pixel width - x_mins, y_mins, delta_map, pixel_width = self.candidate_solutions(sourcePos_x, sourcePos_y, kwargs_lens, min_distance, search_window, verbose, x_center, y_center) + x_mins, y_mins, delta_map, pixel_width = self.candidate_solutions( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance, + search_window, + verbose, + x_center, + y_center, + ) if verbose: - print("There are %s regions identified that could contain a solution of the lens equation with" - "coordinates %s and %s " % (len(x_mins), x_mins, y_mins)) + print( + "There are %s regions identified that could contain a solution of the lens equation 
with" + "coordinates %s and %s " % (len(x_mins), x_mins, y_mins) + ) if len(x_mins) < 1: return x_mins, y_mins if initial_guess_cut: mag = np.abs(self.lensModel.magnification(x_mins, y_mins, kwargs_lens)) mag[mag < 1] = 1 - x_mins = x_mins[delta_map <= min_distance*mag*5] - y_mins = y_mins[delta_map <= min_distance*mag*5] + x_mins = x_mins[delta_map <= min_distance * mag * 5] + y_mins = y_mins[delta_map <= min_distance * mag * 5] if verbose: - print("The number of regions that meet the plausibility criteria are %s" % len(x_mins)) - x_mins = np.append(x_mins, np.random.uniform(low=-search_window/2+x_center, high=search_window/2+x_center, - size=num_random)) - y_mins = np.append(y_mins, np.random.uniform(low=-search_window / 2 + y_center, - high=search_window / 2 + y_center, size=num_random)) + print( + "The number of regions that meet the plausibility criteria are %s" + % len(x_mins) + ) + x_mins = np.append( + x_mins, + np.random.uniform( + low=-search_window / 2 + x_center, + high=search_window / 2 + x_center, + size=num_random, + ), + ) + y_mins = np.append( + y_mins, + np.random.uniform( + low=-search_window / 2 + y_center, + high=search_window / 2 + y_center, + size=num_random, + ), + ) # iterative solving of the lens equation for the selected grid points # print("Candidates:", x_mins.shape, y_mins.shape) - x_mins, y_mins, solver_precision = self._find_gradient_decent(x_mins, y_mins, sourcePos_x, sourcePos_y, kwargs_lens, - precision_limit, num_iter_max, verbose=verbose, - min_distance=min_distance, non_linear=non_linear) + x_mins, y_mins, solver_precision = self._find_gradient_decent( + x_mins, + y_mins, + sourcePos_x, + sourcePos_y, + kwargs_lens, + precision_limit, + num_iter_max, + verbose=verbose, + min_distance=min_distance, + non_linear=non_linear, + ) # only select iterative results that match the precision limit x_mins = x_mins[solver_precision <= precision_limit] y_mins = y_mins[solver_precision <= precision_limit] @@ -224,23 +337,39 @@ def image_position_lenstronomy(self, sourcePos_x, sourcePos_y, kwargs_lens, min_ self.lensModel.set_dynamic() return x_mins, y_mins - def _find_gradient_decent(self, x_min, y_min, sourcePos_x, sourcePos_y, kwargs_lens, precision_limit=10 ** (-10), - num_iter_max=200, verbose=False, min_distance=0.01, non_linear=False): - """ - given a 'good guess' of a solution of the lens equation (expected image position given a fixed source position) - this routine iteratively performs a ray-tracing with second order correction (effectively gradient decent) to find - a precise solution to the lens equation. + def _find_gradient_decent( + self, + x_min, + y_min, + sourcePos_x, + sourcePos_y, + kwargs_lens, + precision_limit=10 ** (-10), + num_iter_max=200, + verbose=False, + min_distance=0.01, + non_linear=False, + ): + """Given a 'good guess' of a solution of the lens equation (expected image + position given a fixed source position) this routine iteratively performs a ray- + tracing with second order correction (effectively gradient decent) to find a + precise solution to the lens equation. 
:param x_min: np.array, list of 'good guess' solutions of the lens equation :param y_min: np.array, list of 'good guess' solutions of the lens equation :param sourcePos_x: source position for which to solve the lens equation :param sourcePos_y: source position for which to solve the lens equation :param kwargs_lens: keyword argument list of the lens model - :param precision_limit: float, required match in the solution in the source plane - :param num_iter_max: int, maximum number of iterations before the algorithm stops - :param verbose: bool, if True inserts print statements about the behavior of the solver - :param min_distance: maximum correction applied per step (to avoid over-shooting in unstable regions) - :param non_linear: bool, if True, uses scipy.miminize instead of the directly implemented gradient decent approach. + :param precision_limit: float, required match in the solution in the source + plane + :param num_iter_max: int, maximum number of iterations before the algorithm + stops + :param verbose: bool, if True inserts print statements about the behavior of the + solver + :param min_distance: maximum correction applied per step (to avoid over-shooting + in unstable regions) + :param non_linear: bool, if True, uses scipy.miminize instead of the directly + implemented gradient decent approach. :return: x_position array, y_position array, error in the source plane array """ num_candidates = len(x_min) @@ -248,61 +377,117 @@ def _find_gradient_decent(self, x_min, y_min, sourcePos_x, sourcePos_y, kwargs_l y_mins = np.zeros(num_candidates) solver_precision = np.zeros(num_candidates) for i in range(len(x_min)): - x_guess, y_guess, delta, l = self._solve_single_proposal(x_min[i], y_min[i], sourcePos_x, sourcePos_y, - kwargs_lens, precision_limit, num_iter_max, - max_step=min_distance, non_linear=non_linear) + x_guess, y_guess, delta, l = self._solve_single_proposal( + x_min[i], + y_min[i], + sourcePos_x, + sourcePos_y, + kwargs_lens, + precision_limit, + num_iter_max, + max_step=min_distance, + non_linear=non_linear, + ) if verbose: - print("Solution found for region %s with required precision at iteration %s" % (i, l)) + print( + "Solution found for region %s with required precision at iteration %s" + % (i, l) + ) x_mins[i] = x_guess y_mins[i] = y_guess solver_precision[i] = delta return x_mins, y_mins, solver_precision - def _solve_single_proposal(self, x_guess, y_guess, source_x, source_y, kwargs_lens, precision_limit, num_iter_max, - max_step, non_linear=False): - """ - gradient decent solution of a single proposed starting point (close to a true solution) + def _solve_single_proposal( + self, + x_guess, + y_guess, + source_x, + source_y, + kwargs_lens, + precision_limit, + num_iter_max, + max_step, + non_linear=False, + ): + """Gradient decent solution of a single proposed starting point (close to a true + solution) :param x_guess: starting guess position in the image plane :param y_guess: starting guess position in the image plane :param source_x: source position to solve for in the image plane :param source_y: source position to solve for in the image plane :param kwargs_lens: keyword argument list of the lens model - :param precision_limit: float, required match in the solution in the source plane - :param num_iter_max: int, maximum number of iterations before the algorithm stops - :param max_step: maximum correction applied per step (to avoid over-shooting in instable regions) - :param non_linear: bool, if True, uses scipy.miminize instead of the directly implemented 
gradient decent approach. - :return: x_position, y_position, error in the source plane, steps required (for gradient decent) + :param precision_limit: float, required match in the solution in the source + plane + :param num_iter_max: int, maximum number of iterations before the algorithm + stops + :param max_step: maximum correction applied per step (to avoid over-shooting in + instable regions) + :param non_linear: bool, if True, uses scipy.miminize instead of the directly + implemented gradient decent approach. + :return: x_position, y_position, error in the source plane, steps required (for + gradient decent) """ l = 0 if non_linear: xinitial = np.array([x_guess, y_guess]) - result = minimize(self._root, xinitial, args=(kwargs_lens, source_x, source_y), tol=precision_limit ** 2, - method='Nelder-Mead') + result = minimize( + self._root, + xinitial, + args=(kwargs_lens, source_x, source_y), + tol=precision_limit**2, + method="Nelder-Mead", + ) delta = self._root(result.x, kwargs_lens, source_x, source_y) x_guess, y_guess = result.x[0], result.x[1] else: - x_mapped, y_mapped = self.lensModel.ray_shooting(x_guess, y_guess, kwargs_lens) + x_mapped, y_mapped = self.lensModel.ray_shooting( + x_guess, y_guess, kwargs_lens + ) delta = np.sqrt((x_mapped - source_x) ** 2 + (y_mapped - source_y) ** 2) while delta > precision_limit and l < num_iter_max: - x_mapped, y_mapped = self.lensModel.ray_shooting(x_guess, y_guess, kwargs_lens) + x_mapped, y_mapped = self.lensModel.ray_shooting( + x_guess, y_guess, kwargs_lens + ) delta = np.sqrt((x_mapped - source_x) ** 2 + (y_mapped - source_y) ** 2) - f_xx, f_xy, f_yx, f_yy = self.lensModel.hessian(x_guess, y_guess, kwargs_lens) + f_xx, f_xy, f_yx, f_yy = self.lensModel.hessian( + x_guess, y_guess, kwargs_lens + ) DistMatrix = np.array([[1 - f_yy, f_yx], [f_xy, 1 - f_xx]]) det = (1 - f_xx) * (1 - f_yy) - f_xy * f_yx deltaVec = np.array([x_mapped - source_x, y_mapped - source_y]) image_plane_vector = DistMatrix.dot(deltaVec) / det - dist = np.sqrt(image_plane_vector[0]**2 + image_plane_vector[1]**2) + dist = np.sqrt(image_plane_vector[0] ** 2 + image_plane_vector[1] ** 2) if dist > max_step: - image_plane_vector *= max_step/dist - x_guess, y_guess, delta, l = self._gradient_step(x_guess, y_guess, source_x, source_y, delta, - image_plane_vector, kwargs_lens, l, num_iter_max) + image_plane_vector *= max_step / dist + x_guess, y_guess, delta, l = self._gradient_step( + x_guess, + y_guess, + source_x, + source_y, + delta, + image_plane_vector, + kwargs_lens, + l, + num_iter_max, + ) return x_guess, y_guess, delta, l - def _gradient_step(self, x_guess, y_guess, source_x, source_y, delta_init, image_plane_vector, kwargs_lens, - iter_num, num_iter_max): + def _gradient_step( + self, + x_guess, + y_guess, + source_x, + source_y, + delta_init, + image_plane_vector, + kwargs_lens, + iter_num, + num_iter_max, + ): """ :param x_guess: float, current best fit solution in the image plane @@ -331,13 +516,39 @@ def _gradient_step(self, x_guess, y_guess, source_x, source_y, delta_init, image # direction and tries again image_plane_vector[0] *= np.random.normal(loc=0, scale=0.5) image_plane_vector[1] *= np.random.normal(loc=0, scale=0.5) - return self._gradient_step(x_guess, y_guess, source_x, source_y, delta_init, image_plane_vector, kwargs_lens, iter_num, num_iter_max) + return self._gradient_step( + x_guess, + y_guess, + source_x, + source_y, + delta_init, + image_plane_vector, + kwargs_lens, + iter_num, + num_iter_max, + ) else: return x_new, y_new, delta_new, iter_num - 
def findBrightImage(self, sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=0.01, search_window=5, - precision_limit=10**(-10), num_iter_max=10, arrival_time_sort=True, x_center=0, y_center=0, - num_random=0, non_linear=False, magnification_limit=None, initial_guess_cut=True, verbose=False): + def findBrightImage( + self, + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=0.01, + search_window=5, + precision_limit=10 ** (-10), + num_iter_max=10, + arrival_time_sort=True, + x_center=0, + y_center=0, + num_random=0, + non_linear=False, + magnification_limit=None, + initial_guess_cut=True, + verbose=False, + ): """ :param sourcePos_x: source position in units of angle @@ -360,13 +571,23 @@ def findBrightImage(self, sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, mi :returns: (exact) angular position of (multiple) images ra_pos, dec_pos in units of angle """ - x_mins, y_mins = self.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=min_distance, search_window=search_window, - precision_limit=precision_limit, num_iter_max=num_iter_max, - arrival_time_sort=arrival_time_sort, - initial_guess_cut=initial_guess_cut, verbose=verbose, - x_center=x_center, y_center=y_center, num_random=num_random, - non_linear=non_linear, magnification_limit=magnification_limit) + x_mins, y_mins = self.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=precision_limit, + num_iter_max=num_iter_max, + arrival_time_sort=arrival_time_sort, + initial_guess_cut=initial_guess_cut, + verbose=verbose, + x_center=x_center, + y_center=y_center, + num_random=num_random, + non_linear=non_linear, + magnification_limit=magnification_limit, + ) mag_list = [] for i in range(len(x_mins)): mag = self.lensModel.magnification(x_mins[i], y_mins[i], kwargs_lens) @@ -375,12 +596,14 @@ def findBrightImage(self, sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, mi x_mins_sorted = util.selectBest(x_mins, mag_list, numImages) y_mins_sorted = util.selectBest(y_mins, mag_list, numImages) if arrival_time_sort: - x_mins_sorted, y_mins_sorted = self.sort_arrival_times(x_mins_sorted, y_mins_sorted, kwargs_lens) + x_mins_sorted, y_mins_sorted = self.sort_arrival_times( + x_mins_sorted, y_mins_sorted, kwargs_lens + ) return x_mins_sorted, y_mins_sorted def sort_arrival_times(self, x_mins, y_mins, kwargs_lens): - """ - sort arrival times (fermat potential) of image positions in increasing order of light travel time + """Sort arrival times (fermat potential) of image positions in increasing order + of light travel time. :param x_mins: ra position of images :param y_mins: dec position of images @@ -388,9 +611,11 @@ def sort_arrival_times(self, x_mins, y_mins, kwargs_lens): :return: sorted lists of x_mins and y_mins """ - if hasattr(self.lensModel, '_no_potential'): - raise Exception('Instance of lensModel passed to this class does not compute the lensing potential, ' - 'and therefore cannot compute time delays.') + if hasattr(self.lensModel, "_no_potential"): + raise Exception( + "Instance of lensModel passed to this class does not compute the lensing potential, " + "and therefore cannot compute time delays." 
+ ) if len(x_mins) <= 1: return x_mins, y_mins diff --git a/lenstronomy/LensModel/Solver/solver.py b/lenstronomy/LensModel/Solver/solver.py index f34ec98ef..39880c8a2 100644 --- a/lenstronomy/LensModel/Solver/solver.py +++ b/lenstronomy/LensModel/Solver/solver.py @@ -2,14 +2,12 @@ from lenstronomy.LensModel.Solver.solver4point import Solver4Point import numpy as np -__all__ = ['Solver'] +__all__ = ["Solver"] class Solver(object): - """ - joint solve class to manage with type of solver to be executed and checks whether the requirements are fulfilled. - - """ + """Joint solve class to manage with type of solver to be executed and checks whether + the requirements are fulfilled.""" def __init__(self, solver_type, lensModel, num_images): """ @@ -23,10 +21,12 @@ def __init__(self, solver_type, lensModel, num_images): self._lensModel = lensModel if self._num_images == 4: self._solver = Solver4Point(lensModel, solver_type=solver_type) - elif self. _num_images == 2: + elif self._num_images == 2: self._solver = Solver2Point(lensModel, solver_type=solver_type) else: - raise ValueError("%s number of images is not valid. Use 2 or 4!" % self._num_images) + raise ValueError( + "%s number of images is not valid. Use 2 or 4!" % self._num_images + ) def constraint_lensmodel(self, x_pos, y_pos, kwargs_list, xtol=1.49012e-12): """ @@ -50,32 +50,36 @@ def update_solver(self, kwargs_lens, x_pos, y_pos): """ if not len(x_pos) == self._num_images: - raise ValueError("Point source number %s must be as specified by the solver with number of images %s" % - (len(x_pos), self._num_images)) + raise ValueError( + "Point source number %s must be as specified by the solver with number of images %s" + % (len(x_pos), self._num_images) + ) kwargs_lens, precision = self.constraint_lensmodel(x_pos, y_pos, kwargs_lens) return kwargs_lens def check_solver(self, image_x, image_y, kwargs_lens): - """ - returns the precision of the solver to match the image position + """Returns the precision of the solver to match the image position. :param kwargs_lens: full lens model (including solved parameters) :param image_x: point source in image :param image_y: point source in image - :return: precision of Euclidean distances between the different rays arriving at the image positions + :return: precision of Euclidean distances between the different rays arriving at + the image positions """ source_x, source_y = self._lensModel.ray_shooting(image_x, image_y, kwargs_lens) dist = np.sqrt((source_x - source_x[0]) ** 2 + (source_y - source_y[0]) ** 2) return dist def add_fixed_lens(self, kwargs_fixed_lens, kwargs_lens_init): - """ - returns kwargs that are kept fixed during run, depending on options + """Returns kwargs that are kept fixed during run, depending on options. - :param kwargs_fixed_lens: keyword argument list of fixed parameters (indicated by fitting argument of the user) + :param kwargs_fixed_lens: keyword argument list of fixed parameters (indicated + by fitting argument of the user) :param kwargs_lens_init: Initial values of the full lens model keyword arguments - :return: updated kwargs_fixed_lens, added fixed parameters being added (and replaced later on) by the - non-linear solver. + :return: updated kwargs_fixed_lens, added fixed parameters being added (and + replaced later on) by the non-linear solver. 
""" - kwargs_fixed_lens = self._solver.add_fixed_lens(kwargs_fixed_lens, kwargs_lens_init) + kwargs_fixed_lens = self._solver.add_fixed_lens( + kwargs_fixed_lens, kwargs_lens_init + ) return kwargs_fixed_lens diff --git a/lenstronomy/LensModel/Solver/solver2point.py b/lenstronomy/LensModel/Solver/solver2point.py index 26c188799..3074b76dc 100644 --- a/lenstronomy/LensModel/Solver/solver2point.py +++ b/lenstronomy/LensModel/Solver/solver2point.py @@ -1,26 +1,24 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import scipy.optimize import numpy as np import copy import lenstronomy.Util.param_util as param_util -__all__ = ['Solver2Point'] +__all__ = ["Solver2Point"] class Solver2Point(object): - """ - class to solve a constraint lens model with two point source positions + """Class to solve a constraint lens model with two point source positions. options are: 'CENTER': solves for 'center_x', 'center_y' parameters of the first lens model 'ELLIPSE': solves for 'e1', 'e2' of the first lens (can also be shear) 'SHAPELETS': solves for shapelet coefficients c01, c10 'THETA_E_PHI: solves for Einstein radius of first lens model and shear angle of second model - - """ - def __init__(self, lensModel, solver_type='CENTER', decoupling=True): + + def __init__(self, lensModel, solver_type="CENTER", decoupling=True): """ :param lensModel: instance of LensModel class @@ -29,31 +27,46 @@ def __init__(self, lensModel, solver_type='CENTER', decoupling=True): """ self.lensModel = lensModel self._lens_mode_list = lensModel.lens_model_list - if solver_type not in ['CENTER', 'ELLIPSE', 'SHAPELETS', 'THETA_E_PHI', 'THETA_E_ELLIPSE']: + if solver_type not in [ + "CENTER", + "ELLIPSE", + "SHAPELETS", + "THETA_E_PHI", + "THETA_E_ELLIPSE", + ]: raise ValueError("solver_type %s is not a valid option!" % solver_type) - if solver_type == 'SHAPELETS': - if not self._lens_mode_list[0] in ['SHAPELETS_CART', 'SHAPELETS_POLAR']: - raise ValueError("solver_type %s needs the first lens model to be in ['SHAPELETS_CART', " - "'SHAPELETS_POLAR']" % solver_type) - if solver_type == 'THETA_E_PHI': - if not self._lens_mode_list[1] == 'SHEAR': - raise ValueError("solver_type %s needs the second lens model to be 'SHEAR" % solver_type) + if solver_type == "SHAPELETS": + if not self._lens_mode_list[0] in ["SHAPELETS_CART", "SHAPELETS_POLAR"]: + raise ValueError( + "solver_type %s needs the first lens model to be in ['SHAPELETS_CART', " + "'SHAPELETS_POLAR']" % solver_type + ) + if solver_type == "THETA_E_PHI": + if not self._lens_mode_list[1] == "SHEAR": + raise ValueError( + "solver_type %s needs the second lens model to be 'SHEAR" + % solver_type + ) self._solver_type = solver_type - if lensModel.multi_plane is True or 'FOREGROUND_SHEAR' in self._lens_mode_list or solver_type == 'THETA_E_PHI': + if ( + lensModel.multi_plane is True + or "FOREGROUND_SHEAR" in self._lens_mode_list + or solver_type == "THETA_E_PHI" + ): self._decoupling = False else: self._decoupling = decoupling def constraint_lensmodel(self, x_pos, y_pos, kwargs_list, xtol=1.49012e-12): - """ - constrains lens model parameters by demanding the solution to match the image positions to a single source - position + """Constrains lens model parameters by demanding the solution to match the image + positions to a single source position. 
:param x_pos: list of image positions (x-axis) :param y_pos: list of image position (y-axis) :param kwargs_list: list of lens model kwargs :param xtol: tolerance level of solution when to stop the non-linear solver - :return: updated lens model that satisfies the lens equation for the point sources + :return: updated lens model that satisfies the lens equation for the point + sources """ kwargs = copy.deepcopy(kwargs_list) init = self._extract_array(kwargs) @@ -68,11 +81,13 @@ def constraint_lensmodel(self, x_pos, y_pos, kwargs_list, xtol=1.49012e-12): x = self.solve(x_pos, y_pos, init, kwargs, a, xtol=xtol) kwargs = self._update_kwargs(x, kwargs) y_end = self._F(x, x_pos, y_pos, kwargs, a) - accuracy = np.sum(y_end ** 2) + accuracy = np.sum(y_end**2) return kwargs, accuracy def solve(self, x_pos, y_pos, init, kwargs_list, a, xtol=1.49012e-12): - x = scipy.optimize.fsolve(self._F, init, args=(x_pos, y_pos, kwargs_list, a), xtol=xtol) # , factor=0.1) + x = scipy.optimize.fsolve( + self._F, init, args=(x_pos, y_pos, kwargs_list, a), xtol=xtol + ) # , factor=0.1) return x def _F(self, x, x_pos, y_pos, kwargs_list, a=np.zeros(2)): @@ -95,8 +110,8 @@ def _subtract_constraint(x_sub, y_sub): :return: """ a = np.zeros(2) - a[0] = - x_sub[0] + x_sub[1] - a[1] = - y_sub[0] + y_sub[1] + a[0] = -x_sub[0] + x_sub[1] + a[1] = -y_sub[0] + y_sub[1] return a def _update_kwargs(self, x, kwargs_list): @@ -106,70 +121,77 @@ def _update_kwargs(self, x, kwargs_list): :param kwargs_list: list of lens model kwargs :return: updated kwargs_list """ - if self._solver_type == 'CENTER': + if self._solver_type == "CENTER": [center_x, center_y] = x - kwargs_list[0]['center_x'] = center_x - kwargs_list[0]['center_y'] = center_y - elif self._solver_type == 'ELLIPSE': + kwargs_list[0]["center_x"] = center_x + kwargs_list[0]["center_y"] = center_y + elif self._solver_type == "ELLIPSE": [e1, e2] = x - kwargs_list[0]['e1'] = e1 - kwargs_list[0]['e2'] = e2 - elif self._solver_type == 'SHAPELETS': + kwargs_list[0]["e1"] = e1 + kwargs_list[0]["e2"] = e2 + elif self._solver_type == "SHAPELETS": [c10, c01] = x - coeffs = list(kwargs_list[0]['coeffs']) - coeffs[1: 3] = [c10, c01] - kwargs_list[0]['coeffs'] = coeffs - elif self._solver_type == 'THETA_E_PHI': + coeffs = list(kwargs_list[0]["coeffs"]) + coeffs[1:3] = [c10, c01] + kwargs_list[0]["coeffs"] = coeffs + elif self._solver_type == "THETA_E_PHI": [theta_E, phi_G] = x - kwargs_list[0]['theta_E'] = theta_E - phi_G_no_sense, gamma_ext = param_util.shear_cartesian2polar(kwargs_list[1]['gamma1'], kwargs_list[1]['gamma2']) + kwargs_list[0]["theta_E"] = theta_E + phi_G_no_sense, gamma_ext = param_util.shear_cartesian2polar( + kwargs_list[1]["gamma1"], kwargs_list[1]["gamma2"] + ) gamma1, gamma2 = param_util.shear_polar2cartesian(phi_G, gamma_ext) - kwargs_list[1]['gamma1'] = gamma1 - kwargs_list[1]['gamma2'] = gamma2 - elif self._solver_type == 'THETA_E_ELLIPSE': + kwargs_list[1]["gamma1"] = gamma1 + kwargs_list[1]["gamma2"] = gamma2 + elif self._solver_type == "THETA_E_ELLIPSE": [theta_E, phi_G] = x - kwargs_list[0]['theta_E'] = theta_E - phi_G_no_sense, q = param_util.ellipticity2phi_q(kwargs_list[0]['e1'], kwargs_list[0]['e2']) + kwargs_list[0]["theta_E"] = theta_E + phi_G_no_sense, q = param_util.ellipticity2phi_q( + kwargs_list[0]["e1"], kwargs_list[0]["e2"] + ) e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_list[0]['e1'] = e1 - kwargs_list[0]['e2'] = e2 + kwargs_list[0]["e1"] = e1 + kwargs_list[0]["e2"] = e2 else: - raise ValueError("Solver type %s not supported for 
2-point solver!" % self._solver_type) + raise ValueError( + "Solver type %s not supported for 2-point solver!" % self._solver_type + ) return kwargs_list def _extract_array(self, kwargs_list): - """ - inverse of _update_kwargs + """Inverse of _update_kwargs. :param kwargs_list: :return: """ - if self._solver_type == 'CENTER': - center_x = kwargs_list[0]['center_x'] - center_y = kwargs_list[0]['center_y'] + if self._solver_type == "CENTER": + center_x = kwargs_list[0]["center_x"] + center_y = kwargs_list[0]["center_y"] x = [center_x, center_y] - elif self._solver_type == 'ELLIPSE': - e1 = kwargs_list[0]['e1'] - e2 = kwargs_list[0]['e2'] + elif self._solver_type == "ELLIPSE": + e1 = kwargs_list[0]["e1"] + e2 = kwargs_list[0]["e2"] x = [e1, e2] - elif self._solver_type == 'SHAPELETS': - coeffs = list(kwargs_list[0]['coeffs']) - [c10, c01] = coeffs[1: 3] + elif self._solver_type == "SHAPELETS": + coeffs = list(kwargs_list[0]["coeffs"]) + [c10, c01] = coeffs[1:3] x = [c10, c01] - elif self._solver_type == 'THETA_E_PHI': - theta_E = kwargs_list[0]['theta_E'] - gamma1 = kwargs_list[1]['gamma1'] - gamma2 = kwargs_list[1]['gamma2'] + elif self._solver_type == "THETA_E_PHI": + theta_E = kwargs_list[0]["theta_E"] + gamma1 = kwargs_list[1]["gamma1"] + gamma2 = kwargs_list[1]["gamma2"] phi_ext, gamma_ext = param_util.shear_cartesian2polar(gamma1, gamma2) x = [theta_E, phi_ext] - elif self._solver_type == 'THETA_E_ELLIPSE': - theta_E = kwargs_list[0]['theta_E'] - e1 = kwargs_list[0]['e1'] - e2 = kwargs_list[0]['e2'] + elif self._solver_type == "THETA_E_ELLIPSE": + theta_E = kwargs_list[0]["theta_E"] + e1 = kwargs_list[0]["e1"] + e2 = kwargs_list[0]["e2"] phi_ext, gamma_ext = param_util.shear_cartesian2polar(e1, e2) x = [theta_E, phi_ext] else: - raise ValueError("Solver type %s not supported for 2-point solver!" % self._solver_type) + raise ValueError( + "Solver type %s not supported for 2-point solver!" % self._solver_type + ) return x def add_fixed_lens(self, kwargs_fixed_lens_list, kwargs_lens_init): @@ -181,20 +203,22 @@ def add_fixed_lens(self, kwargs_fixed_lens_list, kwargs_lens_init): """ kwargs_fixed = kwargs_fixed_lens_list[0] kwargs_lens = kwargs_lens_init[0] - if self._solver_type in ['CENTER']: - kwargs_fixed['center_x'] = kwargs_lens['center_x'] - kwargs_fixed['center_y'] = kwargs_lens['center_y'] - elif self._solver_type in ['ELLIPSE']: - kwargs_fixed['e1'] = kwargs_lens['e1'] - kwargs_fixed['e2'] = kwargs_lens['e2'] - elif self._solver_type == 'SHAPELETS': + if self._solver_type in ["CENTER"]: + kwargs_fixed["center_x"] = kwargs_lens["center_x"] + kwargs_fixed["center_y"] = kwargs_lens["center_y"] + elif self._solver_type in ["ELLIPSE"]: + kwargs_fixed["e1"] = kwargs_lens["e1"] + kwargs_fixed["e2"] = kwargs_lens["e2"] + elif self._solver_type == "SHAPELETS": pass - elif self._solver_type == 'THETA_E_PHI': - kwargs_fixed['theta_E'] = kwargs_lens['theta_E'] - kwargs_fixed_lens_list[1]['gamma2'] = 0 - elif self._solver_type == 'THETA_E_ELLIPSE': - kwargs_fixed['theta_E'] = kwargs_lens['theta_E'] - kwargs_fixed_lens_list[0]['e2'] = 0 + elif self._solver_type == "THETA_E_PHI": + kwargs_fixed["theta_E"] = kwargs_lens["theta_E"] + kwargs_fixed_lens_list[1]["gamma2"] = 0 + elif self._solver_type == "THETA_E_ELLIPSE": + kwargs_fixed["theta_E"] = kwargs_lens["theta_E"] + kwargs_fixed_lens_list[0]["e2"] = 0 else: - raise ValueError("Solver type %s not supported for 2-point solver!" % self._solver_type) + raise ValueError( + "Solver type %s not supported for 2-point solver!" 
% self._solver_type + ) return kwargs_fixed_lens_list diff --git a/lenstronomy/LensModel/Solver/solver4point.py b/lenstronomy/LensModel/Solver/solver4point.py index 61c580cf2..34504fbc0 100644 --- a/lenstronomy/LensModel/Solver/solver4point.py +++ b/lenstronomy/LensModel/Solver/solver4point.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.param_util as param_util @@ -6,34 +6,51 @@ import numpy as np import copy -__all__ = ['Solver4Point'] +__all__ = ["Solver4Point"] class Solver4Point(object): - """ - class to make the constraints for the solver - """ - def __init__(self, lensModel, solver_type='PROFILE'): + """Class to make the constraints for the solver.""" + + def __init__(self, lensModel, solver_type="PROFILE"): self._solver_type = solver_type # supported: - if not lensModel.lens_model_list[0] in ['SPEP', 'SPEMD', 'PEMD', 'SIE', 'NIE', 'NFW_ELLIPSE', 'NFW_ELLIPSE_CSE', - 'SHAPELETS_CART', 'CNFW_ELLIPSE', 'EPL', 'EPL_BOXYDISKY']: - raise ValueError("first lens model must be supported by the solver: 'SPEP', 'SPEMD', 'PEMD'," - " 'SIE', 'NIE', 'EPL', 'EPL_BOXYDISKY', 'NFW_ELLIPSE', 'NFW_ELLIPSE_CSE', " - "'SHAPELETS_CART', 'CNFW_ELLIPSE'." - "Your choice was %s" % lensModel.lens_model_list[0]) - if solver_type not in ['PROFILE', 'PROFILE_SHEAR']: - raise ValueError("solver_type %s not supported! Choose from 'PROFILE', 'PROFILE_SHEAR'" - % solver_type) - if solver_type in ['PROFILE_SHEAR']: - if lensModel.lens_model_list[1] == 'SHEAR': - self._solver_type = 'PROFILE_SHEAR' - elif lensModel.lens_model_list[1] == 'SHEAR_GAMMA_PSI': - self._solver_type = 'PROFILE_SHEAR_GAMMA_PSI' + if not lensModel.lens_model_list[0] in [ + "SPEP", + "SPEMD", + "PEMD", + "SIE", + "NIE", + "NFW_ELLIPSE", + "NFW_ELLIPSE_CSE", + "SHAPELETS_CART", + "CNFW_ELLIPSE", + "EPL", + "EPL_BOXYDISKY", + ]: + raise ValueError( + "first lens model must be supported by the solver: 'SPEP', 'SPEMD', 'PEMD'," + " 'SIE', 'NIE', 'EPL', 'EPL_BOXYDISKY', 'NFW_ELLIPSE', 'NFW_ELLIPSE_CSE', " + "'SHAPELETS_CART', 'CNFW_ELLIPSE'." + "Your choice was %s" % lensModel.lens_model_list[0] + ) + if solver_type not in ["PROFILE", "PROFILE_SHEAR"]: + raise ValueError( + "solver_type %s not supported! Choose from 'PROFILE', 'PROFILE_SHEAR'" + % solver_type + ) + if solver_type in ["PROFILE_SHEAR"]: + if lensModel.lens_model_list[1] == "SHEAR": + self._solver_type = "PROFILE_SHEAR" + elif lensModel.lens_model_list[1] == "SHEAR_GAMMA_PSI": + self._solver_type = "PROFILE_SHEAR_GAMMA_PSI" else: - raise ValueError("second lens model must be SHEAR_GAMMA_PSI or SHEAR to enable solver type %s!" % solver_type) + raise ValueError( + "second lens model must be SHEAR_GAMMA_PSI or SHEAR to enable solver type %s!" 
+ % solver_type + ) self.lensModel = lensModel self._lens_mode_list = lensModel.lens_model_list - if lensModel.multi_plane is True or 'FOREGROUND_SHEAR' in self._lens_mode_list: + if lensModel.multi_plane is True or "FOREGROUND_SHEAR" in self._lens_mode_list: self._decoupling = False else: self._decoupling = True @@ -52,8 +69,10 @@ def constraint_lensmodel(self, x_pos, y_pos, kwargs_list, xtol=1.49012e-12): if self._decoupling: alpha_0_x, alpha_0_y = self.lensModel.alpha(x_pos, y_pos, kwargs) alpha_1_x, alpha_1_y = self.lensModel.alpha(x_pos, y_pos, kwargs, k=0) - if self._solver_type in ['PROFILE_SHEAR', 'PROFILE_SHEAR_GAMMA_PSI']: - alpha_shear_x, alpha_shear_y = self.lensModel.alpha(x_pos, y_pos, kwargs, k=1) + if self._solver_type in ["PROFILE_SHEAR", "PROFILE_SHEAR_GAMMA_PSI"]: + alpha_shear_x, alpha_shear_y = self.lensModel.alpha( + x_pos, y_pos, kwargs, k=1 + ) alpha_1_x += alpha_shear_x alpha_1_y += alpha_shear_y x_sub = alpha_1_x - alpha_0_x @@ -68,15 +87,19 @@ def constraint_lensmodel(self, x_pos, y_pos, kwargs_list, xtol=1.49012e-12): return kwargs, accuracy def solve(self, x_pos, y_pos, init, kwargs_list, a, xtol=1.49012e-10): - x = scipy.optimize.fsolve(self._F, init, args=(x_pos, y_pos, kwargs_list, a), xtol=xtol) # , factor=0.1) + x = scipy.optimize.fsolve( + self._F, init, args=(x_pos, y_pos, kwargs_list, a), xtol=xtol + ) # , factor=0.1) return x def _F(self, x, x_pos, y_pos, kwargs_list, a=np.zeros(6)): kwargs_list = self._update_kwargs(x, kwargs_list) if self._decoupling: alpha_x, alpha_y = self.lensModel.alpha(x_pos, y_pos, kwargs_list, k=0) - if self._solver_type in ['PROFILE_SHEAR', 'PROFILE_SHEAR_GAMMA_PSI']: - alpha_x_shear, alpha_y_shear = self.lensModel.alpha(x_pos, y_pos, kwargs_list, k=1) + if self._solver_type in ["PROFILE_SHEAR", "PROFILE_SHEAR_GAMMA_PSI"]: + alpha_x_shear, alpha_y_shear = self.lensModel.alpha( + x_pos, y_pos, kwargs_list, k=1 + ) alpha_x += alpha_x_shear alpha_y += alpha_y_shear beta_x = x_pos - alpha_x @@ -102,12 +125,12 @@ def _subtract_constraint(x_sub, y_sub): """ a = np.zeros(6) - a[0] = - x_sub[0] + x_sub[1] - a[1] = - x_sub[0] + x_sub[2] - a[2] = - x_sub[0] + x_sub[3] - a[3] = - y_sub[0] + y_sub[1] - a[4] = - y_sub[0] + y_sub[2] - a[5] = - y_sub[0] + y_sub[3] + a[0] = -x_sub[0] + x_sub[1] + a[1] = -x_sub[0] + x_sub[2] + a[2] = -x_sub[0] + x_sub[3] + a[3] = -y_sub[0] + y_sub[1] + a[4] = -y_sub[0] + y_sub[2] + a[5] = -y_sub[0] + y_sub[3] return a def _update_kwargs(self, x, kwargs_list): @@ -117,54 +140,65 @@ def _update_kwargs(self, x, kwargs_list): :param kwargs_list: list of lens model kwargs :return: updated kwargs_list """ - if self._solver_type == 'PROFILE_SHEAR_GAMMA_PSI': + if self._solver_type == "PROFILE_SHEAR_GAMMA_PSI": phi_G = x[5] # % (2 * np.pi) - kwargs_list[1]['psi_ext'] = phi_G - if self._solver_type == 'PROFILE_SHEAR': + kwargs_list[1]["psi_ext"] = phi_G + if self._solver_type == "PROFILE_SHEAR": phi_G = x[5] % np.pi - phi_G_no_sense, gamma_ext = param_util.shear_cartesian2polar(kwargs_list[1]['gamma1'], kwargs_list[1]['gamma2']) + phi_G_no_sense, gamma_ext = param_util.shear_cartesian2polar( + kwargs_list[1]["gamma1"], kwargs_list[1]["gamma2"] + ) gamma1, gamma2 = param_util.shear_polar2cartesian(phi_G, gamma_ext) - kwargs_list[1]['gamma1'] = gamma1 - kwargs_list[1]['gamma2'] = gamma2 + kwargs_list[1]["gamma1"] = gamma1 + kwargs_list[1]["gamma2"] = gamma2 lens_model = self._lens_mode_list[0] - if lens_model in ['SPEP', 'SPEMD', 'SIE', 'NIE', 'PEMD', 'EPL', 'EPL_BOXYDISKY']: + if lens_model in [ + "SPEP", + "SPEMD", + 
"SIE", + "NIE", + "PEMD", + "EPL", + "EPL_BOXYDISKY", + ]: [theta_E, e1, e2, center_x, center_y, _] = x - kwargs_list[0]['theta_E'] = theta_E - kwargs_list[0]['e1'] = e1 - kwargs_list[0]['e2'] = e2 - kwargs_list[0]['center_x'] = center_x - kwargs_list[0]['center_y'] = center_y + kwargs_list[0]["theta_E"] = theta_E + kwargs_list[0]["e1"] = e1 + kwargs_list[0]["e2"] = e2 + kwargs_list[0]["center_x"] = center_x + kwargs_list[0]["center_y"] = center_y - elif lens_model in ['NFW_ELLIPSE', 'CNFW_ELLIPSE', 'NFW_ELLIPSE_CSE']: + elif lens_model in ["NFW_ELLIPSE", "CNFW_ELLIPSE", "NFW_ELLIPSE_CSE"]: [alpha_Rs, e1, e2, center_x, center_y, _] = x - kwargs_list[0]['alpha_Rs'] = alpha_Rs - kwargs_list[0]['e1'] = e1 - kwargs_list[0]['e2'] = e2 - kwargs_list[0]['center_x'] = center_x - kwargs_list[0]['center_y'] = center_y - elif lens_model in ['SHAPELETS_CART']: + kwargs_list[0]["alpha_Rs"] = alpha_Rs + kwargs_list[0]["e1"] = e1 + kwargs_list[0]["e2"] = e2 + kwargs_list[0]["center_x"] = center_x + kwargs_list[0]["center_y"] = center_y + elif lens_model in ["SHAPELETS_CART"]: [c10, c01, c20, c11, c02, _] = x - coeffs = list(kwargs_list[0]['coeffs']) - coeffs[1: 6] = [c10, c01, c20, c11, c02] - kwargs_list[0]['coeffs'] = coeffs + coeffs = list(kwargs_list[0]["coeffs"]) + coeffs[1:6] = [c10, c01, c20, c11, c02] + kwargs_list[0]["coeffs"] = coeffs else: - raise ValueError("Lens model %s not supported for 4-point solver!" % lens_model) + raise ValueError( + "Lens model %s not supported for 4-point solver!" % lens_model + ) return kwargs_list def _extract_array(self, kwargs_list): - """ - inverse of _update_kwargs - :param kwargs_list: + """Inverse of _update_kwargs :param kwargs_list: + :return: """ - if self._solver_type == 'PROFILE_SHEAR_GAMMA_PSI': - phi_ext = kwargs_list[1]['psi_ext'] # % (np.pi) + if self._solver_type == "PROFILE_SHEAR_GAMMA_PSI": + phi_ext = kwargs_list[1]["psi_ext"] # % (np.pi) # e1 = kwargs_list[1]['e1'] # e2 = kwargs_list[1]['e2'] # phi_ext, gamma_ext = param_util.ellipticity2phi_gamma(e1, e2) - elif self._solver_type == 'PROFILE_SHEAR': - gamma1 = kwargs_list[1]['gamma1'] - gamma2 = kwargs_list[1]['gamma2'] + elif self._solver_type == "PROFILE_SHEAR": + gamma1 = kwargs_list[1]["gamma1"] + gamma2 = kwargs_list[1]["gamma2"] phi_ext, gamma_ext = param_util.shear_cartesian2polar(gamma1, gamma2) # phi_G_no_sense, gamma_ext = param_util.ellipticity2phi_gamma(kwargs_list[1]['e1'], kwargs_list[1]['e2']) # e1, e2 = param_util.phi_gamma_ellipticity(phi_G, gamma_ext) @@ -172,27 +206,37 @@ def _extract_array(self, kwargs_list): else: phi_ext = 0 lens_model = self._lens_mode_list[0] - if lens_model in ['SPEP', 'SPEMD', 'SIE', 'NIE', 'PEMD', 'EPL', 'EPL_BOXYDISKY']: - e1 = kwargs_list[0]['e1'] - e2 = kwargs_list[0]['e2'] - center_x = kwargs_list[0]['center_x'] - center_y = kwargs_list[0]['center_y'] - theta_E = kwargs_list[0]['theta_E'] + if lens_model in [ + "SPEP", + "SPEMD", + "SIE", + "NIE", + "PEMD", + "EPL", + "EPL_BOXYDISKY", + ]: + e1 = kwargs_list[0]["e1"] + e2 = kwargs_list[0]["e2"] + center_x = kwargs_list[0]["center_x"] + center_y = kwargs_list[0]["center_y"] + theta_E = kwargs_list[0]["theta_E"] x = [theta_E, e1, e2, center_x, center_y, phi_ext] - elif lens_model in ['NFW_ELLIPSE', 'CNFW_ELLIPSE', 'NFW_ELLIPSE_CSE']: - e1 = kwargs_list[0]['e1'] - e2 = kwargs_list[0]['e2'] - center_x = kwargs_list[0]['center_x'] - center_y = kwargs_list[0]['center_y'] - alpha_Rs = kwargs_list[0]['alpha_Rs'] + elif lens_model in ["NFW_ELLIPSE", "CNFW_ELLIPSE", "NFW_ELLIPSE_CSE"]: + e1 = 
kwargs_list[0]["e1"] + e2 = kwargs_list[0]["e2"] + center_x = kwargs_list[0]["center_x"] + center_y = kwargs_list[0]["center_y"] + alpha_Rs = kwargs_list[0]["alpha_Rs"] x = [alpha_Rs, e1, e2, center_x, center_y, phi_ext] - elif lens_model in ['SHAPELETS_CART']: - coeffs = list(kwargs_list[0]['coeffs']) - [c10, c01, c20, c11, c02] = coeffs[1: 6] + elif lens_model in ["SHAPELETS_CART"]: + coeffs = list(kwargs_list[0]["coeffs"]) + [c10, c01, c20, c11, c02] = coeffs[1:6] x = [c10, c01, c20, c11, c02, phi_ext] else: - raise ValueError("Lens model %s not supported for 4-point solver!" % lens_model) + raise ValueError( + "Lens model %s not supported for 4-point solver!" % lens_model + ) return x def add_fixed_lens(self, kwargs_fixed_lens_list, kwargs_lens_init): @@ -206,24 +250,34 @@ def add_fixed_lens(self, kwargs_fixed_lens_list, kwargs_lens_init): lens_model = self.lensModel.lens_model_list[0] kwargs_fixed = kwargs_fixed_lens_list[0] kwargs_lens = kwargs_lens_init[0] - if self._solver_type in ['PROFILE_SHEAR', 'PROFILE_SHEAR_GAMMA_PSI']: + if self._solver_type in ["PROFILE_SHEAR", "PROFILE_SHEAR_GAMMA_PSI"]: pass # kwargs_fixed_lens_list[1]['psi_ext'] = kwargs_lens_init[1]['psi_ext'] - if lens_model in ['SPEP', 'SPEMD', 'SIE', 'NIE', 'PEMD', 'EPL', 'EPL_BOXYDISKY']: - kwargs_fixed['theta_E'] = kwargs_lens['theta_E'] - kwargs_fixed['e1'] = kwargs_lens['e1'] - kwargs_fixed['e2'] = kwargs_lens['e2'] - kwargs_fixed['center_x'] = kwargs_lens['center_x'] - kwargs_fixed['center_y'] = kwargs_lens['center_y'] - elif lens_model in ['NFW_ELLIPSE', 'CNFW_ELLIPSE', 'NFW_ELLIPSE_CSE']: - kwargs_fixed['alpha_Rs'] = kwargs_lens['alpha_Rs'] - kwargs_fixed['e1'] = kwargs_lens['e1'] - kwargs_fixed['e2'] = kwargs_lens['e2'] - kwargs_fixed['center_x'] = kwargs_lens['center_x'] - kwargs_fixed['center_y'] = kwargs_lens['center_y'] - elif lens_model in ['SHAPELETS_CART']: + if lens_model in [ + "SPEP", + "SPEMD", + "SIE", + "NIE", + "PEMD", + "EPL", + "EPL_BOXYDISKY", + ]: + kwargs_fixed["theta_E"] = kwargs_lens["theta_E"] + kwargs_fixed["e1"] = kwargs_lens["e1"] + kwargs_fixed["e2"] = kwargs_lens["e2"] + kwargs_fixed["center_x"] = kwargs_lens["center_x"] + kwargs_fixed["center_y"] = kwargs_lens["center_y"] + elif lens_model in ["NFW_ELLIPSE", "CNFW_ELLIPSE", "NFW_ELLIPSE_CSE"]: + kwargs_fixed["alpha_Rs"] = kwargs_lens["alpha_Rs"] + kwargs_fixed["e1"] = kwargs_lens["e1"] + kwargs_fixed["e2"] = kwargs_lens["e2"] + kwargs_fixed["center_x"] = kwargs_lens["center_x"] + kwargs_fixed["center_y"] = kwargs_lens["center_y"] + elif lens_model in ["SHAPELETS_CART"]: pass else: raise ValueError( - "%s is not a valid option. Choose from 'PROFILE', 'PROFILE_SHEAR', 'SHAPELETS'" % self._solver_type) + "%s is not a valid option. Choose from 'PROFILE', 'PROFILE_SHEAR', 'SHAPELETS'" + % self._solver_type + ) return kwargs_fixed_lens_list diff --git a/lenstronomy/LensModel/Util/epl_util.py b/lenstronomy/LensModel/Util/epl_util.py index cda344e6a..271f30caf 100644 --- a/lenstronomy/LensModel/Util/epl_util.py +++ b/lenstronomy/LensModel/Util/epl_util.py @@ -1,4 +1,4 @@ -__author__ = 'ewoudwempe' +__author__ = "ewoudwempe" import numpy as np from lenstronomy.Util.numba_util import jit @@ -6,8 +6,8 @@ @jit() def min_approx(x1, x2, x3, y1, y2, y3): - """ - Get the x-value of the minimum of the parabola through the points (x1,y1), ... + """Get the x-value of the minimum of the parabola through the points (x1,y1), ... 
+ :param x1: x-coordinate point 1 :param x2: x-coordinate point 2 :param x3: x-coordinate point 3 @@ -17,107 +17,96 @@ def min_approx(x1, x2, x3, y1, y2, y3): :return: x-location of the minimum """ # - div = (2.*(x3*(y1 - y2) + x1*(y2 - y3) + x2*(-y1 + y3))) - return (x3**2*(y1 - y2) + x1**2*(y2 - y3) + x2**2*(-y1 + y3))/div + div = 2.0 * (x3 * (y1 - y2) + x1 * (y2 - y3) + x2 * (-y1 + y3)) + return (x3**2 * (y1 - y2) + x1**2 * (y2 - y3) + x2**2 * (-y1 + y3)) / div @jit() def rotmat(th): - """ - Calculates the rotation matrix - :param th: angle - :return: rotation matrix - """ + """Calculates the rotation matrix :param th: angle :return: rotation matrix.""" return np.array([[np.cos(th), np.sin(th)], [-np.sin(th), np.cos(th)]]) @jit() def cdot(a, b): - """ - Calculates some complex dot-product that simplifies the math - :param a: complex number - :param b: complex number - :return: dot-product - """ - return a.real*b.real + a.imag*b.imag + """Calculates some complex dot-product that simplifies the math :param a: complex + number :param b: complex number :return: dot-product.""" + return a.real * b.real + a.imag * b.imag @jit() def ps(x, p): - """ - A regularized power-law that gets rid of singularities, abs(x)**p*sign(x) - :param x: x - :param p: p - :return: - """ - return np.abs(x)**p*np.sign(x) + """A regularized power-law that gets rid of singularities, abs(x)**p*sign(x) :param + x: x :param p: p :return:""" + return np.abs(x) ** p * np.sign(x) @jit() def cart_to_pol(x, y): - """ - Convert from cartesian to polar - :param x: x-coordinate - :param y: y-coordinate - :return: tuple of (r, theta) - """ - return np.sqrt(x**2+y**2), np.arctan2(y, x) % (2*np.pi) + """Convert from cartesian to polar :param x: x-coordinate :param y: y-coordinate + :return: tuple of (r, theta)""" + return np.sqrt(x**2 + y**2), np.arctan2(y, x) % (2 * np.pi) @jit() def pol_to_cart(r, th): - """ - Convert from polar to cartesian - :param r: r-coordinate - :param th: theta-coordinate - :return: tuple of (x,y) - """ - return r*np.cos(th), r*np.sin(th) + """Convert from polar to cartesian :param r: r-coordinate :param th: theta- + coordinate :return: tuple of (x,y)""" + return r * np.cos(th), r * np.sin(th) @jit() def pol_to_ell(r, theta, q): - """Converts from polar to elliptical coordinates""" - phi = np.arctan2(np.sin(theta), np.cos(theta)*q) - rell = r*np.sqrt(q**2*np.cos(theta)**2+np.sin(theta)**2) + """Converts from polar to elliptical coordinates.""" + phi = np.arctan2(np.sin(theta), np.cos(theta) * q) + rell = r * np.sqrt(q**2 * np.cos(theta) ** 2 + np.sin(theta) ** 2) return rell, phi @jit() def ell_to_pol(rell, theta, q): - """Converts from elliptical to polar coordinates""" - phi = np.arctan2(np.sin(theta)*q, np.cos(theta)) - r = rell*np.sqrt(1/q**2*np.cos(theta)**2+np.sin(theta)**2) + """Converts from elliptical to polar coordinates.""" + phi = np.arctan2(np.sin(theta) * q, np.cos(theta)) + r = rell * np.sqrt(1 / q**2 * np.cos(theta) ** 2 + np.sin(theta) ** 2) return r, phi def geomlinspace(a, b, N): - """Constructs a geomspace from a to b, with a linspace prepended to it from 0 to a, with the same spacing as the - geomspace would have at a""" - delta = a*((b/a)**(1/(N-1))-1) - return np.concatenate((np.linspace(0, a, int(a/delta), endpoint=False), np.geomspace(a, b, N))) + """Constructs a geomspace from a to b, with a linspace prepended to it from 0 to a, + with the same spacing as the geomspace would have at a.""" + delta = a * ((b / a) ** (1 / (N - 1)) - 1) + return np.concatenate( + (np.linspace(0, a, 
int(a / delta), endpoint=False), np.geomspace(a, b, N)) + ) @jit() def solvequadeq(a, b, c): - """ - Solves a quadratic equation. Care is taken for the numerics, see also https://en.wikipedia.org/wiki/Loss_of_significance + """Solves a quadratic equation. + + Care is taken for the numerics, see also + https://en.wikipedia.org/wiki/Loss_of_significance :param a: a :param b: b :param c: c :return: tuple of two solutions """ - sD = (b**2-4*a*c)**0.5 - x1 = (-b-np.sign(b)*sD)/(2*a) - x2 = 2*c/(-b-np.sign(b)*sD) - return np.where(b != 0, np.where(a != 0, x1, -c/b), -(-c/a)**0.5), \ - np.where(b != 0, np.where(a != 0, x2, -c/b+1e-8), +(-c/a)**0.5) + sD = (b**2 - 4 * a * c) ** 0.5 + x1 = (-b - np.sign(b) * sD) / (2 * a) + x2 = 2 * c / (-b - np.sign(b) * sD) + return np.where(b != 0, np.where(a != 0, x1, -c / b), -((-c / a) ** 0.5)), np.where( + b != 0, np.where(a != 0, x2, -c / b + 1e-8), +((-c / a) ** 0.5) + ) -def brentq_nojit(f, xa, xb, xtol=2e-14, rtol=16*np.finfo(float).eps, maxiter=100, args=()): - """ - A numba-compatible implementation of brentq (largely copied from scipy.optimize.brentq). - Unfortunately, the scipy verison is not compatible with numba, hence this reimplementation :( +def brentq_nojit( + f, xa, xb, xtol=2e-14, rtol=16 * np.finfo(float).eps, maxiter=100, args=() +): + """A numba-compatible implementation of brentq (largely copied from + scipy.optimize.brentq). + + Unfortunately, the scipy verison is not compatible with numba, hence this + reimplementation :( :param f: function to optimize :param xa: left bound :param xb: right bound @@ -125,19 +114,19 @@ def brentq_nojit(f, xa, xb, xtol=2e-14, rtol=16*np.finfo(float).eps, maxiter=100 :param rtol: x-coord relative tolerance :param maxiter: maximum num of iterations :param args: additional arguments to pass to function in the form f(x, args) - :return: + :return: """ xpre = xa xcur = xb - xblk = 0. - fblk = 0. - spre = 0. - scur = 0. 
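# --- standalone illustration, not part of the patch above -----------------------
# The loss-of-significance issue that solvequadeq above guards against: for
# b**2 >> 4ac the textbook formula cancels catastrophically for the small root,
# while the rearranged form 2c / (-b - sign(b)*sqrt(D)) used above stays accurate.
import numpy as np

a, b, c = 1.0, 1e8, 1.0
sD = np.sqrt(b**2 - 4 * a * c)
x1 = (-b - np.sign(b) * sD) / (2 * a)        # well-conditioned root, ~ -1e8
x2_naive = (-b + sD) / (2 * a)               # catastrophic cancellation
x2_stable = 2 * c / (-b - np.sign(b) * sD)   # ~ -1e-8, as expected

# brentq_nojit above expects a callable of the form f(x, args), with args forwarded
# as a single tuple; a hypothetical root-finding example:
from lenstronomy.LensModel.Util.epl_util import brentq_nojit

def f(x, args):
    (offset,) = args
    return x**2 - offset

root = brentq_nojit(f, 0.0, 3.0, args=(2.0,))  # ~ sqrt(2)
# ---------------------------------------------------------------------------------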
+ xblk = 0.0 + fblk = 0.0 + spre = 0.0 + scur = 0.0 fpre = f(xpre, args) fcur = f(xcur, args) funcalls = 2 - if fpre*fcur > 0: - raise ValueError('Signs are not different') + if fpre * fcur > 0: + raise ValueError("Signs are not different") if fpre == 0: return xpre if fcur == 0: @@ -145,7 +134,7 @@ def brentq_nojit(f, xa, xb, xtol=2e-14, rtol=16*np.finfo(float).eps, maxiter=100 iterations = 0 for i in range(maxiter): iterations += 1 - if fpre*fcur < 0: + if fpre * fcur < 0: xblk = xpre fblk = fpre # spres = scur = xcur - xpre @@ -153,26 +142,27 @@ def brentq_nojit(f, xa, xb, xtol=2e-14, rtol=16*np.finfo(float).eps, maxiter=100 xpre = xcur xcur = xblk xblk = xpre - + fpre = fcur fcur = fblk fblk = fpre - - delta = (xtol + rtol*abs(xcur))/2 - sbis = (xblk - xcur)/2 + + delta = (xtol + rtol * abs(xcur)) / 2 + sbis = (xblk - xcur) / 2 if fcur == 0 or abs(sbis) < delta: return xcur if abs(spre) > delta and abs(fcur) < abs(fpre): if xpre == xblk: - stry = -fcur*(xcur - xpre)/(fcur - fpre) + stry = -fcur * (xcur - xpre) / (fcur - fpre) else: - dpre = (fpre - fcur)/(xpre - xcur) - dblk = (fblk - fcur)/(xblk - xcur) - stry = -fcur*(fblk*dblk - fpre*dpre)\ - / (dblk*dpre*(fblk - fpre)) - - if 2*abs(stry) < min(abs(spre), 3*abs(sbis) - delta): + dpre = (fpre - fcur) / (xpre - xcur) + dblk = (fblk - fcur) / (xblk - xcur) + stry = ( + -fcur * (fblk * dblk - fpre * dpre) / (dblk * dpre * (fblk - fpre)) + ) + + if 2 * abs(stry) < min(abs(spre), 3 * abs(sbis) - delta): spre = scur scur = stry else: @@ -191,8 +181,8 @@ def brentq_nojit(f, xa, xb, xtol=2e-14, rtol=16*np.finfo(float).eps, maxiter=100 fcur = f(xcur, args) funcalls += 1 - + return xcur -brentq_inline = jit(inline='always')(brentq_nojit) +brentq_inline = jit(inline="always")(brentq_nojit) diff --git a/lenstronomy/LensModel/convergence_integrals.py b/lenstronomy/LensModel/convergence_integrals.py index 10f089598..4dd98792b 100644 --- a/lenstronomy/LensModel/convergence_integrals.py +++ b/lenstronomy/LensModel/convergence_integrals.py @@ -3,18 +3,19 @@ from lenstronomy.Util import util from lenstronomy.Util import image_util from lenstronomy.Util import kernel_util -""" -class to compute lensing potentials and deflection angles provided a convergence map -""" + +"""Class to compute lensing potentials and deflection angles provided a convergence +map.""" from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def potential_from_kappa_grid(kappa, grid_spacing): - """ - Lensing potential :math:`\\psi ({\\vec {\\theta }})` on the convergence grid :math:`\\kappa`. + """Lensing potential :math:`\\psi ({\\vec {\\theta }})` on the convergence grid + :math:`\\kappa`. .. 
math:: \\psi ({\\vec {\\theta }})={\\frac {1}{\\pi }}\\int d^{2}\\theta ^{\\prime } @@ -30,20 +31,22 @@ def potential_from_kappa_grid(kappa, grid_spacing): if num_pix % 2 == 0: num_pix += 1 kernel = potential_kernel(num_pix, grid_spacing) - f_ = scp.fftconvolve(kappa, kernel, mode='same') / np.pi * grid_spacing ** 2 + f_ = scp.fftconvolve(kappa, kernel, mode="same") / np.pi * grid_spacing**2 return f_ @export -def potential_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size): - """ - lensing potential on the convergence grid - the computation is performed as a convolution of the Green's function with the convergence map using FFT +def potential_from_kappa_grid_adaptive( + kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size +): + """Lensing potential on the convergence grid the computation is performed as a + convolution of the Green's function with the convergence map using FFT. :param kappa_high_res: 2d grid of convergence values :param grid_spacing: scale of an individual pixel (per axis) of grid :param low_res_factor: lower resolution factor of larger scale kernel. - :param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels + :param high_res_kernel_size: int, size of high resolution kernel in units of + degraded pixels :return: lensing potential in a 2d grid at positions x_grid, y_grid """ kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor) @@ -52,19 +55,28 @@ def potential_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_fac num_pix += 1 grid_spacing_low_res = grid_spacing * low_res_factor kernel = potential_kernel(num_pix, grid_spacing) - kernel_low_res, kernel_high_res = kernel_util.split_kernel(kernel, high_res_kernel_size, low_res_factor, - normalized=False) - - f_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res, mode='same') / np.pi * grid_spacing ** 2 + kernel_low_res, kernel_high_res = kernel_util.split_kernel( + kernel, high_res_kernel_size, low_res_factor, normalized=False + ) + + f_high_res = ( + scp.fftconvolve(kappa_high_res, kernel_high_res, mode="same") + / np.pi + * grid_spacing**2 + ) f_high_res = image_util.re_size(f_high_res, low_res_factor) - f_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res, mode='same') / np.pi * grid_spacing_low_res ** 2 + f_low_res = ( + scp.fftconvolve(kappa_low_res, kernel_low_res, mode="same") + / np.pi + * grid_spacing_low_res**2 + ) return f_high_res + f_low_res @export def deflection_from_kappa_grid(kappa, grid_spacing): - """ - Deflection angle :math:`\\vec {\\alpha }}` from a convergence grid :math:`\\kappa`. + """Deflection angle :math:`\\vec {\\alpha }}` from a convergence grid + :math:`\\kappa`. .. 
math:: {\\vec {\\alpha }}({\\vec {\\theta }})={\\frac {1}{\\pi }} @@ -81,22 +93,24 @@ def deflection_from_kappa_grid(kappa, grid_spacing): if num_pix % 2 == 0: num_pix += 1 kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing) - f_x = scp.fftconvolve(kappa, kernel_x, mode='same') / np.pi * grid_spacing ** 2 - f_y = scp.fftconvolve(kappa, kernel_y, mode='same') / np.pi * grid_spacing ** 2 + f_x = scp.fftconvolve(kappa, kernel_x, mode="same") / np.pi * grid_spacing**2 + f_y = scp.fftconvolve(kappa, kernel_y, mode="same") / np.pi * grid_spacing**2 return f_x, f_y @export -def deflection_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size): - """ - deflection angles on the convergence grid with adaptive FFT - the computation is performed as a convolution of the Green's function with the convergence map using FFT - The grid is returned in the lower resolution grid +def deflection_from_kappa_grid_adaptive( + kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size +): + """Deflection angles on the convergence grid with adaptive FFT the computation is + performed as a convolution of the Green's function with the convergence map using + FFT The grid is returned in the lower resolution grid. :param kappa_high_res: convergence values for each pixel (2-d array) :param grid_spacing: pixel size of high resolution grid :param low_res_factor: lower resolution factor of larger scale kernel. - :param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels + :param high_res_kernel_size: int, size of high resolution kernel in units of + degraded pixels :return: numerical deflection angles in x- and y- direction """ kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor) @@ -107,52 +121,71 @@ def deflection_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_fa kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing) grid_spacing_low_res = grid_spacing * low_res_factor - kernel_low_res_x, kernel_high_res_x = kernel_util.split_kernel(kernel_x, high_res_kernel_size, low_res_factor, - normalized=False) - f_x_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_x, mode='same') / np.pi * grid_spacing ** 2 + kernel_low_res_x, kernel_high_res_x = kernel_util.split_kernel( + kernel_x, high_res_kernel_size, low_res_factor, normalized=False + ) + f_x_high_res = ( + scp.fftconvolve(kappa_high_res, kernel_high_res_x, mode="same") + / np.pi + * grid_spacing**2 + ) f_x_high_res = image_util.re_size(f_x_high_res, low_res_factor) - f_x_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_x, mode='same') / np.pi * grid_spacing_low_res ** 2 + f_x_low_res = ( + scp.fftconvolve(kappa_low_res, kernel_low_res_x, mode="same") + / np.pi + * grid_spacing_low_res**2 + ) f_x = f_x_high_res + f_x_low_res - kernel_low_res_y, kernel_high_res_y = kernel_util.split_kernel(kernel_y, high_res_kernel_size, low_res_factor, - normalized=False) - f_y_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_y, mode='same') / np.pi * grid_spacing ** 2 + kernel_low_res_y, kernel_high_res_y = kernel_util.split_kernel( + kernel_y, high_res_kernel_size, low_res_factor, normalized=False + ) + f_y_high_res = ( + scp.fftconvolve(kappa_high_res, kernel_high_res_y, mode="same") + / np.pi + * grid_spacing**2 + ) f_y_high_res = image_util.re_size(f_y_high_res, low_res_factor) - f_y_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_y, mode='same') / np.pi * grid_spacing_low_res ** 2 + f_y_low_res = ( + 
scp.fftconvolve(kappa_low_res, kernel_low_res_y, mode="same") + / np.pi + * grid_spacing_low_res**2 + ) f_y = f_y_high_res + f_y_low_res return f_x, f_y @export def potential_kernel(num_pix, delta_pix): - """ - numerical gridded integration kernel for convergence to lensing kernel with given pixel size + """Numerical gridded integration kernel for convergence to lensing kernel with given + pixel size. :param num_pix: integer; number of pixels of kernel per axis :param delta_pix: pixel size (per dimension in units of angle) :return: kernel for lensing potential """ x_shift, y_shift = util.make_grid(numPix=num_pix, deltapix=delta_pix) - r2 = x_shift ** 2 + y_shift ** 2 + r2 = x_shift**2 + y_shift**2 r2_max = np.max(r2) r2[r2 < (delta_pix / 2) ** 2] = (delta_pix / 2) ** 2 - lnr = np.log(r2/r2_max) / 2. + lnr = np.log(r2 / r2_max) / 2.0 kernel = util.array2image(lnr) return kernel @export def deflection_kernel(num_pix, delta_pix): - """ - numerical gridded integration kernel for convergence to deflection angle with given pixel size + """Numerical gridded integration kernel for convergence to deflection angle with + given pixel size. - :param num_pix: integer; number of pixels of kernel per axis, should be odd number to have a defined center + :param num_pix: integer; number of pixels of kernel per axis, should be odd number + to have a defined center :param delta_pix: pixel size (per dimension in units of angle) :return: kernel for x-direction and kernel of y-direction deflection angles """ x_shift, y_shift = util.make_grid(numPix=num_pix, deltapix=delta_pix) r2 = x_shift**2 + y_shift**2 - r2[r2 < (delta_pix/2)**2] = (delta_pix/2) ** 2 + r2[r2 < (delta_pix / 2) ** 2] = (delta_pix / 2) ** 2 kernel_x = util.array2image(x_shift / r2) kernel_y = util.array2image(y_shift / r2) diff --git a/lenstronomy/LensModel/lens_model.py b/lenstronomy/LensModel/lens_model.py index 3b67df2b5..4805de3a0 100644 --- a/lenstronomy/LensModel/lens_model.py +++ b/lenstronomy/LensModel/lens_model.py @@ -1,24 +1,36 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.single_plane import SinglePlane from lenstronomy.LensModel.LineOfSight.single_plane_los import SinglePlaneLOS from lenstronomy.LensModel.MultiPlane.multi_plane import MultiPlane from lenstronomy.Cosmo.lens_cosmo import LensCosmo from lenstronomy.Util import constants as const -__all__ = ['LensModel'] +__all__ = ["LensModel"] class LensModel(object): - """ - class to handle an arbitrary list of lens models. This is the main lenstronomy LensModel API for all other modules. - """ + """Class to handle an arbitrary list of lens models. - def __init__(self, lens_model_list, z_lens=None, z_source=None, lens_redshift_list=None, cosmo=None, - multi_plane=False, numerical_alpha_class=None, observed_convention_index=None, - z_source_convention=None, cosmo_interp=False, - z_interp_stop=None, num_z_interp=100, - kwargs_interp=None, kwargs_synthesis=None): + This is the main lenstronomy LensModel API for all other modules. 
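# --- illustrative usage sketch, not part of the patch above ---------------------
# Using the convergence-integral helpers reformatted above: a toy Gaussian
# convergence map is convolved (via FFT) with the Green's-function kernels to get
# the lensing potential and deflection field on the same grid. Grid size, pixel
# scale and the Gaussian width are arbitrary example values.
import numpy as np
from lenstronomy.LensModel import convergence_integrals

num_pix, delta_pix = 100, 0.05  # 100 x 100 grid with 0.05 arcsec pixels
x = (np.arange(num_pix) - num_pix / 2.0) * delta_pix
xx, yy = np.meshgrid(x, x)
kappa = np.exp(-(xx**2 + yy**2) / (2 * 0.3**2))  # toy convergence map

psi = convergence_integrals.potential_from_kappa_grid(kappa, delta_pix)
alpha_x, alpha_y = convergence_integrals.deflection_from_kappa_grid(kappa, delta_pix)
# the *_adaptive variants above do the same with a split low/high-resolution kernel
# ---------------------------------------------------------------------------------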
+ """ + def __init__( + self, + lens_model_list, + z_lens=None, + z_source=None, + lens_redshift_list=None, + cosmo=None, + multi_plane=False, + numerical_alpha_class=None, + observed_convention_index=None, + z_source_convention=None, + cosmo_interp=False, + z_interp_stop=None, + num_z_interp=100, + kwargs_interp=None, + kwargs_synthesis=None, + ): """ :param lens_model_list: list of strings with lens model names @@ -54,143 +66,187 @@ def __init__(self, lens_model_list, z_lens=None, z_source=None, lens_redshift_li if cosmo is None: from astropy.cosmology import default_cosmology + cosmo = default_cosmology.get() self.cosmo = cosmo # Are there line-of-sight corrections? - permitted_los_models = ['LOS', 'LOS_MINIMAL'] - los_models = [(i, model) for (i, model) in enumerate(lens_model_list) - if model in permitted_los_models] + permitted_los_models = ["LOS", "LOS_MINIMAL"] + los_models = [ + (i, model) + for (i, model) in enumerate(lens_model_list) + if model in permitted_los_models + ] if len(los_models) == 0: los_effects = False elif len(los_models) == 1: los_effects = True index_los, los_model = los_models[0] else: - raise ValueError('You can only have one model for line-of-sight corrections.') + raise ValueError( + "You can only have one model for line-of-sight corrections." + ) # Multi-plane or single-plane lensing? self.multi_plane = multi_plane if multi_plane is True: if z_source is None: - raise ValueError('z_source needs to be set for multi-plane lens modelling.') + raise ValueError( + "z_source needs to be set for multi-plane lens modelling." + ) if los_effects is True: - raise ValueError('LOS effects and multi-plane lensing are incompatible.') - self.lens_model = MultiPlane(z_source, lens_model_list, lens_redshift_list, cosmo=cosmo, - numerical_alpha_class=numerical_alpha_class, - observed_convention_index=observed_convention_index, - z_source_convention=z_source_convention, cosmo_interp=cosmo_interp, - z_interp_stop=z_interp_stop, num_z_interp=num_z_interp, - kwargs_interp=kwargs_interp, kwargs_synthesis=kwargs_synthesis) + raise ValueError( + "LOS effects and multi-plane lensing are incompatible." 
+ ) + self.lens_model = MultiPlane( + z_source, + lens_model_list, + lens_redshift_list, + cosmo=cosmo, + numerical_alpha_class=numerical_alpha_class, + observed_convention_index=observed_convention_index, + z_source_convention=z_source_convention, + cosmo_interp=cosmo_interp, + z_interp_stop=z_interp_stop, + num_z_interp=num_z_interp, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) else: if los_effects is True: - self.lens_model = SinglePlaneLOS(lens_model_list, + self.lens_model = SinglePlaneLOS( + lens_model_list, index_los=index_los, numerical_alpha_class=numerical_alpha_class, lens_redshift_list=lens_redshift_list, z_source_convention=z_source_convention, kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + kwargs_synthesis=kwargs_synthesis, + ) else: - self.lens_model = SinglePlane(lens_model_list, + self.lens_model = SinglePlane( + lens_model_list, numerical_alpha_class=numerical_alpha_class, lens_redshift_list=lens_redshift_list, z_source_convention=z_source_convention, kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + kwargs_synthesis=kwargs_synthesis, + ) if z_lens is not None and z_source is not None: self._lensCosmo = LensCosmo(z_lens, z_source, cosmo=cosmo) def ray_shooting(self, x, y, kwargs, k=None): - """ - maps image to source position (inverse deflection) + """Maps image to source position (inverse deflection) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model :return: source plane positions corresponding to (x, y) in the image plane """ return self.lens_model.ray_shooting(x, y, kwargs, k=k) - def fermat_potential(self, x_image, y_image, kwargs_lens, x_source=None, y_source=None): - """ - Fermat potential (negative sign means earlier arrival time) - for Multi-plane lensing, it computes the effective Fermat potential (derived from the arrival time and - subtracted off the time-delay distance for the given cosmology). The units are given in arcsecond square. + def fermat_potential( + self, x_image, y_image, kwargs_lens, x_source=None, y_source=None + ): + """Fermat potential (negative sign means earlier arrival time) for Multi-plane + lensing, it computes the effective Fermat potential (derived from the arrival + time and subtracted off the time-delay distance for the given cosmology). The + units are given in arcsecond square. :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position - :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes - :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list - """ - if hasattr(self.lens_model, 'fermat_potential'): - return self.lens_model.fermat_potential(x_image, y_image, kwargs_lens, x_source, y_source) - elif hasattr(self.lens_model, 'arrival_time') and hasattr(self, '_lensCosmo'): + :param kwargs_lens: list of keyword arguments of lens model parameters matching + the lens model classes + :return: fermat potential in arcsec**2 without geometry term (second part of Eqn + 1 in Suyu et al. 
2013) as a list + """ + if hasattr(self.lens_model, "fermat_potential"): + return self.lens_model.fermat_potential( + x_image, y_image, kwargs_lens, x_source, y_source + ) + elif hasattr(self.lens_model, "arrival_time") and hasattr(self, "_lensCosmo"): dt = self.lens_model.arrival_time(x_image, y_image, kwargs_lens) - fermat_pot_eff = dt * const.c / self._lensCosmo.ddt / const.Mpc * const.day_s / const.arcsec ** 2 + fermat_pot_eff = ( + dt + * const.c + / self._lensCosmo.ddt + / const.Mpc + * const.day_s + / const.arcsec**2 + ) return fermat_pot_eff else: - raise ValueError('In multi-plane lensing you need to provide a specific z_lens and z_source for which the ' - 'effective Fermat potential is evaluated') - - def arrival_time(self, x_image, y_image, kwargs_lens, kappa_ext=0, x_source=None, y_source=None): - """ - Arrival time of images relative to a straight line without lensing. - Negative values correspond to images arriving earlier, and positive signs correspond to images arriving later. + raise ValueError( + "In multi-plane lensing you need to provide a specific z_lens and z_source for which the " + "effective Fermat potential is evaluated" + ) + + def arrival_time( + self, x_image, y_image, kwargs_lens, kappa_ext=0, x_source=None, y_source=None + ): + """Arrival time of images relative to a straight line without lensing. Negative + values correspond to images arriving earlier, and positive signs correspond to + images arriving later. :param x_image: image position :param y_image: image position :param kwargs_lens: lens model parameter keyword argument list - :param kappa_ext: external convergence contribution not accounted in the lens model that leads to the same - observables in position and relative fluxes but rescales the time delays + :param kappa_ext: external convergence contribution not accounted in the lens + model that leads to the same observables in position and relative fluxes but + rescales the time delays :param x_source: source position (optional), otherwise computed with ray-tracing :param y_source: source position (optional), otherwise computed with ray-tracing :return: arrival time of image positions in units of days """ - if hasattr(self.lens_model, 'arrival_time'): + if hasattr(self.lens_model, "arrival_time"): arrival_time = self.lens_model.arrival_time(x_image, y_image, kwargs_lens) else: - fermat_pot = self.lens_model.fermat_potential(x_image, y_image, kwargs_lens, x_source=x_source, - y_source=y_source) - if not hasattr(self, '_lensCosmo'): - raise ValueError("LensModel class was not initialized with lens and source redshifts!") + fermat_pot = self.lens_model.fermat_potential( + x_image, y_image, kwargs_lens, x_source=x_source, y_source=y_source + ) + if not hasattr(self, "_lensCosmo"): + raise ValueError( + "LensModel class was not initialized with lens and source redshifts!" + ) arrival_time = self._lensCosmo.time_delay_units(fermat_pot) - arrival_time *= (1 - kappa_ext) + arrival_time *= 1 - kappa_ext return arrival_time def potential(self, x, y, kwargs, k=None): - """ - lensing potential + """Lensing potential. 
:param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model :return: lensing potential in units of arcsec^2 """ return self.lens_model.potential(x, y, kwargs, k=k) def alpha(self, x, y, kwargs, k=None, diff=None): - """ - deflection angles + """Deflection angles. :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model - :param diff: None or float. If set, computes the deflection as a finite numerical differential of the lensing - potential. This differential is only applicable in the single lensing plane where the form of the lensing - potential is analytically known + :param diff: None or float. If set, computes the deflection as a finite + numerical differential of the lensing potential. This differential is only + applicable in the single lensing plane where the form of the lensing + potential is analytically known :return: deflection angles in units of arcsec """ if diff is None: @@ -198,56 +254,63 @@ def alpha(self, x, y, kwargs, k=None, diff=None): elif self.multi_plane is False: return self._deflection_differential(x, y, kwargs, k=k, diff=diff) else: - raise ValueError('numerical differentiation of lensing potential is not available in the multi-plane ' - 'setting as analytical form of lensing potential is not available.') + raise ValueError( + "numerical differentiation of lensing potential is not available in the multi-plane " + "setting as analytical form of lensing potential is not available." + ) - def hessian(self, x, y, kwargs, k=None, diff=None, diff_method='square'): - """ - hessian matrix + def hessian(self, x, y, kwargs, k=None, diff=None, diff_method="square"): + """Hessian matrix. :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model - :param diff: float, scale over which the finite numerical differential is computed. If None, then using the - exact (if available) differentials. - :param diff_method: string, 'square' or 'cross', indicating whether finite differentials are computed from a - cross or a square of points around (x, y) + :param diff: float, scale over which the finite numerical differential is + computed. If None, then using the exact (if available) differentials. 
+ :param diff_method: string, 'square' or 'cross', indicating whether finite + differentials are computed from a cross or a square of points around (x, y) :return: f_xx, f_xy, f_yx, f_yy components """ if diff is None: return self.lens_model.hessian(x, y, kwargs, k=k) - elif diff_method == 'square': + elif diff_method == "square": return self._hessian_differential_square(x, y, kwargs, k=k, diff=diff) - elif diff_method == 'cross': + elif diff_method == "cross": return self._hessian_differential_cross(x, y, kwargs, k=k, diff=diff) else: - raise ValueError('diff_method %s not supported. Chose among "square" or "cross".' % diff_method) + raise ValueError( + 'diff_method %s not supported. Chose among "square" or "cross".' + % diff_method + ) - def kappa(self, x, y, kwargs, k=None, diff=None, diff_method='square'): - """ - lensing convergence k = 1/2 laplacian(phi) + def kappa(self, x, y, kwargs, k=None, diff=None, diff_method="square"): + """Lensing convergence k = 1/2 laplacian(phi) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param k: only evaluate the k-th lens model - :param diff: float, scale over which the finite numerical differential is computed. If None, then using the - exact (if available) differentials. - :param diff_method: string, 'square' or 'cross', indicating whether finite differentials are computed from a - cross or a square of points around (x, y) + :param diff: float, scale over which the finite numerical differential is + computed. If None, then using the exact (if available) differentials. 
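# --- illustrative sketch, not part of the patch above ---------------------------
# The convergence, shear and magnification accessors in this file are thin
# wrappers around the Hessian: kappa = (f_xx + f_yy)/2, gamma1 = (f_xx - f_yy)/2,
# gamma2 = f_xy and mu = 1/det(A) with A = 1 - Hessian. Model and position are
# arbitrary example values.
from lenstronomy.LensModel.lens_model import LensModel

lens_model = LensModel(lens_model_list=["SIS"])
kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}]
x, y = 1.3, 0.4

f_xx, f_xy, f_yx, f_yy = lens_model.hessian(x, y, kwargs)
kappa = 0.5 * (f_xx + f_yy)
gamma1, gamma2 = 0.5 * (f_xx - f_yy), f_xy
mu = 1.0 / ((1 - f_xx) * (1 - f_yy) - f_xy * f_yx)

# the convenience methods return the same numbers:
assert abs(kappa - lens_model.kappa(x, y, kwargs)) < 1e-10
assert abs(mu - lens_model.magnification(x, y, kwargs)) < 1e-10
# ---------------------------------------------------------------------------------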
+ :param diff_method: string, 'square' or 'cross', indicating whether finite + differentials are computed from a cross or a square of points around (x, y) :return: lensing convergence """ - f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, k=k, diff=diff, diff_method=diff_method) - kappa = 1./2 * (f_xx + f_yy) + f_xx, f_xy, f_yx, f_yy = self.hessian( + x, y, kwargs, k=k, diff=diff, diff_method=diff_method + ) + kappa = 1.0 / 2 * (f_xx + f_yy) return kappa - def curl(self, x, y, kwargs, k=None, diff=None, diff_method='square'): + def curl(self, x, y, kwargs, k=None, diff=None, diff_method="square"): """ curl computation F_xy - F_yx @@ -263,10 +326,12 @@ def curl(self, x, y, kwargs, k=None, diff=None, diff_method='square'): cross or a square of points around (x, y) :return: curl at position (x, y) """ - f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, k=k, diff=diff, diff_method=diff_method) + f_xx, f_xy, f_yx, f_yy = self.hessian( + x, y, kwargs, k=k, diff=diff, diff_method=diff_method + ) return f_xy - f_yx - def gamma(self, x, y, kwargs, k=None, diff=None, diff_method='square'): + def gamma(self, x, y, kwargs, k=None, diff=None, diff_method="square"): """ shear computation g1 = 1/2(d^2phi/dx^2 - d^2phi/dy^2) @@ -285,14 +350,16 @@ def gamma(self, x, y, kwargs, k=None, diff=None, diff_method='square'): :return: gamma1, gamma2 """ - f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, k=k, diff=diff, diff_method=diff_method) - gamma1 = 1./2 * (f_xx - f_yy) + f_xx, f_xy, f_yx, f_yy = self.hessian( + x, y, kwargs, k=k, diff=diff, diff_method=diff_method + ) + gamma1 = 1.0 / 2 * (f_xx - f_yy) gamma2 = f_xy return gamma1, gamma2 - def magnification(self, x, y, kwargs, k=None, diff=None, diff_method='square'): - """ - magnification + def magnification(self, x, y, kwargs, k=None, diff=None, diff_method="square"): + """magnification. 
+ mag = 1/det(A) A = 1 - d^2phi/d_ij @@ -309,31 +376,43 @@ def magnification(self, x, y, kwargs, k=None, diff=None, diff_method='square'): :return: magnification """ - f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, k=k, diff=diff, diff_method=diff_method) - det_A = (1 - f_xx) * (1 - f_yy) - f_xy*f_yx - return 1./det_A # attention, if dividing by zero + f_xx, f_xy, f_yx, f_yy = self.hessian( + x, y, kwargs, k=k, diff=diff, diff_method=diff_method + ) + det_A = (1 - f_xx) * (1 - f_yy) - f_xy * f_yx + return 1.0 / det_A # attention, if dividing by zero def flexion(self, x, y, kwargs, k=None, diff=0.000001, hessian_diff=True): - """ - third derivatives (flexion) + """Third derivatives (flexion) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes - :param k: int or None, if set, only evaluates the differential from one model component + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes + :param k: int or None, if set, only evaluates the differential from one model + component :param diff: numerical differential length of Flexion - :param hessian_diff: boolean, if true also computes the numerical differential length of Hessian (optional) + :param hessian_diff: boolean, if true also computes the numerical differential + length of Hessian (optional) :return: f_xxx, f_xxy, f_xyy, f_yyy """ if hessian_diff is not True: hessian_diff = None - f_xx_dx, f_xy_dx, f_yx_dx, f_yy_dx = self.hessian(x + diff/2, y, kwargs, k=k, diff=hessian_diff) - f_xx_dy, f_xy_dy, f_yx_dy, f_yy_dy = self.hessian(x, y + diff/2, kwargs, k=k, diff=hessian_diff) - - f_xx_dx_, f_xy_dx_, f_yx_dx_, f_yy_dx_ = self.hessian(x - diff/2, y, kwargs, k=k, diff=hessian_diff) - f_xx_dy_, f_xy_dy_, f_yx_dy_, f_yy_dy_ = self.hessian(x, y - diff/2, kwargs, k=k, diff=hessian_diff) + f_xx_dx, f_xy_dx, f_yx_dx, f_yy_dx = self.hessian( + x + diff / 2, y, kwargs, k=k, diff=hessian_diff + ) + f_xx_dy, f_xy_dy, f_yx_dy, f_yy_dy = self.hessian( + x, y + diff / 2, kwargs, k=k, diff=hessian_diff + ) + + f_xx_dx_, f_xy_dx_, f_yx_dx_, f_yy_dx_ = self.hessian( + x - diff / 2, y, kwargs, k=k, diff=hessian_diff + ) + f_xx_dy_, f_xy_dy_, f_yx_dy_, f_yy_dy_ = self.hessian( + x, y - diff / 2, kwargs, k=k, diff=hessian_diff + ) f_xxx = (f_xx_dx - f_xx_dx_) / diff f_xxy = (f_xx_dy - f_xx_dy_) / diff @@ -342,20 +421,21 @@ def flexion(self, x, y, kwargs, k=None, diff=0.000001, hessian_diff=True): return f_xxx, f_xxy, f_xyy, f_yyy def set_static(self, kwargs): - """ - set this instance to a static lens model. This can improve the speed in evaluating lensing quantities at - different positions but must not be used with different lens model parameters! + """Set this instance to a static lens model. This can improve the speed in + evaluating lensing quantities at different positions but must not be used with + different lens model parameters! :param kwargs: lens model keyword argument list - :return: kwargs_updated (in case of image position convention in multiplane lensing this is changed) + :return: kwargs_updated (in case of image position convention in multiplane + lensing this is changed) """ return self.lens_model.set_static(kwargs) def set_dynamic(self): - """ - deletes cache for static setting and makes sure the observed convention in the position of lensing profiles in - the multi-plane setting is enabled. 
Dynamic is the default setting of this class enabling an accurate computation - of lensing quantities with different parameters in the lensing profiles. + """Deletes cache for static setting and makes sure the observed convention in + the position of lensing profiles in the multi-plane setting is enabled. Dynamic + is the default setting of this class enabling an accurate computation of lensing + quantities with different parameters in the lensing profiles. :return: None """ @@ -371,31 +451,33 @@ def _deflection_differential(self, x, y, kwargs, k=None, diff=0.00001): :param diff: finite differential length :return: f_x, f_y """ - phi_dx = self.lens_model.potential(x + diff/2, y, kwargs=kwargs, k=k) - phi_dy = self.lens_model.potential(x, y + diff/2, kwargs=kwargs, k=k) - phi_dx_ = self.lens_model.potential(x - diff/2, y, kwargs=kwargs, k=k) - phi_dy_ = self.lens_model.potential(x, y - diff/2, kwargs=kwargs, k=k) + phi_dx = self.lens_model.potential(x + diff / 2, y, kwargs=kwargs, k=k) + phi_dy = self.lens_model.potential(x, y + diff / 2, kwargs=kwargs, k=k) + phi_dx_ = self.lens_model.potential(x - diff / 2, y, kwargs=kwargs, k=k) + phi_dy_ = self.lens_model.potential(x, y - diff / 2, kwargs=kwargs, k=k) f_x = (phi_dx - phi_dx_) / diff f_y = (phi_dy - phi_dy_) / diff return f_x, f_y def _hessian_differential_cross(self, x, y, kwargs, k=None, diff=0.00001): - """ - computes the numerical differentials over a finite range for f_xx, f_yy, f_xy from f_x and f_y - The differentials are computed along the cross centered at (x, y). + """Computes the numerical differentials over a finite range for f_xx, f_yy, f_xy + from f_x and f_y The differentials are computed along the cross centered at (x, + y). :param x: x-coordinate :param y: y-coordinate :param kwargs: lens model keyword argument list - :param k: int, list of bools or None, indicating a subset of lens models to be evaluated - :param diff: float, scale of the finite differential (diff/2 in each direction used to compute the differential + :param k: int, list of bools or None, indicating a subset of lens models to be + evaluated + :param diff: float, scale of the finite differential (diff/2 in each direction + used to compute the differential :return: f_xx, f_xy, f_yx, f_yy """ - alpha_ra_dx, alpha_dec_dx = self.alpha(x + diff/2, y, kwargs, k=k) - alpha_ra_dy, alpha_dec_dy = self.alpha(x, y + diff/2, kwargs, k=k) + alpha_ra_dx, alpha_dec_dx = self.alpha(x + diff / 2, y, kwargs, k=k) + alpha_ra_dy, alpha_dec_dy = self.alpha(x, y + diff / 2, kwargs, k=k) - alpha_ra_dx_, alpha_dec_dx_ = self.alpha(x - diff/2, y, kwargs, k=k) - alpha_ra_dy_, alpha_dec_dy_ = self.alpha(x, y - diff/2, kwargs, k=k) + alpha_ra_dx_, alpha_dec_dx_ = self.alpha(x - diff / 2, y, kwargs, k=k) + alpha_ra_dy_, alpha_dec_dy_ = self.alpha(x, y - diff / 2, kwargs, k=k) dalpha_rara = (alpha_ra_dx - alpha_ra_dx_) / diff dalpha_radec = (alpha_ra_dy - alpha_ra_dy_) / diff @@ -409,19 +491,21 @@ def _hessian_differential_cross(self, x, y, kwargs, k=None, diff=0.00001): return f_xx, f_xy, f_yx, f_yy def _hessian_differential_square(self, x, y, kwargs, k=None, diff=0.00001): - """ - computes the numerical differentials over a finite range for f_xx, f_yy, f_xy from f_x and f_y - The differentials are computed on the square around (x, y). This minimizes curl. + """Computes the numerical differentials over a finite range for f_xx, f_yy, f_xy + from f_x and f_y The differentials are computed on the square around (x, y). + This minimizes curl. 
:param x: x-coordinate :param y: y-coordinate :param kwargs: lens model keyword argument list - :param k: int, list of booleans or None, indicating a subset of lens models to be evaluated - :param diff: float, scale of the finite differential (diff/2 in each direction used to compute the differential + :param k: int, list of booleans or None, indicating a subset of lens models to + be evaluated + :param diff: float, scale of the finite differential (diff/2 in each direction + used to compute the differential :return: f_xx, f_xy, f_yx, f_yy """ - alpha_ra_pp, alpha_dec_pp = self.alpha(x + diff/2, y + diff/2, kwargs, k=k) - alpha_ra_pn, alpha_dec_pn = self.alpha(x + diff/2, y - diff/2, kwargs, k=k) + alpha_ra_pp, alpha_dec_pp = self.alpha(x + diff / 2, y + diff / 2, kwargs, k=k) + alpha_ra_pn, alpha_dec_pn = self.alpha(x + diff / 2, y - diff / 2, kwargs, k=k) alpha_ra_np, alpha_dec_np = self.alpha(x - diff / 2, y + diff / 2, kwargs, k=k) alpha_ra_nn, alpha_dec_nn = self.alpha(x - diff / 2, y - diff / 2, kwargs, k=k) diff --git a/lenstronomy/LensModel/lens_model_extensions.py b/lenstronomy/LensModel/lens_model_extensions.py index 13ab3b164..64c62b3be 100644 --- a/lenstronomy/LensModel/lens_model_extensions.py +++ b/lenstronomy/LensModel/lens_model_extensions.py @@ -2,15 +2,13 @@ import lenstronomy.Util.util as util from lenstronomy.Util.magnification_finite_util import setup_mag_finite -__all__ = ['LensModelExtensions'] +__all__ = ["LensModelExtensions"] class LensModelExtensions(object): - """ - class with extension routines not part of the LensModel core routines - """ - def __init__(self, lensModel): + """Class with extension routines not part of the LensModel core routines.""" + def __init__(self, lensModel): """ :param lensModel: instance of the LensModel() class, or with same functionalities. In particular, the following definitions are required to execute all functionalities presented in this class: @@ -23,21 +21,35 @@ def hessian() """ self._lensModel = lensModel - def magnification_finite_adaptive(self, x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source, - cosmo=None, grid_resolution=None, - grid_radius_arcsec=None, axis_ratio=0.5, - tol=0.001, step_size=0.05, - use_largest_eigenvalue=True, - source_light_model='SINGLE_GAUSSIAN', - dx=None, dy=None, size_scale=None, amp_scale=None, - fixed_aperture_size=False): - """ - This method computes image magnifications with a finite-size background source assuming a Gaussian or a - double Gaussian source light profile. It can be much faster that magnification_finite for lens models with many - deflectors and a compact source. This is because most pixels in a rectangular window around a lensed - image of a compact source do not map onto the source, and therefore don't contribute to the integrated flux in - the image plane. + def magnification_finite_adaptive( + self, + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=None, + grid_resolution=None, + grid_radius_arcsec=None, + axis_ratio=0.5, + tol=0.001, + step_size=0.05, + use_largest_eigenvalue=True, + source_light_model="SINGLE_GAUSSIAN", + dx=None, + dy=None, + size_scale=None, + amp_scale=None, + fixed_aperture_size=False, + ): + """This method computes image magnifications with a finite-size background + source assuming a Gaussian or a double Gaussian source light profile. It can be + much faster that magnification_finite for lens models with many deflectors and a + compact source. 
This is because most pixels in a rectangular window around a + lensed image of a compact source do not map onto the source, and therefore don't + contribute to the integrated flux in the image plane. Rather than ray tracing through a rectangular grid, this routine accelerates the computation of image magnifications with finite-size sources by ray tracing through an elliptical region oriented such that @@ -87,18 +99,28 @@ def magnification_finite_adaptive(self, x_image, y_image, source_x, source_y, kw :return: an array of image magnifications """ - grid_x_0, grid_y_0, source_model, kwargs_source, grid_resolution, grid_radius_arcsec = setup_mag_finite(cosmo, - self._lensModel, - grid_radius_arcsec, - grid_resolution, - source_fwhm_parsec, - source_light_model, - z_source, - source_x, - source_y, - dx, dy, - amp_scale, - size_scale) + ( + grid_x_0, + grid_y_0, + source_model, + kwargs_source, + grid_resolution, + grid_radius_arcsec, + ) = setup_mag_finite( + cosmo, + self._lensModel, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, + ) grid_x_0, grid_y_0 = grid_x_0.ravel(), grid_y_0.ravel() minimum_magnification = 1e-5 @@ -106,11 +128,12 @@ def magnification_finite_adaptive(self, x_image, y_image, source_x, source_y, kw magnifications = [] for xi, yi in zip(x_image, y_image): - if axis_ratio == 1: grid_r = np.hypot(grid_x_0, grid_y_0) else: - w1, w2, v11, v12, v21, v22 = self.hessian_eigenvectors(xi, yi, kwargs_lens) + w1, w2, v11, v12, v21, v22 = self.hessian_eigenvectors( + xi, yi, kwargs_lens + ) _v = [np.array([v11, v12]), np.array([v21, v22])] _w = [abs(w1), abs(w2)] if use_largest_eigenvalue: @@ -137,15 +160,27 @@ def magnification_finite_adaptive(self, x_image, y_image, source_x, source_y, kw r_max = grid_radius_arcsec else: r_max = step - magnification_current = 0. + magnification_current = 0.0 while True: - - flux_array = self._magnification_adaptive_iteration(flux_array, xi, yi, grid_x_0, grid_y_0, grid_r, - r_min, r_max, self._lensModel, kwargs_lens, - source_model, kwargs_source) - new_magnification = np.sum(flux_array) * grid_resolution ** 2 - diff = abs(new_magnification - magnification_current) / new_magnification + flux_array = self._magnification_adaptive_iteration( + flux_array, + xi, + yi, + grid_x_0, + grid_y_0, + grid_r, + r_min, + r_max, + self._lensModel, + kwargs_lens, + source_model, + kwargs_source, + ) + new_magnification = np.sum(flux_array) * grid_resolution**2 + diff = ( + abs(new_magnification - magnification_current) / new_magnification + ) if r_max >= grid_radius_arcsec: break @@ -161,11 +196,23 @@ def magnification_finite_adaptive(self, x_image, y_image, source_x, source_y, kw return np.array(magnifications) @staticmethod - def _magnification_adaptive_iteration(flux_array, x_image, y_image, grid_x, grid_y, grid_r, r_min, r_max, - lensModel, kwargs_lens, source_model, kwargs_source): - """ - This function computes the surface brightness of coordinates in 'flux_array' that satisfy r_min < grid_r < r_max, - where each coordinate in grid_r corresponds to a certain entry in flux_array. 
Likewise, grid_x, and grid_y + def _magnification_adaptive_iteration( + flux_array, + x_image, + y_image, + grid_x, + grid_y, + grid_r, + r_min, + r_max, + lensModel, + kwargs_lens, + source_model, + kwargs_source, + ): + """This function computes the surface brightness of coordinates in 'flux_array' + that satisfy r_min < grid_r < r_max, where each coordinate in grid_r corresponds + to a certain entry in flux_array. Likewise, grid_x, and grid_y. :param flux_array: an array that contains the flux in each pixel :param x_image: image x coordinate @@ -179,8 +226,8 @@ def _magnification_adaptive_iteration(flux_array, x_image, y_image, grid_x, grid :param kwargs_lens: keywords for the lens model :param source_model: an instance of LightModel :param kwargs_source: keywords for the light model - :return: the flux array where the surface brightness has been computed for all pixels - with r_min < grid_r < r_max. + :return: the flux array where the surface brightness has been computed for all + pixels with r_min < grid_r < r_max. """ condition1 = grid_r >= r_min @@ -197,29 +244,38 @@ def _magnification_adaptive_iteration(flux_array, x_image, y_image, grid_x, grid return flux_array - def magnification_finite(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, grid_number=100, - polar_grid=False, aspect_ratio=0.5): - """ - returns the magnification of an extended source with Gaussian light profile - :param x_pos: x-axis positons of point sources - :param y_pos: y-axis position of point sources - :param kwargs_lens: lens model kwargs - :param source_sigma: Gaussian sigma in arc sec in source - :param window_size: size of window to compute the finite flux - :param grid_number: number of grid cells per axis in the window to numerically compute the flux - :return: numerically computed brightness of the sources - """ + def magnification_finite( + self, + x_pos, + y_pos, + kwargs_lens, + source_sigma=0.003, + window_size=0.1, + grid_number=100, + polar_grid=False, + aspect_ratio=0.5, + ): + """Returns the magnification of an extended source with Gaussian light profile + :param x_pos: x-axis positons of point sources :param y_pos: y-axis position of + point sources :param kwargs_lens: lens model kwargs :param source_sigma: + Gaussian sigma in arc sec in source :param window_size: size of window to + compute the finite flux :param grid_number: number of grid cells per axis in the + window to numerically compute the flux :return: numerically computed brightness + of the sources.""" mag_finite = np.zeros_like(x_pos) - deltaPix = float(window_size)/grid_number + deltaPix = float(window_size) / grid_number from lenstronomy.LightModel.Profiles.gaussian import Gaussian + quasar = Gaussian() - x_grid, y_grid = util.make_grid(numPix=grid_number, deltapix=deltaPix, subgrid_res=1) + x_grid, y_grid = util.make_grid( + numPix=grid_number, deltapix=deltaPix, subgrid_res=1 + ) if polar_grid is True: - a = window_size*0.5 - b = window_size*0.5*aspect_ratio - ellipse_inds = (x_grid*a**-1) ** 2 + (y_grid*b**-1) ** 2 <= 1 + a = window_size * 0.5 + b = window_size * 0.5 * aspect_ratio + ellipse_inds = (x_grid * a**-1) ** 2 + (y_grid * b**-1) ** 2 <= 1 x_grid, y_grid = x_grid[ellipse_inds], y_grid[ellipse_inds] for i in range(len(x_pos)): @@ -233,16 +289,27 @@ def magnification_finite(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, wi else: xcoord, ycoord = x_grid, y_grid - betax, betay = self._lensModel.ray_shooting(xcoord + ra, ycoord + dec, kwargs_lens) + betax, betay = self._lensModel.ray_shooting( + 
xcoord + ra, ycoord + dec, kwargs_lens + ) - I_image = quasar.function(betax, betay, 1., source_sigma, center_x, center_y) + I_image = quasar.function( + betax, betay, 1.0, source_sigma, center_x, center_y + ) mag_finite[i] = np.sum(I_image) * deltaPix**2 return mag_finite - def zoom_source(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, grid_number=100, - shape="GAUSSIAN"): - """ - computes the surface brightness on an image with a zoomed window + def zoom_source( + self, + x_pos, + y_pos, + kwargs_lens, + source_sigma=0.003, + window_size=0.1, + grid_number=100, + shape="GAUSSIAN", + ): + """Computes the surface brightness on an image with a zoomed window. :param x_pos: angular coordinate of center of image :param y_pos: angular coordinate of center of image @@ -254,21 +321,35 @@ def zoom_source(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size :return: 2d numpy array """ deltaPix = float(window_size) / grid_number - if shape == 'GAUSSIAN': + if shape == "GAUSSIAN": from lenstronomy.LightModel.Profiles.gaussian import Gaussian + quasar = Gaussian() - elif shape == 'TORUS': + elif shape == "TORUS": import lenstronomy.LightModel.Profiles.ellipsoid as quasar else: - raise ValueError("shape %s not valid for finite magnification computation!" % shape) - x_grid, y_grid = util.make_grid(numPix=grid_number, deltapix=deltaPix, subgrid_res=1) + raise ValueError( + "shape %s not valid for finite magnification computation!" % shape + ) + x_grid, y_grid = util.make_grid( + numPix=grid_number, deltapix=deltaPix, subgrid_res=1 + ) center_x, center_y = self._lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) - betax, betay = self._lensModel.ray_shooting(x_grid + x_pos, y_grid + y_pos, kwargs_lens) - image = quasar.function(betax, betay, 1., source_sigma, center_x, center_y) + betax, betay = self._lensModel.ray_shooting( + x_grid + x_pos, y_grid + y_pos, kwargs_lens + ) + image = quasar.function(betax, betay, 1.0, source_sigma, center_x, center_y) return util.array2image(image) - def critical_curve_tiling(self, kwargs_lens, compute_window=5, start_scale=0.5, max_order=10, center_x=0, - center_y=0): + def critical_curve_tiling( + self, + kwargs_lens, + compute_window=5, + start_scale=0.5, + max_order=10, + center_x=0, + center_y=0, + ): """ :param kwargs_lens: lens model keyword argument list @@ -281,46 +362,75 @@ def critical_curve_tiling(self, kwargs_lens, compute_window=5, start_scale=0.5, :return: list of positions representing coordinates of the critical curve (in RA and DEC) """ numPix = int(compute_window / start_scale) - x_grid_init, y_grid_init = util.make_grid(numPix, deltapix=start_scale, subgrid_res=1) + x_grid_init, y_grid_init = util.make_grid( + numPix, deltapix=start_scale, subgrid_res=1 + ) x_grid_init += center_x y_grid_init += center_y - mag_init = util.array2image(self._lensModel.magnification(x_grid_init, y_grid_init, kwargs_lens)) + mag_init = util.array2image( + self._lensModel.magnification(x_grid_init, y_grid_init, kwargs_lens) + ) x_grid_init = util.array2image(x_grid_init) y_grid_init = util.array2image(y_grid_init) ra_crit_list = [] dec_crit_list = [] # iterate through original triangles and return ra_crit, dec_crit list - for i in range(numPix-1): - for j in range(numPix-1): + for i in range(numPix - 1): + for j in range(numPix - 1): edge1 = [x_grid_init[i, j], y_grid_init[i, j], mag_init[i, j]] - edge2 = [x_grid_init[i+1, j+1], y_grid_init[i+1, j+1], mag_init[i+1, j+1]] - edge_90_1 = [x_grid_init[i, j+1], y_grid_init[i, j+1], mag_init[i, 
j+1]] - edge_90_2 = [x_grid_init[i+1, j], y_grid_init[i+1, j], mag_init[i+1, j]] - ra_crit, dec_crit = self._tiling_crit(edge1, edge2, edge_90_1, max_order=max_order, - kwargs_lens=kwargs_lens) + edge2 = [ + x_grid_init[i + 1, j + 1], + y_grid_init[i + 1, j + 1], + mag_init[i + 1, j + 1], + ] + edge_90_1 = [ + x_grid_init[i, j + 1], + y_grid_init[i, j + 1], + mag_init[i, j + 1], + ] + edge_90_2 = [ + x_grid_init[i + 1, j], + y_grid_init[i + 1, j], + mag_init[i + 1, j], + ] + ra_crit, dec_crit = self._tiling_crit( + edge1, + edge2, + edge_90_1, + max_order=max_order, + kwargs_lens=kwargs_lens, + ) ra_crit_list += ra_crit # list addition dec_crit_list += dec_crit # list addition - ra_crit, dec_crit = self._tiling_crit(edge1, edge2, edge_90_2, max_order=max_order, - kwargs_lens=kwargs_lens) + ra_crit, dec_crit = self._tiling_crit( + edge1, + edge2, + edge_90_2, + max_order=max_order, + kwargs_lens=kwargs_lens, + ) ra_crit_list += ra_crit # list addition dec_crit_list += dec_crit # list addition return np.array(ra_crit_list), np.array(dec_crit_list) def caustic_area(self, kwargs_lens, kwargs_caustic_num, index_vertices=0): - """ - computes the area inside a connected caustic curve + """Computes the area inside a connected caustic curve. :param kwargs_lens: lens model keyword argument list - :param kwargs_caustic_num: keyword arguments for the numerical calculation of the caustics, as input of - self.critical_curve_caustics() - :param index_vertices: integer, index of connected vortex from the output of self.critical_curve_caustics() - of disconnected curves. + :param kwargs_caustic_num: keyword arguments for the numerical calculation of + the caustics, as input of self.critical_curve_caustics() + :param index_vertices: integer, index of connected vortex from the output of + self.critical_curve_caustics() of disconnected curves. :return: area within the caustic curve selected """ - ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = self.critical_curve_caustics(kwargs_lens, - **kwargs_caustic_num) + ( + ra_crit_list, + dec_crit_list, + ra_caustic_list, + dec_caustic_list, + ) = self.critical_curve_caustics(kwargs_lens, **kwargs_caustic_num) # select specific vortex ra_caustic_inner = ra_caustic_list[index_vertices] @@ -334,8 +444,7 @@ def caustic_area(self, kwargs_lens, kwargs_caustic_num, index_vertices=0): return a def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens): - """ - tiles a rectangular triangle and compares the signs of the magnification + """Tiles a rectangular triangle and compares the signs of the magnification. 
:param edge1: [ra_coord, dec_coord, magnification] :param edge2: [ra_coord, dec_coord, magnification] @@ -348,7 +457,9 @@ def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens): ra_2, dec_2, mag_2 = edge2 ra_3, dec_3, mag_3 = edge_90 sign_list = np.sign([mag_1, mag_2, mag_3]) - if sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2]: # if all signs are the same + if ( + sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2] + ): # if all signs are the same return [], [] else: # split triangle along the long axis @@ -357,22 +468,36 @@ def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens): # if max depth has been reached, return the mean value in the triangle max_order -= 1 if max_order <= 0: - return [(ra_1 + ra_2 + ra_3)/3], [(dec_1 + dec_2 + dec_3)/3] + return [(ra_1 + ra_2 + ra_3) / 3], [(dec_1 + dec_2 + dec_3) / 3] else: # split triangle - ra_90_ = (ra_1 + ra_2)/2 # find point in the middle of the long axis to split triangle - dec_90_ = (dec_1 + dec_2)/2 + ra_90_ = ( + ra_1 + ra_2 + ) / 2 # find point in the middle of the long axis to split triangle + dec_90_ = (dec_1 + dec_2) / 2 mag_90_ = self._lensModel.magnification(ra_90_, dec_90_, kwargs_lens) edge_90_ = [ra_90_, dec_90_, mag_90_] - ra_crit, dec_crit = self._tiling_crit(edge1=edge_90, edge2=edge1, edge_90=edge_90_, max_order=max_order, - kwargs_lens=kwargs_lens) - ra_crit_2, dec_crit_2 = self._tiling_crit(edge1=edge_90, edge2=edge2, edge_90=edge_90_, max_order=max_order, - kwargs_lens=kwargs_lens) + ra_crit, dec_crit = self._tiling_crit( + edge1=edge_90, + edge2=edge1, + edge_90=edge_90_, + max_order=max_order, + kwargs_lens=kwargs_lens, + ) + ra_crit_2, dec_crit_2 = self._tiling_crit( + edge1=edge_90, + edge2=edge2, + edge_90=edge_90_, + max_order=max_order, + kwargs_lens=kwargs_lens, + ) ra_crit += ra_crit_2 dec_crit += dec_crit_2 return ra_crit, dec_crit - def critical_curve_caustics(self, kwargs_lens, compute_window=5, grid_scale=0.01, center_x=0, center_y=0): + def critical_curve_caustics( + self, kwargs_lens, compute_window=5, grid_scale=0.01, center_x=0, center_y=0 + ): """ :param kwargs_lens: lens model kwargs @@ -386,10 +511,14 @@ def critical_curve_caustics(self, kwargs_lens, compute_window=5, grid_scale=0.01 num_pix = int(compute_window / grid_scale) if num_pix % 2 == 1: num_pix += 1 - x_grid_high_res, y_grid_high_res = util.make_grid(num_pix, deltapix=grid_scale, subgrid_res=1) + x_grid_high_res, y_grid_high_res = util.make_grid( + num_pix, deltapix=grid_scale, subgrid_res=1 + ) x_grid_high_res += center_x y_grid_high_res += center_y - mag_high_res = util.array2image(self._lensModel.magnification(x_grid_high_res, y_grid_high_res, kwargs_lens)) + mag_high_res = util.array2image( + self._lensModel.magnification(x_grid_high_res, y_grid_high_res, kwargs_lens) + ) ra_crit_list = [] dec_crit_list = [] @@ -398,22 +527,28 @@ def critical_curve_caustics(self, kwargs_lens, compute_window=5, grid_scale=0.01 # Import moved here to avoid import-time exception if skimage is missing from skimage.measure import find_contours - paths = find_contours(1/mag_high_res, 0.) + + paths = find_contours(1 / mag_high_res, 0.0) for i, v in enumerate(paths): # x, y changed because of skimage conventions - ra_points = v[:, 1] * grid_scale - grid_scale * (num_pix-1)/2. + center_x - dec_points = v[:, 0] * grid_scale - grid_scale * (num_pix-1)/2. 
+ center_y + ra_points = ( + v[:, 1] * grid_scale - grid_scale * (num_pix - 1) / 2.0 + center_x + ) + dec_points = ( + v[:, 0] * grid_scale - grid_scale * (num_pix - 1) / 2.0 + center_y + ) ra_crit_list.append(ra_points) dec_crit_list.append(dec_points) - ra_caustics, dec_caustics = self._lensModel.ray_shooting(ra_points, dec_points, kwargs_lens) + ra_caustics, dec_caustics = self._lensModel.ray_shooting( + ra_points, dec_points, kwargs_lens + ) ra_caustic_list.append(ra_caustics) dec_caustic_list.append(dec_caustics) return ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list def hessian_eigenvectors(self, x, y, kwargs_lens, diff=None): - """ - computes magnification eigenvectors at position (x, y) + """Computes magnification eigenvectors at position (x, y) :param x: x-position :param y: y-position @@ -422,12 +557,19 @@ def hessian_eigenvectors(self, x, y, kwargs_lens, diff=None): """ f_xx, f_xy, f_yx, f_yy = self._lensModel.hessian(x, y, kwargs_lens, diff=diff) if isinstance(x, int) or isinstance(x, float): - A = np.array([[1-f_xx, f_xy], [f_yx, 1-f_yy]]) + A = np.array([[1 - f_xx, f_xy], [f_yx, 1 - f_yy]]) w, v = np.linalg.eig(A) v11, v12, v21, v22 = v[0, 0], v[0, 1], v[1, 0], v[1, 1] w1, w2 = w[0], w[1] else: - w1, w2, v11, v12, v21, v22 = np.empty(len(x), dtype=float), np.empty(len(x), dtype=float), np.empty_like(x), np.empty_like(x), np.empty_like(x), np.empty_like(x) + w1, w2, v11, v12, v21, v22 = ( + np.empty(len(x), dtype=float), + np.empty(len(x), dtype=float), + np.empty_like(x), + np.empty_like(x), + np.empty_like(x), + np.empty_like(x), + ) for i in range(len(x)): A = np.array([[1 - f_xx[i], f_xy[i]], [f_yx[i], 1 - f_yy[i]]]) w, v = np.linalg.eig(A) @@ -435,10 +577,17 @@ def hessian_eigenvectors(self, x, y, kwargs_lens, diff=None): v11[i], v12[i], v21[i], v22[i] = v[0, 0], v[0, 1], v[1, 0], v[1, 1] return w1, w2, v11, v12, v21, v22 - def radial_tangential_stretch(self, x, y, kwargs_lens, diff=None, ra_0=0, dec_0=0, - coordinate_frame_definitions=False): - """ - computes the radial and tangential stretches at a given position + def radial_tangential_stretch( + self, + x, + y, + kwargs_lens, + diff=None, + ra_0=0, + dec_0=0, + coordinate_frame_definitions=False, + ): + """Computes the radial and tangential stretches at a given position. :param x: x-position :param y: y-position @@ -446,184 +595,332 @@ def radial_tangential_stretch(self, x, y, kwargs_lens, diff=None, ra_0=0, dec_0= :param diff: float or None, finite average differential scale :return: radial stretch, tangential stretch """ - w1, w2, v11, v12, v21, v22 = self.hessian_eigenvectors(x, y, kwargs_lens, diff=diff) + w1, w2, v11, v12, v21, v22 = self.hessian_eigenvectors( + x, y, kwargs_lens, diff=diff + ) v_x, v_y = x - ra_0, y - dec_0 - prod_v1 = v_x*v11 + v_y*v12 - prod_v2 = v_x*v21 + v_y*v22 + prod_v1 = v_x * v11 + v_y * v12 + prod_v2 = v_x * v21 + v_y * v22 if isinstance(x, int) or isinstance(x, float): - if (coordinate_frame_definitions is True and abs(prod_v1) >= abs(prod_v2)) or (coordinate_frame_definitions is False and w1 >= w2): - lambda_rad = 1. / w1 - lambda_tan = 1. / w2 + if ( + coordinate_frame_definitions is True and abs(prod_v1) >= abs(prod_v2) + ) or (coordinate_frame_definitions is False and w1 >= w2): + lambda_rad = 1.0 / w1 + lambda_tan = 1.0 / w2 v1_rad, v2_rad = v11, v12 v1_tan, v2_tan = v21, v22 prod_r = prod_v1 else: - lambda_rad = 1. / w2 - lambda_tan = 1. 
/ w1 + lambda_rad = 1.0 / w2 + lambda_tan = 1.0 / w1 v1_rad, v2_rad = v21, v22 v1_tan, v2_tan = v11, v12 prod_r = prod_v2 if prod_r < 0: # if radial eigenvector points towards the center v1_rad, v2_rad = -v1_rad, -v2_rad - if v1_rad * v2_tan - v2_rad * v1_tan < 0: # cross product defines orientation of the tangential eigenvector + if ( + v1_rad * v2_tan - v2_rad * v1_tan < 0 + ): # cross product defines orientation of the tangential eigenvector v1_tan *= -1 v2_tan *= -1 else: - lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan = np.empty(len(x), dtype=float), np.empty(len(x), dtype=float), np.empty_like(x), np.empty_like(x), np.empty_like(x), np.empty_like(x) + lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan = ( + np.empty(len(x), dtype=float), + np.empty(len(x), dtype=float), + np.empty_like(x), + np.empty_like(x), + np.empty_like(x), + np.empty_like(x), + ) for i in range(len(x)): - if (coordinate_frame_definitions is True and abs(prod_v1[i]) >= abs(prod_v2[i])) or ( - coordinate_frame_definitions is False and w1[i] >= w2[i]): - # if w1[i] > w2[i]: - lambda_rad[i] = 1. / w1[i] - lambda_tan[i] = 1. / w2[i] + if ( + coordinate_frame_definitions is True + and abs(prod_v1[i]) >= abs(prod_v2[i]) + ) or (coordinate_frame_definitions is False and w1[i] >= w2[i]): + # if w1[i] > w2[i]: + lambda_rad[i] = 1.0 / w1[i] + lambda_tan[i] = 1.0 / w2[i] v1_rad[i], v2_rad[i] = v11[i], v12[i] v1_tan[i], v2_tan[i] = v21[i], v22[i] prod_r = prod_v1[i] else: - lambda_rad[i] = 1. / w2[i] - lambda_tan[i] = 1. / w1[i] + lambda_rad[i] = 1.0 / w2[i] + lambda_tan[i] = 1.0 / w1[i] v1_rad[i], v2_rad[i] = v21[i], v22[i] v1_tan[i], v2_tan[i] = v11[i], v12[i] prod_r = prod_v2[i] if prod_r < 0: # if radial eigenvector points towards the center v1_rad[i], v2_rad[i] = -v1_rad[i], -v2_rad[i] - if v1_rad[i] * v2_tan[i] - v2_rad[i] * v1_tan[i] < 0: # cross product defines orientation of the tangential eigenvector + if ( + v1_rad[i] * v2_tan[i] - v2_rad[i] * v1_tan[i] < 0 + ): # cross product defines orientation of the tangential eigenvector v1_tan[i] *= -1 v2_tan[i] *= -1 return lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan - def radial_tangential_differentials(self, x, y, kwargs_lens, center_x=0, center_y=0, smoothing_3rd=0.001, - smoothing_2nd=None): - """ - computes the differentials in stretches and directions + def radial_tangential_differentials( + self, + x, + y, + kwargs_lens, + center_x=0, + center_y=0, + smoothing_3rd=0.001, + smoothing_2nd=None, + ): + """Computes the differentials in stretches and directions. 
:param x: x-position :param y: y-position :param kwargs_lens: lens model keyword arguments - :param center_x: x-coord of center towards which the rotation direction is defined - :param center_y: x-coord of center towards which the rotation direction is defined - :param smoothing_3rd: finite differential length of third order in units of angle - :param smoothing_2nd: float or None, finite average differential scale of Hessian + :param center_x: x-coord of center towards which the rotation direction is + defined + :param center_y: x-coord of center towards which the rotation direction is + defined + :param smoothing_3rd: finite differential length of third order in units of + angle + :param smoothing_2nd: float or None, finite average differential scale of + Hessian :return: """ - lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan = self.radial_tangential_stretch(x, y, kwargs_lens, - diff=smoothing_2nd, - ra_0=center_x, dec_0=center_y, - coordinate_frame_definitions=True) + ( + lambda_rad, + lambda_tan, + v1_rad, + v2_rad, + v1_tan, + v2_tan, + ) = self.radial_tangential_stretch( + x, + y, + kwargs_lens, + diff=smoothing_2nd, + ra_0=center_x, + dec_0=center_y, + coordinate_frame_definitions=True, + ) x0 = x - center_x y0 = y - center_y # computing angle of tangential vector in regard to the defined coordinate center - cos_angle = (v1_tan * x0 + v2_tan * y0) / np.sqrt((x0 ** 2 + y0 ** 2) * (v1_tan ** 2 + v2_tan ** 2)) # * np.sign(v1_tan * y0 - v2_tan * x0) + cos_angle = (v1_tan * x0 + v2_tan * y0) / np.sqrt( + (x0**2 + y0**2) * (v1_tan**2 + v2_tan**2) + ) # * np.sign(v1_tan * y0 - v2_tan * x0) orientation_angle = np.arccos(cos_angle) - np.pi / 2 # computing differentials in tangential and radial directions dx_tan = x + smoothing_3rd * v1_tan dy_tan = y + smoothing_3rd * v2_tan - lambda_rad_dtan, lambda_tan_dtan, v1_rad_dtan, v2_rad_dtan, v1_tan_dtan, v2_tan_dtan = self.radial_tangential_stretch(dx_tan, dy_tan, kwargs_lens, diff=smoothing_2nd, - ra_0=center_x, dec_0=center_y, coordinate_frame_definitions=True) + ( + lambda_rad_dtan, + lambda_tan_dtan, + v1_rad_dtan, + v2_rad_dtan, + v1_tan_dtan, + v2_tan_dtan, + ) = self.radial_tangential_stretch( + dx_tan, + dy_tan, + kwargs_lens, + diff=smoothing_2nd, + ra_0=center_x, + dec_0=center_y, + coordinate_frame_definitions=True, + ) dx_rad = x + smoothing_3rd * v1_rad dy_rad = y + smoothing_3rd * v2_rad - lambda_rad_drad, lambda_tan_drad, v1_rad_drad, v2_rad_drad, v1_tan_drad, v2_tan_drad = self.radial_tangential_stretch( - dx_rad, dy_rad, kwargs_lens, diff=smoothing_2nd, ra_0=center_x, dec_0=center_y, coordinate_frame_definitions=True) + ( + lambda_rad_drad, + lambda_tan_drad, + v1_rad_drad, + v2_rad_drad, + v1_tan_drad, + v2_tan_drad, + ) = self.radial_tangential_stretch( + dx_rad, + dy_rad, + kwargs_lens, + diff=smoothing_2nd, + ra_0=center_x, + dec_0=center_y, + coordinate_frame_definitions=True, + ) # eigenvalue differentials in tangential and radial direction - dlambda_tan_dtan = (lambda_tan_dtan - lambda_tan) / smoothing_3rd # * np.sign(v1_tan * y0 - v2_tan * x0) - dlambda_tan_drad = (lambda_tan_drad - lambda_tan) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) - dlambda_rad_drad = (lambda_rad_drad - lambda_rad) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) - dlambda_rad_dtan = (lambda_rad_dtan - lambda_rad) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) + dlambda_tan_dtan = ( + lambda_tan_dtan - lambda_tan + ) / smoothing_3rd # * np.sign(v1_tan * y0 - v2_tan * x0) + dlambda_tan_drad = ( + lambda_tan_drad - 
lambda_tan + ) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) + dlambda_rad_drad = ( + lambda_rad_drad - lambda_rad + ) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) + dlambda_rad_dtan = ( + lambda_rad_dtan - lambda_rad + ) / smoothing_3rd # * np.sign(v1_rad * x0 + v2_rad * y0) # eigenvector direction differentials in tangential and radial direction - cos_dphi_tan_dtan = v1_tan * v1_tan_dtan + v2_tan * v2_tan_dtan # / (np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt(v1_tan_dtan**2 + v2_tan_dtan**2)) - norm = np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt(v1_tan_dtan**2 + v2_tan_dtan**2) + cos_dphi_tan_dtan = ( + v1_tan * v1_tan_dtan + v2_tan * v2_tan_dtan + ) # / (np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt(v1_tan_dtan**2 + v2_tan_dtan**2)) + norm = np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt( + v1_tan_dtan**2 + v2_tan_dtan**2 + ) cos_dphi_tan_dtan /= norm arc_cos_dphi_tan_dtan = np.arccos(np.abs(np.minimum(cos_dphi_tan_dtan, 1))) dphi_tan_dtan = arc_cos_dphi_tan_dtan / smoothing_3rd - cos_dphi_tan_drad = v1_tan * v1_tan_drad + v2_tan * v2_tan_drad # / (np.sqrt(v1_tan ** 2 + v2_tan ** 2) * np.sqrt(v1_tan_drad ** 2 + v2_tan_drad ** 2)) - norm = np.sqrt(v1_tan ** 2 + v2_tan ** 2) * np.sqrt(v1_tan_drad ** 2 + v2_tan_drad ** 2) + cos_dphi_tan_drad = ( + v1_tan * v1_tan_drad + v2_tan * v2_tan_drad + ) # / (np.sqrt(v1_tan ** 2 + v2_tan ** 2) * np.sqrt(v1_tan_drad ** 2 + v2_tan_drad ** 2)) + norm = np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt( + v1_tan_drad**2 + v2_tan_drad**2 + ) cos_dphi_tan_drad /= norm arc_cos_dphi_tan_drad = np.arccos(np.abs(np.minimum(cos_dphi_tan_drad, 1))) dphi_tan_drad = arc_cos_dphi_tan_drad / smoothing_3rd - cos_dphi_rad_drad = v1_rad * v1_rad_drad + v2_rad * v2_rad_drad # / (np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt(v1_rad_drad**2 + v2_rad_drad**2)) - norm = np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt(v1_rad_drad**2 + v2_rad_drad**2) + cos_dphi_rad_drad = ( + v1_rad * v1_rad_drad + v2_rad * v2_rad_drad + ) # / (np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt(v1_rad_drad**2 + v2_rad_drad**2)) + norm = np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt( + v1_rad_drad**2 + v2_rad_drad**2 + ) cos_dphi_rad_drad /= norm cos_dphi_rad_drad = np.minimum(cos_dphi_rad_drad, 1) dphi_rad_drad = np.arccos(cos_dphi_rad_drad) / smoothing_3rd - cos_dphi_rad_dtan = v1_rad * v1_rad_dtan + v2_rad * v2_rad_dtan # / (np.sqrt(v1_rad ** 2 + v2_rad ** 2) * np.sqrt(v1_rad_dtan ** 2 + v2_rad_dtan ** 2)) - norm = np.sqrt(v1_rad ** 2 + v2_rad ** 2) * np.sqrt(v1_rad_dtan ** 2 + v2_rad_dtan ** 2) + cos_dphi_rad_dtan = ( + v1_rad * v1_rad_dtan + v2_rad * v2_rad_dtan + ) # / (np.sqrt(v1_rad ** 2 + v2_rad ** 2) * np.sqrt(v1_rad_dtan ** 2 + v2_rad_dtan ** 2)) + norm = np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt( + v1_rad_dtan**2 + v2_rad_dtan**2 + ) cos_dphi_rad_dtan /= norm cos_dphi_rad_dtan = np.minimum(cos_dphi_rad_dtan, 1) dphi_rad_dtan = np.arccos(cos_dphi_rad_dtan) / smoothing_3rd - return lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan - - def curved_arc_estimate(self, x, y, kwargs_lens, smoothing=None, smoothing_3rd=0.001, tan_diff=False): - """ - performs the estimation of the curved arc description at a particular position of an arbitrary lens profile + return ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) + + def curved_arc_estimate( + self, x, 
y, kwargs_lens, smoothing=None, smoothing_3rd=0.001, tan_diff=False + ): + """Performs the estimation of the curved arc description at a particular + position of an arbitrary lens profile. :param x: float, x-position where the estimate is provided :param y: float, y-position where the estimate is provided :param kwargs_lens: lens model keyword arguments - :param smoothing: (optional) finite differential of second derivative (radial and tangential stretches) - :param smoothing_3rd: differential scale for third derivative to estimate the tangential curvature - :param tan_diff: boolean, if True, also returns the relative tangential stretch differential in tangential direction - :return: keyword argument list corresponding to a CURVED_ARC profile at (x, y) given the initial lens model + :param smoothing: (optional) finite differential of second derivative (radial + and tangential stretches) + :param smoothing_3rd: differential scale for third derivative to estimate the + tangential curvature + :param tan_diff: boolean, if True, also returns the relative tangential stretch + differential in tangential direction + :return: keyword argument list corresponding to a CURVED_ARC profile at (x, y) + given the initial lens model """ - radial_stretch, tangential_stretch, v_rad1, v_rad2, v_tang1, v_tang2 = self.radial_tangential_stretch(x, y, kwargs_lens, diff=smoothing) + ( + radial_stretch, + tangential_stretch, + v_rad1, + v_rad2, + v_tang1, + v_tang2, + ) = self.radial_tangential_stretch(x, y, kwargs_lens, diff=smoothing) dx_tang = x + smoothing_3rd * v_tang1 dy_tang = y + smoothing_3rd * v_tang2 - _, _, _, _, v_tang1_dt, v_tang2_dt = self.radial_tangential_stretch(dx_tang, dy_tang, kwargs_lens, - diff=smoothing) + _, _, _, _, v_tang1_dt, v_tang2_dt = self.radial_tangential_stretch( + dx_tang, dy_tang, kwargs_lens, diff=smoothing + ) d_tang1 = v_tang1_dt - v_tang1 d_tang2 = v_tang2_dt - v_tang2 delta = np.sqrt(d_tang1**2 + d_tang2**2) if delta > 1: d_tang1 = v_tang1_dt + v_tang1 d_tang2 = v_tang2_dt + v_tang2 - delta = np.sqrt(d_tang1 ** 2 + d_tang2 ** 2) + delta = np.sqrt(d_tang1**2 + d_tang2**2) curvature = delta / smoothing_3rd - direction = np.arctan2(v_rad2 * np.sign(v_rad1 * x + v_rad2 * y), v_rad1 * np.sign(v_rad1 * x + v_rad2 * y)) - - kwargs_arc = {'radial_stretch': radial_stretch, - 'tangential_stretch': tangential_stretch, - 'curvature': curvature, - 'direction': direction, - 'center_x': x, 'center_y': y} + direction = np.arctan2( + v_rad2 * np.sign(v_rad1 * x + v_rad2 * y), + v_rad1 * np.sign(v_rad1 * x + v_rad2 * y), + ) + + kwargs_arc = { + "radial_stretch": radial_stretch, + "tangential_stretch": tangential_stretch, + "curvature": curvature, + "direction": direction, + "center_x": x, + "center_y": y, + } if tan_diff: - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = self.radial_tangential_differentials(x, y, kwargs_lens, center_x=0, center_y=0, smoothing_3rd=smoothing_3rd) - kwargs_arc['dtan_dtan'] = dlambda_tan_dtan / lambda_tan + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = self.radial_tangential_differentials( + x, y, kwargs_lens, center_x=0, center_y=0, smoothing_3rd=smoothing_3rd + ) + kwargs_arc["dtan_dtan"] = dlambda_tan_dtan / lambda_tan return kwargs_arc def tangential_average(self, x, y, 
kwargs_lens, dr, smoothing=None, num_average=9): - """ - computes average tangential stretch around position (x, y) within dr in radial direction + """Computes average tangential stretch around position (x, y) within dr in + radial direction. :param x: x-position (float) :param y: y-position (float) :param kwargs_lens: lens model keyword argument list :param dr: averaging scale in radial direction :param smoothing: smoothing scale of derivative - :param num_average: integer, number of points averaged over within dr in the radial direction + :param num_average: integer, number of points averaged over within dr in the + radial direction :return: """ - radial_stretch, tangential_stretch, v_rad1, v_rad2, v_tang1, v_tang2 = self.radial_tangential_stretch(x, y, - kwargs_lens, - diff=smoothing) - dr_array = np.linspace(start=-dr/2., stop=dr/2., num=num_average) + ( + radial_stretch, + tangential_stretch, + v_rad1, + v_rad2, + v_tang1, + v_tang2, + ) = self.radial_tangential_stretch(x, y, kwargs_lens, diff=smoothing) + dr_array = np.linspace(start=-dr / 2.0, stop=dr / 2.0, num=num_average) dx_r = x + dr_array * v_rad1 dy_r = y + dr_array * v_rad2 - _, tangential_stretch_dr, _, _, _, _ = self.radial_tangential_stretch(dx_r, dy_r, kwargs_lens, diff=smoothing) + _, tangential_stretch_dr, _, _, _, _ = self.radial_tangential_stretch( + dx_r, dy_r, kwargs_lens, diff=smoothing + ) return np.average(tangential_stretch_dr) def curved_arc_finite_area(self, x, y, kwargs_lens, dr): - """ - computes an estimated curved arc over a finite extent mimicking the appearance of a finite source with radius dr + """Computes an estimated curved arc over a finite extent mimicking the + appearance of a finite source with radius dr. :param x: x-position (float) :param y: y-position (float) @@ -641,8 +938,8 @@ def curved_arc_finite_area(self, x, y, kwargs_lens, dr): # loop through curved arc estimate and compute curvature centroid for x_, y_ in zip(x_c, y_c): kwargs_arc_ = self.curved_arc_estimate(x_, y_, kwargs_lens) - direction = kwargs_arc_['direction'] - curvature = kwargs_arc_['curvature'] + direction = kwargs_arc_["direction"] + curvature = kwargs_arc_["curvature"] center_x = x_ - np.cos(direction) / curvature center_y = y_ - np.sin(direction) / curvature c_x_list.append(center_x) @@ -650,7 +947,7 @@ def curved_arc_finite_area(self, x, y, kwargs_lens, dr): center_x, center_y = np.median(c_x_list), np.median(c_y_list) # compute curvature and direction to the average centroid from the position of interest - r = np.sqrt((x - center_x) ** 2 + (y - center_y)**2) + r = np.sqrt((x - center_x) ** 2 + (y - center_y) ** 2) curvature = 1 / r direction = np.arctan2(y - center_y, x - center_x) @@ -662,7 +959,7 @@ def curved_arc_finite_area(self, x, y, kwargs_lens, dr): xs_r, ys_r = self._lensModel.ray_shooting(x_r, y_r, kwargs_lens) xs_r_, ys_r_ = self._lensModel.ray_shooting(x_r_, y_r_, kwargs_lens) - ds = np.sqrt((xs_r - xs_r_)**2 + (ys_r - ys_r_)**2) + ds = np.sqrt((xs_r - xs_r_) ** 2 + (ys_r - ys_r_) ** 2) radial_stretch = (2 * dr) / ds # compute average tangential stretch as the inverse difference in the sosurce position @@ -675,7 +972,12 @@ def curved_arc_finite_area(self, x, y, kwargs_lens, dr): xs_t_, ys_t_ = self._lensModel.ray_shooting(x_t_, y_t_, kwargs_lens) ds = np.sqrt((xs_t - xs_t_) ** 2 + (ys_t - ys_t_) ** 2) tangential_stretch = (2 * dr) / ds - kwargs_arc = {'direction': direction, 'radial_stretch': radial_stretch, - 'tangential_stretch': tangential_stretch, 'center_x': x, 'center_y': y, - 'curvature': curvature} 
+ kwargs_arc = { + "direction": direction, + "radial_stretch": radial_stretch, + "tangential_stretch": tangential_stretch, + "center_x": x, + "center_y": y, + "curvature": curvature, + } return kwargs_arc diff --git a/lenstronomy/LensModel/lens_param.py b/lenstronomy/LensModel/lens_param.py index e848c7b2f..ee518e53c 100644 --- a/lenstronomy/LensModel/lens_param.py +++ b/lenstronomy/LensModel/lens_param.py @@ -1,17 +1,23 @@ from lenstronomy.LensModel.single_plane import SinglePlane import numpy as np -__all__ = ['LensParam'] +__all__ = ["LensParam"] class LensParam(object): - """ - class to handle the lens model parameter - - """ - def __init__(self, lens_model_list, kwargs_fixed, - kwargs_lower=None, kwargs_upper=None, kwargs_logsampling=None, - num_images=0, solver_type='NONE', num_shapelet_lens=0): + """Class to handle the lens model parameter.""" + + def __init__( + self, + lens_model_list, + kwargs_fixed, + kwargs_lower=None, + kwargs_upper=None, + kwargs_logsampling=None, + num_images=0, + solver_type="NONE", + num_shapelet_lens=0, + ): """ :param lens_model_list: list of strings of lens model names @@ -73,33 +79,49 @@ def get_params(self, args, i): param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS_POLAR', 'SHAPELETS_CART'] and name == 'coeffs': + if ( + model in ["SHAPELETS_POLAR", "SHAPELETS_CART"] + and name == "coeffs" + ): num_coeffs = self._num_shapelet_lens - if self._solver_type == 'SHAPELETS' and k == 0: + if self._solver_type == "SHAPELETS" and k == 0: if self._num_images == 4: num_coeffs -= 6 - coeffs = args[i:i + num_coeffs] + coeffs = args[i : i + num_coeffs] coeffs = [0, 0, 0, 0, 0, 0] + list(coeffs[0:]) elif self._num_images == 2: num_coeffs -= 3 - coeffs = args[i:i + num_coeffs] + coeffs = args[i : i + num_coeffs] coeffs = [0, 0, 0] + list(coeffs[0:]) else: raise ValueError("Option for solver_type not valid!") - kwargs['coeffs'] = coeffs + kwargs["coeffs"] = coeffs else: - kwargs['coeffs'] = args[i:i + num_coeffs] + kwargs["coeffs"] = args[i : i + num_coeffs] i += num_coeffs - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'amp': - if 'sigma' in kwargs_fixed: - num_param = len(kwargs_fixed['sigma']) + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "amp" + ): + if "sigma" in kwargs_fixed: + num_param = len(kwargs_fixed["sigma"]) else: - num_param = len(kwargs['sigma']) - kwargs['amp'] = args[i:i + num_param] + num_param = len(kwargs["sigma"]) + kwargs["amp"] = args[i : i + num_param] i += num_param - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'sigma': + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "sigma" + ): raise ValueError("%s must have fixed 'sigma' list!" 
% model) - elif model in ['INTERPOL', 'INTERPOL_SCALED'] and name in ['f_', 'f_xx', 'f_xy', 'f_yy']: + elif model in ["INTERPOL", "INTERPOL_SCALED"] and name in [ + "f_", + "f_xx", + "f_xy", + "f_yy", + ]: pass else: kwargs[name] = args[i] @@ -108,7 +130,7 @@ def get_params(self, args, i): kwargs[name] = kwargs_fixed[name] if name in kwargs_logsampling and name not in kwargs_fixed: - kwargs[name] = 10**(kwargs[name]) + kwargs[name] = 10 ** (kwargs[name]) kwargs_list.append(kwargs) @@ -130,20 +152,36 @@ def set_params(self, kwargs_list): param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS_POLAR', 'SHAPELETS_CART'] and name == 'coeffs': - coeffs = kwargs['coeffs'] - if self._solver_type == 'SHAPELETS' and k == 0: + if ( + model in ["SHAPELETS_POLAR", "SHAPELETS_CART"] + and name == "coeffs" + ): + coeffs = kwargs["coeffs"] + if self._solver_type == "SHAPELETS" and k == 0: if self._num_images == 4: coeffs = coeffs[6:] elif self._num_images == 2: coeffs = coeffs[3:] args += list(coeffs) - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'amp': - amp = kwargs['amp'] + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "amp" + ): + amp = kwargs["amp"] args += list(amp) - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'sigma': + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "sigma" + ): raise ValueError("%s must have fixed 'sigma' list!" % model) - elif model in ['INTERPOL', 'INTERPOL_SCALED'] and name in ['f_', 'f_xx', 'f_xy', 'f_yy']: + elif model in ["INTERPOL", "INTERPOL_SCALED"] and name in [ + "f_", + "f_xx", + "f_xy", + "f_yy", + ]: pass # elif self._solver_type == 'PROFILE_SHEAR' and k == 1: # if name == 'e1': @@ -166,31 +204,49 @@ def num_param(self): """ num = 0 list = [] - type = 'lens' + type = "lens" for k, model in enumerate(self.model_list): kwargs_fixed = self.kwargs_fixed[k] param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS_POLAR', 'SHAPELETS_CART'] and name == 'coeffs': + if ( + model in ["SHAPELETS_POLAR", "SHAPELETS_CART"] + and name == "coeffs" + ): num_coeffs = self._num_shapelet_lens - if self._solver_type == 'SHAPELETS' and k == 0: + if self._solver_type == "SHAPELETS" and k == 0: if self._num_images == 4: num_coeffs -= 6 elif self._num_images == 2: num_coeffs -= 3 num += num_coeffs - list += [str(name + '_' + type + str(k))] * num_coeffs - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'amp': - num_param = len(kwargs_fixed['sigma']) + list += [str(name + "_" + type + str(k))] * num_coeffs + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "amp" + ): + num_param = len(kwargs_fixed["sigma"]) num += num_param for i in range(num_param): - list.append(str(name + '_' + type + str(k))) - elif model in ['MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] and name == 'sigma': - raise ValueError("'sigma' must be a fixed keyword argument for MULTI_GAUSSIAN") - elif model in ['INTERPOL', 'INTERPOL_SCALED'] and name in ['f_', 'f_xx', 'f_xy', 'f_yy']: + list.append(str(name + "_" + type + str(k))) + elif ( + model + in ["MULTI_GAUSSIAN_KAPPA", "MULTI_GAUSSIAN_KAPPA_ELLIPSE"] + and name == "sigma" + ): + raise ValueError( + "'sigma' must be a fixed keyword argument for MULTI_GAUSSIAN" + ) + elif model in ["INTERPOL", 
"INTERPOL_SCALED"] and name in [ + "f_", + "f_xx", + "f_xy", + "f_yy", + ]: pass else: num += 1 - list.append(str(name + '_' + type + str(k))) + list.append(str(name + "_" + type + str(k))) return num, list diff --git a/lenstronomy/LensModel/profile_integrals.py b/lenstronomy/LensModel/profile_integrals.py index 276da760b..b3b1bc6c4 100644 --- a/lenstronomy/LensModel/profile_integrals.py +++ b/lenstronomy/LensModel/profile_integrals.py @@ -2,7 +2,7 @@ import scipy.integrate as integrate import numpy as np -__all__ = ['ProfileIntegrals'] +__all__ = ["ProfileIntegrals"] class ProfileIntegrals(object): @@ -12,6 +12,7 @@ class to perform integrals of spherical profiles to compute: - enclosed densities - projected enclosed densities """ + def __init__(self, profile_class): """ @@ -20,43 +21,56 @@ def __init__(self, profile_class): self._profile = profile_class def mass_enclosed_3d(self, r, kwargs_profile, lens_param=False): - """ - computes the mass enclosed within a sphere of radius r + """Computes the mass enclosed within a sphere of radius r. :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters - :param lens_param: boolean, if True uses the lens model parameterization in computing the 3d density convention - and the return is the convergence + :param lens_param: boolean, if True uses the lens model parameterization in + computing the 3d density convention and the return is the convergence :return: 3d mass enclosed of r """ kwargs = copy.deepcopy(kwargs_profile) - kwargs.pop('center_x', None) - kwargs.pop('center_y', None) + kwargs.pop("center_x", None) + kwargs.pop("center_y", None) # integral of self._profile.density(x)* 4*np.pi * x^2 *dx, 0,r if lens_param is True: - out = integrate.quad(lambda x: self._profile.density_lens(x, **kwargs) * 4 * np.pi * x ** 2, 0, r) + out = integrate.quad( + lambda x: self._profile.density_lens(x, **kwargs) * 4 * np.pi * x**2, + 0, + r, + ) else: - out = integrate.quad(lambda x: self._profile.density(x, **kwargs)*4*np.pi*x**2, 0, r) + out = integrate.quad( + lambda x: self._profile.density(x, **kwargs) * 4 * np.pi * x**2, 0, r + ) return out[0] def density_2d(self, r, kwargs_profile, lens_param=False): - """ - computes the projected density along the line-of-sight + """Computes the projected density along the line-of-sight. 
:param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters - :param lens_param: boolean, if True uses the lens model parameterization in computing the 3d density convention - and the return is the convergence + :param lens_param: boolean, if True uses the lens model parameterization in + computing the 3d density convention and the return is the convergence :return: 2d projected density at projected radius r """ kwargs = copy.deepcopy(kwargs_profile) - kwargs.pop('center_x', None) - kwargs.pop('center_y', None) + kwargs.pop("center_x", None) + kwargs.pop("center_y", None) # integral of self._profile.density(np.sqrt(x^2+r^2))* dx, 0, infty if lens_param is True: - out = integrate.quad(lambda x: 2 * self._profile.density_lens(np.sqrt(x ** 2 + r ** 2), **kwargs), 0, 100) + out = integrate.quad( + lambda x: 2 + * self._profile.density_lens(np.sqrt(x**2 + r**2), **kwargs), + 0, + 100, + ) else: - out = integrate.quad(lambda x: 2*self._profile.density(np.sqrt(x**2+r**2), **kwargs), 0, 100) + out = integrate.quad( + lambda x: 2 * self._profile.density(np.sqrt(x**2 + r**2), **kwargs), + 0, + 100, + ) return out[0] def mass_enclosed_2d(self, r, kwargs_profile): @@ -67,8 +81,8 @@ def mass_enclosed_2d(self, r, kwargs_profile): :return: projected mass enclosed radius r """ kwargs = copy.deepcopy(kwargs_profile) - kwargs.pop('center_x', None) - kwargs.pop('center_y', None) + kwargs.pop("center_x", None) + kwargs.pop("center_y", None) # integral of self.density_2d(x)* 2*np.pi * x *dx, 0, r - out = integrate.quad(lambda x: self.density_2d(x, kwargs)*2*np.pi*x, 0, r) + out = integrate.quad(lambda x: self.density_2d(x, kwargs) * 2 * np.pi * x, 0, r) return out[0] diff --git a/lenstronomy/LensModel/profile_list_base.py b/lenstronomy/LensModel/profile_list_base.py index ae7969ba5..27793e68f 100644 --- a/lenstronomy/LensModel/profile_list_base.py +++ b/lenstronomy/LensModel/profile_list_base.py @@ -1,33 +1,108 @@ from lenstronomy.Util.util import convert_bool_list -__all__ = ['ProfileListBase'] - - -_SUPPORTED_MODELS = ['SHIFT', 'NIE_POTENTIAL', 'CONST_MAG', 'SHEAR', 'SHEAR_GAMMA_PSI', 'SHEAR_REDUCED', 'CONVERGENCE', 'FLEXION', - 'FLEXIONFG', 'POINT_MASS', 'SIS', 'SIS_TRUNCATED', 'SIE', 'SPP', 'NIE', 'NIE_SIMPLE', 'CHAMELEON', - 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'SPEP', 'PEMD', 'SPEMD', 'EPL', 'EPL_NUMBA', - 'EPL_BOXYDISKY', 'SPL_CORE', - 'NFW', 'NFW_ELLIPSE', 'NFW_ELLIPSE_GAUSS_DEC', 'NFW_ELLIPSE_CSE', 'TNFW', 'TNFW_ELLIPSE', - 'CNFW', 'CNFW_ELLIPSE', 'CTNFW_GAUSS_DEC', 'NFW_MC', 'SERSIC', - 'SERSIC_ELLIPSE_POTENTIAL', 'SERSIC_ELLIPSE_KAPPA', 'SERSIC_ELLIPSE_GAUSS_DEC', 'PJAFFE', - 'PJAFFE_ELLIPSE', 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'HERNQUIST_ELLIPSE_CSE', 'GAUSSIAN', 'GAUSSIAN_KAPPA', - 'GAUSSIAN_ELLIPSE_KAPPA', 'GAUSSIAN_ELLIPSE_POTENTIAL', 'MULTI_GAUSSIAN_KAPPA', - 'MULTI_GAUSSIAN_KAPPA_ELLIPSE', 'INTERPOL', 'INTERPOL_SCALED', 'SHAPELETS_POLAR', 'SHAPELETS_CART', - 'DIPOLE', 'CURVED_ARC_CONST', 'CURVED_ARC_SPP', 'CURVED_ARC_SIS_MST', 'CURVED_ARC_SPT', - 'CURVED_ARC_TAN_DIFF', 'ARC_PERT', 'coreBURKERT', - 'CORED_DENSITY', 'CORED_DENSITY_2', 'CORED_DENSITY_MST', 'CORED_DENSITY_2_MST', 'CORED_DENSITY_EXP', - 'CORED_DENSITY_EXP_MST', 'TABULATED_DEFLECTIONS', 'MULTIPOLE', 'HESSIAN', 'ElliSLICE', 'ULDM','CORED_DENSITY_ULDM_MST', - 'LOS', 'LOS_MINIMAL' , 'SYNTHESIS', - 'GNFW','CSE', 'TNFWC'] +__all__ = ["ProfileListBase"] + + +_SUPPORTED_MODELS = [ + "SHIFT", + "NIE_POTENTIAL", + "CONST_MAG", + "SHEAR", + "SHEAR_GAMMA_PSI", + "SHEAR_REDUCED", + "CONVERGENCE", + 
"FLEXION", + "FLEXIONFG", + "POINT_MASS", + "SIS", + "SIS_TRUNCATED", + "SIE", + "SPP", + "NIE", + "NIE_SIMPLE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "SPEP", + "PEMD", + "SPEMD", + "EPL", + "EPL_NUMBA", + "EPL_BOXYDISKY", + "SPL_CORE", + "NFW", + "NFW_ELLIPSE", + "NFW_ELLIPSE_GAUSS_DEC", + "NFW_ELLIPSE_CSE", + "TNFW", + "TNFW_ELLIPSE", + "CNFW", + "CNFW_ELLIPSE", + "CTNFW_GAUSS_DEC", + "NFW_MC", + "SERSIC", + "SERSIC_ELLIPSE_POTENTIAL", + "SERSIC_ELLIPSE_KAPPA", + "SERSIC_ELLIPSE_GAUSS_DEC", + "PJAFFE", + "PJAFFE_ELLIPSE", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "HERNQUIST_ELLIPSE_CSE", + "GAUSSIAN", + "GAUSSIAN_KAPPA", + "GAUSSIAN_ELLIPSE_KAPPA", + "GAUSSIAN_ELLIPSE_POTENTIAL", + "MULTI_GAUSSIAN_KAPPA", + "MULTI_GAUSSIAN_KAPPA_ELLIPSE", + "INTERPOL", + "INTERPOL_SCALED", + "SHAPELETS_POLAR", + "SHAPELETS_CART", + "DIPOLE", + "CURVED_ARC_CONST", + "CURVED_ARC_SPP", + "CURVED_ARC_SIS_MST", + "CURVED_ARC_SPT", + "CURVED_ARC_TAN_DIFF", + "ARC_PERT", + "coreBURKERT", + "CORED_DENSITY", + "CORED_DENSITY_2", + "CORED_DENSITY_MST", + "CORED_DENSITY_2_MST", + "CORED_DENSITY_EXP", + "CORED_DENSITY_EXP_MST", + "TABULATED_DEFLECTIONS", + "MULTIPOLE", + "HESSIAN", + "ElliSLICE", + "ULDM", + "CORED_DENSITY_ULDM_MST", + "LOS", + "LOS_MINIMAL", + "SYNTHESIS", + "GNFW", + "CSE", + "TNFWC", +] class ProfileListBase(object): + """Class that manages the list of lens model class instances. + + This class is applicable for single plane and multi plane lensing """ - class that manages the list of lens model class instances. This class is applicable for single plane and multi - plane lensing - """ - def __init__(self, lens_model_list, numerical_alpha_class=None, lens_redshift_list=None, z_source_convention=None, - kwargs_interp=None, kwargs_synthesis=None): + + def __init__( + self, + lens_model_list, + numerical_alpha_class=None, + lens_redshift_list=None, + z_source_convention=None, + kwargs_interp=None, + kwargs_synthesis=None, + ): """ :param lens_model_list: list of strings with lens model names @@ -37,16 +112,26 @@ def __init__(self, lens_model_list, numerical_alpha_class=None, lens_redshift_li See description in the Interpolate() class. Only applicable for 'INTERPOL' and 'INTERPOL_SCALED' models. 
:param kwargs_synthesis: keyword arguments for the 'SYNTHESIS' lens model, if applicable """ - self.func_list = self._load_model_instances(lens_model_list, custom_class=numerical_alpha_class, - lens_redshift_list=lens_redshift_list, - z_source_convention=z_source_convention, - kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + self.func_list = self._load_model_instances( + lens_model_list, + custom_class=numerical_alpha_class, + lens_redshift_list=lens_redshift_list, + z_source_convention=z_source_convention, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) self._num_func = len(self.func_list) self._model_list = lens_model_list - def _load_model_instances(self, lens_model_list, custom_class=None, lens_redshift_list=None, - z_source_convention=None, kwargs_interp=None, kwargs_synthesis=None): + def _load_model_instances( + self, + lens_model_list, + custom_class=None, + lens_redshift_list=None, + z_source_convention=None, + kwargs_interp=None, + kwargs_synthesis=None, + ): if lens_redshift_list is None: lens_redshift_list = [None] * len(lens_model_list) if kwargs_interp is None: @@ -59,15 +144,34 @@ def _load_model_instances(self, lens_model_list, custom_class=None, lens_redshif # those models require a new instance per profile as some pre-computations are different when parameters or # other settings are changed. For example, the 'INTERPOL' model needs to know the specific map to be # interpolated. - if lens_type in ['NFW_MC', 'CHAMELEON', 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'NFW_ELLIPSE_GAUSS_DEC', - 'CTNFW_GAUSS_DEC', 'INTERPOL', 'INTERPOL_SCALED', 'NIE', 'NIE_SIMPLE']: - lensmodel_class = self._import_class(lens_type, custom_class, z_lens=lens_redshift_list[i], - z_source=z_source_convention, kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + if lens_type in [ + "NFW_MC", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "NFW_ELLIPSE_GAUSS_DEC", + "CTNFW_GAUSS_DEC", + "INTERPOL", + "INTERPOL_SCALED", + "NIE", + "NIE_SIMPLE", + ]: + lensmodel_class = self._import_class( + lens_type, + custom_class, + z_lens=lens_redshift_list[i], + z_source=z_source_convention, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) else: if lens_type not in imported_classes.keys(): - lensmodel_class = self._import_class(lens_type, custom_class, kwargs_interp=kwargs_interp, - kwargs_synthesis=kwargs_synthesis) + lensmodel_class = self._import_class( + lens_type, + custom_class, + kwargs_interp=kwargs_interp, + kwargs_synthesis=kwargs_synthesis, + ) imported_classes.update({lens_type: lensmodel_class}) else: lensmodel_class = imported_classes[lens_type] @@ -75,7 +179,14 @@ def _load_model_instances(self, lens_model_list, custom_class=None, lens_redshif return func_list @staticmethod - def _import_class(lens_type, custom_class, kwargs_interp, kwargs_synthesis, z_lens=None, z_source=None): + def _import_class( + lens_type, + custom_class, + kwargs_interp, + kwargs_synthesis, + z_lens=None, + z_source=None, + ): """ :param lens_type: string, lens model type @@ -87,262 +198,382 @@ def _import_class(lens_type, custom_class, kwargs_interp, kwargs_synthesis, z_le :return: class instance of the lens model type """ - if lens_type == 'SHIFT': + if lens_type == "SHIFT": from lenstronomy.LensModel.Profiles.constant_shift import Shift + return Shift() - elif lens_type == 'NIE_POTENTIAL': + elif lens_type == "NIE_POTENTIAL": from lenstronomy.LensModel.Profiles.nie_potential import NIE_POTENTIAL + return NIE_POTENTIAL() - elif 
lens_type == 'CONST_MAG': + elif lens_type == "CONST_MAG": from lenstronomy.LensModel.Profiles.const_mag import ConstMag + return ConstMag() - elif lens_type == 'SHEAR': + elif lens_type == "SHEAR": from lenstronomy.LensModel.Profiles.shear import Shear + return Shear() - elif lens_type == 'SHEAR_GAMMA_PSI': + elif lens_type == "SHEAR_GAMMA_PSI": from lenstronomy.LensModel.Profiles.shear import ShearGammaPsi + return ShearGammaPsi() - elif lens_type == 'SHEAR_REDUCED': + elif lens_type == "SHEAR_REDUCED": from lenstronomy.LensModel.Profiles.shear import ShearReduced + return ShearReduced() - elif lens_type == 'CONVERGENCE': + elif lens_type == "CONVERGENCE": from lenstronomy.LensModel.Profiles.convergence import Convergence + return Convergence() - elif lens_type == 'HESSIAN': + elif lens_type == "HESSIAN": from lenstronomy.LensModel.Profiles.hessian import Hessian + return Hessian() - elif lens_type == 'FLEXION': + elif lens_type == "FLEXION": from lenstronomy.LensModel.Profiles.flexion import Flexion + return Flexion() - elif lens_type == 'FLEXIONFG': + elif lens_type == "FLEXIONFG": from lenstronomy.LensModel.Profiles.flexionfg import Flexionfg + return Flexionfg() - elif lens_type == 'POINT_MASS': + elif lens_type == "POINT_MASS": from lenstronomy.LensModel.Profiles.point_mass import PointMass + return PointMass() - elif lens_type == 'SIS': + elif lens_type == "SIS": from lenstronomy.LensModel.Profiles.sis import SIS + return SIS() - elif lens_type == 'SIS_TRUNCATED': + elif lens_type == "SIS_TRUNCATED": from lenstronomy.LensModel.Profiles.sis_truncate import SIS_truncate + return SIS_truncate() - elif lens_type == 'SIE': + elif lens_type == "SIE": from lenstronomy.LensModel.Profiles.sie import SIE + return SIE() - elif lens_type == 'SPP': + elif lens_type == "SPP": from lenstronomy.LensModel.Profiles.spp import SPP + return SPP() - elif lens_type == 'NIE': + elif lens_type == "NIE": from lenstronomy.LensModel.Profiles.nie import NIE + return NIE() - elif lens_type == 'NIE_SIMPLE': + elif lens_type == "NIE_SIMPLE": from lenstronomy.LensModel.Profiles.nie import NIEMajorAxis + return NIEMajorAxis() - elif lens_type == 'CHAMELEON': + elif lens_type == "CHAMELEON": from lenstronomy.LensModel.Profiles.chameleon import Chameleon + return Chameleon() - elif lens_type == 'DOUBLE_CHAMELEON': + elif lens_type == "DOUBLE_CHAMELEON": from lenstronomy.LensModel.Profiles.chameleon import DoubleChameleon + return DoubleChameleon() - elif lens_type == 'TRIPLE_CHAMELEON': + elif lens_type == "TRIPLE_CHAMELEON": from lenstronomy.LensModel.Profiles.chameleon import TripleChameleon + return TripleChameleon() - elif lens_type == 'SPEP': + elif lens_type == "SPEP": from lenstronomy.LensModel.Profiles.spep import SPEP + return SPEP() - elif lens_type == 'PEMD': + elif lens_type == "PEMD": from lenstronomy.LensModel.Profiles.pemd import PEMD + return PEMD() - elif lens_type == 'SPEMD': + elif lens_type == "SPEMD": from lenstronomy.LensModel.Profiles.spemd import SPEMD + return SPEMD() - elif lens_type == 'EPL': + elif lens_type == "EPL": from lenstronomy.LensModel.Profiles.epl import EPL + return EPL() - elif lens_type == 'EPL_NUMBA': + elif lens_type == "EPL_NUMBA": from lenstronomy.LensModel.Profiles.epl_numba import EPL_numba + return EPL_numba() - elif lens_type == 'EPL_BOXYDISKY': + elif lens_type == "EPL_BOXYDISKY": from lenstronomy.LensModel.Profiles.epl_boxydisky import EPL_BOXYDISKY + return EPL_BOXYDISKY() - elif lens_type == 'SPL_CORE': + elif lens_type == "SPL_CORE": from 
lenstronomy.LensModel.Profiles.splcore import SPLCORE + return SPLCORE() - elif lens_type == 'NFW': + elif lens_type == "NFW": from lenstronomy.LensModel.Profiles.nfw import NFW + return NFW() - elif lens_type == 'NFW_ELLIPSE': + elif lens_type == "NFW_ELLIPSE": from lenstronomy.LensModel.Profiles.nfw_ellipse import NFW_ELLIPSE + return NFW_ELLIPSE() - elif lens_type == 'NFW_ELLIPSE_GAUSS_DEC': - from lenstronomy.LensModel.Profiles.gauss_decomposition import NFWEllipseGaussDec + elif lens_type == "NFW_ELLIPSE_GAUSS_DEC": + from lenstronomy.LensModel.Profiles.gauss_decomposition import ( + NFWEllipseGaussDec, + ) + return NFWEllipseGaussDec() - elif lens_type == 'NFW_ELLIPSE_CSE': + elif lens_type == "NFW_ELLIPSE_CSE": from lenstronomy.LensModel.Profiles.nfw_ellipse_cse import NFW_ELLIPSE_CSE + return NFW_ELLIPSE_CSE() - elif lens_type == 'TNFW': + elif lens_type == "TNFW": from lenstronomy.LensModel.Profiles.tnfw import TNFW + return TNFW() - elif lens_type == 'TNFW_ELLIPSE': + elif lens_type == "TNFW_ELLIPSE": from lenstronomy.LensModel.Profiles.tnfw_ellipse import TNFW_ELLIPSE + return TNFW_ELLIPSE() - elif lens_type == 'CNFW': + elif lens_type == "CNFW": from lenstronomy.LensModel.Profiles.cnfw import CNFW + return CNFW() - elif lens_type == 'CNFW_ELLIPSE': + elif lens_type == "CNFW_ELLIPSE": from lenstronomy.LensModel.Profiles.cnfw_ellipse import CNFW_ELLIPSE + return CNFW_ELLIPSE() - elif lens_type == 'CTNFW_GAUSS_DEC': + elif lens_type == "CTNFW_GAUSS_DEC": from lenstronomy.LensModel.Profiles.gauss_decomposition import CTNFWGaussDec + return CTNFWGaussDec() - elif lens_type == 'NFW_MC': + elif lens_type == "NFW_MC": from lenstronomy.LensModel.Profiles.nfw_mass_concentration import NFWMC + return NFWMC(z_lens=z_lens, z_source=z_source) - elif lens_type == 'SERSIC': + elif lens_type == "SERSIC": from lenstronomy.LensModel.Profiles.sersic import Sersic + return Sersic() - elif lens_type == 'SERSIC_ELLIPSE_POTENTIAL': - from lenstronomy.LensModel.Profiles.sersic_ellipse_potential import SersicEllipse + elif lens_type == "SERSIC_ELLIPSE_POTENTIAL": + from lenstronomy.LensModel.Profiles.sersic_ellipse_potential import ( + SersicEllipse, + ) + return SersicEllipse() - elif lens_type == 'SERSIC_ELLIPSE_KAPPA': - from lenstronomy.LensModel.Profiles.sersic_ellipse_kappa import SersicEllipseKappa + elif lens_type == "SERSIC_ELLIPSE_KAPPA": + from lenstronomy.LensModel.Profiles.sersic_ellipse_kappa import ( + SersicEllipseKappa, + ) + return SersicEllipseKappa() - elif lens_type == 'SERSIC_ELLIPSE_GAUSS_DEC': - from lenstronomy.LensModel.Profiles.gauss_decomposition import SersicEllipseGaussDec + elif lens_type == "SERSIC_ELLIPSE_GAUSS_DEC": + from lenstronomy.LensModel.Profiles.gauss_decomposition import ( + SersicEllipseGaussDec, + ) + return SersicEllipseGaussDec() - elif lens_type == 'PJAFFE': + elif lens_type == "PJAFFE": from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe + return PJaffe() - elif lens_type == 'PJAFFE_ELLIPSE': + elif lens_type == "PJAFFE_ELLIPSE": from lenstronomy.LensModel.Profiles.p_jaffe_ellipse import PJaffe_Ellipse + return PJaffe_Ellipse() - elif lens_type == 'HERNQUIST': + elif lens_type == "HERNQUIST": from lenstronomy.LensModel.Profiles.hernquist import Hernquist + return Hernquist() - elif lens_type == 'HERNQUIST_ELLIPSE': - from lenstronomy.LensModel.Profiles.hernquist_ellipse import Hernquist_Ellipse + elif lens_type == "HERNQUIST_ELLIPSE": + from lenstronomy.LensModel.Profiles.hernquist_ellipse import ( + Hernquist_Ellipse, + ) + return 
Hernquist_Ellipse() - elif lens_type =='HERNQUIST_ELLIPSE_CSE': - from lenstronomy.LensModel.Profiles.hernquist_ellipse_cse import HernquistEllipseCSE + elif lens_type == "HERNQUIST_ELLIPSE_CSE": + from lenstronomy.LensModel.Profiles.hernquist_ellipse_cse import ( + HernquistEllipseCSE, + ) + return HernquistEllipseCSE() - elif lens_type == 'GAUSSIAN': + elif lens_type == "GAUSSIAN": from lenstronomy.LensModel.Profiles.gaussian_potential import Gaussian + return Gaussian() - elif lens_type == 'GAUSSIAN_KAPPA': + elif lens_type == "GAUSSIAN_KAPPA": from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa + return GaussianKappa() - elif lens_type == 'GAUSSIAN_ELLIPSE_KAPPA': - from lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import GaussianEllipseKappa + elif lens_type == "GAUSSIAN_ELLIPSE_KAPPA": + from lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import ( + GaussianEllipseKappa, + ) + return GaussianEllipseKappa() - elif lens_type == 'GAUSSIAN_ELLIPSE_POTENTIAL': - from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import GaussianEllipsePotential + elif lens_type == "GAUSSIAN_ELLIPSE_POTENTIAL": + from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import ( + GaussianEllipsePotential, + ) + return GaussianEllipsePotential() - elif lens_type == 'MULTI_GAUSSIAN_KAPPA': - from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import MultiGaussianKappa + elif lens_type == "MULTI_GAUSSIAN_KAPPA": + from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import ( + MultiGaussianKappa, + ) + return MultiGaussianKappa() - elif lens_type == 'MULTI_GAUSSIAN_KAPPA_ELLIPSE': - from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import MultiGaussianKappaEllipse + elif lens_type == "MULTI_GAUSSIAN_KAPPA_ELLIPSE": + from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import ( + MultiGaussianKappaEllipse, + ) + return MultiGaussianKappaEllipse() - elif lens_type == 'INTERPOL': + elif lens_type == "INTERPOL": from lenstronomy.LensModel.Profiles.interpol import Interpol + return Interpol(**kwargs_interp) - elif lens_type == 'INTERPOL_SCALED': + elif lens_type == "INTERPOL_SCALED": from lenstronomy.LensModel.Profiles.interpol import InterpolScaled + return InterpolScaled(**kwargs_interp) - elif lens_type == 'SHAPELETS_POLAR': + elif lens_type == "SHAPELETS_POLAR": from lenstronomy.LensModel.Profiles.shapelet_pot_polar import PolarShapelets + return PolarShapelets() - elif lens_type == 'SHAPELETS_CART': - from lenstronomy.LensModel.Profiles.shapelet_pot_cartesian import CartShapelets + elif lens_type == "SHAPELETS_CART": + from lenstronomy.LensModel.Profiles.shapelet_pot_cartesian import ( + CartShapelets, + ) + return CartShapelets() - elif lens_type == 'DIPOLE': + elif lens_type == "DIPOLE": from lenstronomy.LensModel.Profiles.dipole import Dipole + return Dipole() - elif lens_type == 'CURVED_ARC_CONST': + elif lens_type == "CURVED_ARC_CONST": from lenstronomy.LensModel.Profiles.curved_arc_const import CurvedArcConst + return CurvedArcConst() - elif lens_type == 'CURVED_ARC_CONST_MST': - from lenstronomy.LensModel.Profiles.curved_arc_const import CurvedArcConstMST + elif lens_type == "CURVED_ARC_CONST_MST": + from lenstronomy.LensModel.Profiles.curved_arc_const import ( + CurvedArcConstMST, + ) + return CurvedArcConstMST() - elif lens_type == 'CURVED_ARC_SPP': + elif lens_type == "CURVED_ARC_SPP": from lenstronomy.LensModel.Profiles.curved_arc_spp import CurvedArcSPP + return CurvedArcSPP() - elif lens_type == 'CURVED_ARC_SIS_MST': - from 
lenstronomy.LensModel.Profiles.curved_arc_sis_mst import CurvedArcSISMST + elif lens_type == "CURVED_ARC_SIS_MST": + from lenstronomy.LensModel.Profiles.curved_arc_sis_mst import ( + CurvedArcSISMST, + ) + return CurvedArcSISMST() - elif lens_type == 'CURVED_ARC_SPT': + elif lens_type == "CURVED_ARC_SPT": from lenstronomy.LensModel.Profiles.curved_arc_spt import CurvedArcSPT + return CurvedArcSPT() - elif lens_type == 'CURVED_ARC_TAN_DIFF': - from lenstronomy.LensModel.Profiles.curved_arc_tan_diff import CurvedArcTanDiff + elif lens_type == "CURVED_ARC_TAN_DIFF": + from lenstronomy.LensModel.Profiles.curved_arc_tan_diff import ( + CurvedArcTanDiff, + ) + return CurvedArcTanDiff() - elif lens_type == 'ARC_PERT': - from lenstronomy.LensModel.Profiles.arc_perturbations import ArcPerturbations + elif lens_type == "ARC_PERT": + from lenstronomy.LensModel.Profiles.arc_perturbations import ( + ArcPerturbations, + ) + return ArcPerturbations() - elif lens_type == 'coreBURKERT': + elif lens_type == "coreBURKERT": from lenstronomy.LensModel.Profiles.coreBurkert import CoreBurkert + return CoreBurkert() - elif lens_type == 'CORED_DENSITY': + elif lens_type == "CORED_DENSITY": from lenstronomy.LensModel.Profiles.cored_density import CoredDensity + return CoredDensity() - elif lens_type == 'CORED_DENSITY_2': + elif lens_type == "CORED_DENSITY_2": from lenstronomy.LensModel.Profiles.cored_density_2 import CoredDensity2 + return CoredDensity2() - elif lens_type == 'CORED_DENSITY_EXP': + elif lens_type == "CORED_DENSITY_EXP": from lenstronomy.LensModel.Profiles.cored_density_exp import CoredDensityExp + return CoredDensityExp() - elif lens_type == 'CORED_DENSITY_MST': + elif lens_type == "CORED_DENSITY_MST": from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST - return CoredDensityMST(profile_type='CORED_DENSITY') - elif lens_type == 'CORED_DENSITY_2_MST': + + return CoredDensityMST(profile_type="CORED_DENSITY") + elif lens_type == "CORED_DENSITY_2_MST": from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST - return CoredDensityMST(profile_type='CORED_DENSITY_2') - elif lens_type == 'CORED_DENSITY_EXP_MST': + + return CoredDensityMST(profile_type="CORED_DENSITY_2") + elif lens_type == "CORED_DENSITY_EXP_MST": from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST - return CoredDensityMST(profile_type='CORED_DENSITY_EXP') - elif lens_type == 'TABULATED_DEFLECTIONS': - from lenstronomy.LensModel.Profiles.numerical_deflections import TabulatedDeflections + + return CoredDensityMST(profile_type="CORED_DENSITY_EXP") + elif lens_type == "TABULATED_DEFLECTIONS": + from lenstronomy.LensModel.Profiles.numerical_deflections import ( + TabulatedDeflections, + ) + return TabulatedDeflections(custom_class) - elif lens_type == 'MULTIPOLE': + elif lens_type == "MULTIPOLE": from lenstronomy.LensModel.Profiles.multipole import Multipole + return Multipole() - elif lens_type == 'CSE': + elif lens_type == "CSE": from lenstronomy.LensModel.Profiles.cored_steep_ellipsoid import CSE + return CSE() - elif lens_type == 'ElliSLICE': - from lenstronomy.LensModel.Profiles.elliptical_density_slice import ElliSLICE + elif lens_type == "ElliSLICE": + from lenstronomy.LensModel.Profiles.elliptical_density_slice import ( + ElliSLICE, + ) + return ElliSLICE() - elif lens_type == 'ULDM': + elif lens_type == "ULDM": from lenstronomy.LensModel.Profiles.uldm import Uldm + return Uldm() - elif lens_type == 'GNFW': + elif lens_type == "GNFW": from 
lenstronomy.LensModel.Profiles.general_nfw import GNFW + return GNFW() - elif lens_type == 'CORED_DENSITY_ULDM_MST': + elif lens_type == "CORED_DENSITY_ULDM_MST": from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST - return CoredDensityMST(profile_type='CORED_DENSITY_ULDM') - elif lens_type == 'LOS': + + return CoredDensityMST(profile_type="CORED_DENSITY_ULDM") + elif lens_type == "LOS": from lenstronomy.LensModel.LineOfSight.LOSModels.los import LOS + return LOS() - elif lens_type == 'LOS_MINIMAL': - from lenstronomy.LensModel.LineOfSight.LOSModels.los_minimal import LOSMinimal + elif lens_type == "LOS_MINIMAL": + from lenstronomy.LensModel.LineOfSight.LOSModels.los_minimal import ( + LOSMinimal, + ) + return LOSMinimal() - elif lens_type == 'SYNTHESIS': + elif lens_type == "SYNTHESIS": from lenstronomy.LensModel.Profiles.synthesis import SynthesisProfile + return SynthesisProfile(**kwargs_synthesis) - elif lens_type == 'TNFWC': + elif lens_type == "TNFWC": from lenstronomy.LensModel.Profiles.nfw_core_truncated import TNFWC + return TNFWC() else: - raise ValueError('%s is not a valid lens model. Supported are: %s.' % (lens_type, _SUPPORTED_MODELS)) + raise ValueError( + "%s is not a valid lens model. Supported are: %s." + % (lens_type, _SUPPORTED_MODELS) + ) def _bool_list(self, k=None): - """ - returns a bool list of the length of the lens models - if k = None: returns bool list with True's - if k is int, returns bool list with False's but k'th is True - if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in the integers listed and False elsewhere - if k is a boolean list, checks for size to match the numbers of models and returns it + """Returns a bool list of the length of the lens models if k = None: returns + bool list with True's if k is int, returns bool list with False's but k'th is + True if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in + the integers listed and False elsewhere if k is a boolean list, checks for size + to match the numbers of models and returns it. :param k: None, int, or list of ints :return: bool list @@ -360,9 +591,9 @@ def set_static(self, kwargs_list): return kwargs_list def set_dynamic(self): - """ - frees cache set by static model (if exists) and re-computes all lensing quantities each time a definition is - called assuming different parameters are executed. This is the default mode if not specified as set_static() + """Frees cache set by static model (if exists) and re-computes all lensing + quantities each time a definition is called assuming different parameters are + executed. 
This is the default mode if not specified as set_static() :return: None """ diff --git a/lenstronomy/LensModel/single_plane.py b/lenstronomy/LensModel/single_plane.py index d79773a0e..b59f15943 100644 --- a/lenstronomy/LensModel/single_plane.py +++ b/lenstronomy/LensModel/single_plane.py @@ -1,61 +1,53 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from lenstronomy.LensModel.profile_list_base import ProfileListBase -__all__ = ['SinglePlane'] +__all__ = ["SinglePlane"] class SinglePlane(ProfileListBase): - """ - class to handle an arbitrary list of lens models in a single lensing plane - """ + """Class to handle an arbitrary list of lens models in a single lensing plane.""" def ray_shooting(self, x, y, kwargs, k=None): - """ - maps image to source position (inverse deflection) - :param x: x-position (preferentially arcsec) - :type x: numpy array - :param y: y-position (preferentially arcsec) - :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes - :param k: only evaluate the k-th lens model - :return: source plane positions corresponding to (x, y) in the image plane - """ + """Maps image to source position (inverse deflection) :param x: x-position + (preferentially arcsec) :type x: numpy array :param y: y-position + (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword + arguments of lens model parameters matching the lens model classes :param k: + only evaluate the k-th lens model :return: source plane positions corresponding + to (x, y) in the image plane.""" dx, dy = self.alpha(x, y, kwargs, k=k) return x - dx, y - dy - def fermat_potential(self, x_image, y_image, kwargs_lens, x_source=None, y_source=None, k=None): - """ - fermat potential (negative sign means earlier arrival time) + def fermat_potential( + self, x_image, y_image, kwargs_lens, x_source=None, y_source=None, k=None + ): + """Fermat potential (negative sign means earlier arrival time) :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position - :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs_lens: list of keyword arguments of lens model parameters matching + the lens model classes :param k: - :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list + :return: fermat potential in arcsec**2 without geometry term (second part of Eqn + 1 in Suyu et al. 2013) as a list """ potential = self.potential(x_image, y_image, kwargs_lens, k=k) if x_source is None or y_source is None: x_source, y_source = self.ray_shooting(x_image, y_image, kwargs_lens, k=k) - geometry = ((x_image - x_source)**2 + (y_image - y_source)**2) / 2. 
+ geometry = ((x_image - x_source) ** 2 + (y_image - y_source) ** 2) / 2.0 return geometry - potential def potential(self, x, y, kwargs, k=None): - """ - lensing potential - :param x: x-position (preferentially arcsec) - :type x: numpy array - :param y: y-position (preferentially arcsec) - :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes - :param k: only evaluate the k-th lens model - :return: lensing potential in units of arcsec^2 - """ + """Lensing potential :param x: x-position (preferentially arcsec) :type x: numpy + array :param y: y-position (preferentially arcsec) :type y: numpy array :param + kwargs: list of keyword arguments of lens model parameters matching the lens + model classes :param k: only evaluate the k-th lens model :return: lensing + potential in units of arcsec^2.""" x = np.array(x, dtype=float) y = np.array(y, dtype=float) if isinstance(k, int): @@ -68,17 +60,11 @@ def potential(self, x, y, kwargs, k=None): return potential def alpha(self, x, y, kwargs, k=None): - - """ - deflection angles - :param x: x-position (preferentially arcsec) - :type x: numpy array - :param y: y-position (preferentially arcsec) - :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes - :param k: only evaluate the k-th lens model - :return: deflection angles in units of arcsec - """ + """Deflection angles :param x: x-position (preferentially arcsec) :type x: numpy + array :param y: y-position (preferentially arcsec) :type y: numpy array :param + kwargs: list of keyword arguments of lens model parameters matching the lens + model classes :param k: only evaluate the k-th lens model :return: deflection + angles in units of arcsec.""" x = np.array(x, dtype=float) y = np.array(y, dtype=float) @@ -96,16 +82,11 @@ def alpha(self, x, y, kwargs, k=None): return f_x, f_y def hessian(self, x, y, kwargs, k=None): - """ - hessian matrix - :param x: x-position (preferentially arcsec) - :type x: numpy array - :param y: y-position (preferentially arcsec) - :type y: numpy array - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes - :param k: only evaluate the k-th lens model - :return: f_xx, f_xy, f_yx, f_yy components - """ + """Hessian matrix :param x: x-position (preferentially arcsec) :type x: numpy + array :param y: y-position (preferentially arcsec) :type y: numpy array :param + kwargs: list of keyword arguments of lens model parameters matching the lens + model classes :param k: only evaluate the k-th lens model :return: f_xx, f_xy, + f_yx, f_yy components.""" x = np.array(x, dtype=float) y = np.array(y, dtype=float) if isinstance(k, int): @@ -113,7 +94,12 @@ def hessian(self, x, y, kwargs, k=None): return f_xx, f_xy, f_yx, f_yy bool_list = self._bool_list(k) - f_xx, f_xy, f_yx, f_yy = np.zeros_like(x), np.zeros_like(x), np.zeros_like(x), np.zeros_like(x) + f_xx, f_xy, f_yx, f_yy = ( + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + np.zeros_like(x), + ) for i, func in enumerate(self.func_list): if bool_list[i] is True: f_xx_i, f_xy_i, f_yx_i, f_yy_i = func.hessian(x, y, **kwargs[i]) @@ -124,15 +110,16 @@ def hessian(self, x, y, kwargs, k=None): return f_xx, f_xy, f_yx, f_yy def mass_3d(self, r, kwargs, bool_list=None): - """ - computes the mass within a 3d sphere of radius r + """Computes the mass within a 3d sphere of radius r. 
if you want to have physical units of kg, you need to multiply by this factor: - const.arcsec ** 2 * self._cosmo.dd * self._cosmo.ds / self._cosmo.dds * const.Mpc * const.c ** 2 / (4 * np.pi * const.G) - grav_pot = -const.G * mass_dim / (r * const.arcsec * self._cosmo.dd * const.Mpc) + const.arcsec ** 2 * self._cosmo.dd * self._cosmo.ds / self._cosmo.dds * + const.Mpc * const.c ** 2 / (4 * np.pi * const.G) grav_pot = -const.G * mass_dim + / (r * const.arcsec * self._cosmo.dd * const.Mpc) :param r: radius (in angular units) - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit) """ @@ -140,14 +127,17 @@ def mass_3d(self, r, kwargs, bool_list=None): mass_3d = 0 for i, func in enumerate(self.func_list): if bool_list[i] is True: - kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']} + kwargs_i = { + k: v + for k, v in kwargs[i].items() + if k not in ["center_x", "center_y"] + } mass_3d_i = func.mass_3d_lens(r, **kwargs_i) mass_3d += mass_3d_i return mass_3d def mass_2d(self, r, kwargs, bool_list=None): - """ - computes the mass enclosed a projected (2d) radius r + """Computes the mass enclosed a projected (2d) radius r. The mass definition is such that: @@ -165,18 +155,22 @@ def mass_2d(self, r, kwargs, bool_list=None): mass_2d = 0 for i, func in enumerate(self.func_list): if bool_list[i] is True: - kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']} + kwargs_i = { + k: v + for k, v in kwargs[i].items() + if k not in ["center_x", "center_y"] + } mass_2d_i = func.mass_2d_lens(r, **kwargs_i) mass_2d += mass_2d_i return mass_2d def density(self, r, kwargs, bool_list=None): - """ - 3d mass density at radius r - The integral in the LOS projection of this quantity results in the convergence quantity. + """3d mass density at radius r The integral in the LOS projection of this + quantity results in the convergence quantity. 
:param r: radius (in angular units) - :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes + :param kwargs: list of keyword arguments of lens model parameters matching the + lens model classes :param bool_list: list of bools that are part of the output :return: mass density at radius r (in angular units, modulo epsilon_crit) """ @@ -184,7 +178,11 @@ def density(self, r, kwargs, bool_list=None): density = 0 for i, func in enumerate(self.func_list): if bool_list[i] is True: - kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']} + kwargs_i = { + k: v + for k, v in kwargs[i].items() + if k not in ["center_x", "center_y"] + } density_i = func.density_lens(r, **kwargs_i) density += density_i return density diff --git a/lenstronomy/LightModel/Profiles/chameleon.py b/lenstronomy/LightModel/Profiles/chameleon.py index 4ead5f6be..b36834e93 100644 --- a/lenstronomy/LightModel/Profiles/chameleon.py +++ b/lenstronomy/LightModel/Profiles/chameleon.py @@ -1,18 +1,34 @@ from lenstronomy.LightModel.Profiles.nie import NIE from lenstronomy.LensModel.Profiles.chameleon import Chameleon as ChameleonLens from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Chameleon(object): - """ - class of the Chameleon model (See Dutton+ 2011, Suyu+2014) an elliptical truncated double isothermal profile + """Class of the Chameleon model (See Dutton+ 2011, Suyu+2014) an elliptical + truncated double isothermal profile.""" - """ - param_names = ['amp', 'w_c', 'w_t', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'w_c': 0, 'w_t': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'w_c': 100, 'w_t': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + param_names = ["amp", "w_c", "w_t", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "w_c": 0, + "w_t": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "w_c": 100, + "w_t": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.nie = NIE() @@ -32,7 +48,9 @@ def function(self, x, y, amp, w_c, w_t, e1, e2, center_x=0, center_y=0): :param center_y: center :return: flux of chameleon profile """ - amp_new, w_c, w_t, s_scale_1, s_scale_2 = self._chameleonLens.param_convert(amp, w_c, w_t, e1, e2) + amp_new, w_c, w_t, s_scale_1, s_scale_2 = self._chameleonLens.param_convert( + amp, w_c, w_t, e1, e2 + ) flux1 = self.nie.function(x, y, 1, e1, e2, s_scale_1, center_x, center_y) flux2 = self.nie.function(x, y, 1, e1, e2, s_scale_2, center_x, center_y) flux = amp_new * (flux1 - flux2) @@ -51,7 +69,9 @@ def light_3d(self, r, amp, w_c, w_t, e1, e2, center_x=0, center_y=0): :param center_y: center :return: 3d flux of chameleon profile at radius r """ - amp_new, w_c, w_t, s_scale_1, s_scale_2 = self._chameleonLens.param_convert(amp, w_c, w_t, e1, e2) + amp_new, w_c, w_t, s_scale_1, s_scale_2 = self._chameleonLens.param_convert( + amp, w_c, w_t, e1, e2 + ) flux1 = self.nie.light_3d(r, 1, e1, e2, s_scale_1, center_x, center_y) flux2 = self.nie.light_3d(r, 1, e1, e2, s_scale_2, center_x, center_y) flux = amp_new * (flux1 - flux2) @@ -60,22 +80,74 @@ def light_3d(self, r, amp, w_c, w_t, e1, e2, center_x=0, center_y=0): @export class DoubleChameleon(object): - """ - class of the double Chameleon model. See Dutton+2011, Suyu+2014 for the single Chameleon model. 
+ """Class of the double Chameleon model. + See Dutton+2011, Suyu+2014 for the single Chameleon model. """ - param_names = ['amp', 'ratio', 'w_c1', 'w_t1', 'e11', 'e21', 'w_c2', 'w_t2', 'e12', 'e22', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'ratio': 0, 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, 'e21': -0.8, - 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'ratio': 100, 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8, - 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8, - 'center_x': 100, 'center_y': 100} + + param_names = [ + "amp", + "ratio", + "w_c1", + "w_t1", + "e11", + "e21", + "w_c2", + "w_t2", + "e12", + "e22", + "center_x", + "center_y", + ] + lower_limit_default = { + "amp": 0, + "ratio": 0, + "w_c1": 0, + "w_t1": 0, + "e11": -0.8, + "e21": -0.8, + "w_c2": 0, + "w_t2": 0, + "e12": -0.8, + "e22": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "ratio": 100, + "w_c1": 100, + "w_t1": 100, + "e11": 0.8, + "e21": 0.8, + "w_c2": 100, + "w_t2": 100, + "e12": 0.8, + "e22": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.chameleon = Chameleon() - def function(self, x, y, amp, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def function( + self, + x, + y, + amp, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param x: @@ -94,11 +166,30 @@ def function(self, x, y, amp, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, :param center_y: :return: """ - f_1 = self.chameleon.function(x, y, amp / (1. + 1./ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self.chameleon.function(x, y, amp / (1. + ratio), w_c2, w_t2, e12, e22, center_x, center_y) + f_1 = self.chameleon.function( + x, y, amp / (1.0 + 1.0 / ratio), w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self.chameleon.function( + x, y, amp / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_1 + f_2 - def light_3d(self, r, amp, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, center_x=0, center_y=0): + def light_3d( + self, + r, + amp, + ratio, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + center_x=0, + center_y=0, + ): """ :param r: 3d radius @@ -116,36 +207,103 @@ def light_3d(self, r, amp, ratio, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, ce :param center_y: :return: 3d light density at radius r """ - f_1 = self.chameleon.light_3d(r, amp / (1. + 1./ratio), w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self.chameleon.light_3d(r, amp / (1. 
+ ratio), w_c2, w_t2, e12, e22, center_x, center_y) + f_1 = self.chameleon.light_3d( + r, amp / (1.0 + 1.0 / ratio), w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self.chameleon.light_3d( + r, amp / (1.0 + ratio), w_c2, w_t2, e12, e22, center_x, center_y + ) return f_1 + f_2 @export class TripleChameleon(object): - """ - class of the Chameleon model (See Suyu+2014) an elliptical truncated double isothermal profile + """Class of the Chameleon model (See Suyu+2014) an elliptical truncated double + isothermal profile.""" - """ - param_names = ['amp', 'ratio12', 'ratio13', 'w_c1', 'w_t1', 'e11', 'e21', - 'w_c2', 'w_t2', 'e12', 'e22', 'w_c3', 'w_t3', 'e13', - 'e23', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'ratio12': 0, 'ratio13': 0., - 'w_c1': 0, 'w_t1': 0, 'e11': -0.8, 'e21': -0.8, - 'w_c2': 0, 'w_t2': 0, 'e12': -0.8, 'e22': -0.8, - 'w_c3': 0, 'w_t3': 0, 'e13': -0.8, 'e23': -0.8, - 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'ratio12': 100, 'ratio13': 100, - 'w_c1': 100, 'w_t1': 100, 'e11': 0.8, 'e21': 0.8, - 'w_c2': 100, 'w_t2': 100, 'e12': 0.8, 'e22': 0.8, - 'w_c3': 100, 'w_t3': 100, 'e13': 0.8, 'e23': 0.8, - 'center_x': 100, 'center_y': 100} + param_names = [ + "amp", + "ratio12", + "ratio13", + "w_c1", + "w_t1", + "e11", + "e21", + "w_c2", + "w_t2", + "e12", + "e22", + "w_c3", + "w_t3", + "e13", + "e23", + "center_x", + "center_y", + ] + lower_limit_default = { + "amp": 0, + "ratio12": 0, + "ratio13": 0.0, + "w_c1": 0, + "w_t1": 0, + "e11": -0.8, + "e21": -0.8, + "w_c2": 0, + "w_t2": 0, + "e12": -0.8, + "e22": -0.8, + "w_c3": 0, + "w_t3": 0, + "e13": -0.8, + "e23": -0.8, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "ratio12": 100, + "ratio13": 100, + "w_c1": 100, + "w_t1": 100, + "e11": 0.8, + "e21": 0.8, + "w_c2": 100, + "w_t2": 100, + "e12": 0.8, + "e22": 0.8, + "w_c3": 100, + "w_t3": 100, + "e13": 0.8, + "e23": 0.8, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.chameleon = Chameleon() - def function(self, x, y, amp, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def function( + self, + x, + y, + amp, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param x: @@ -169,16 +327,41 @@ def function(self, x, y, amp, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2 :param center_y: :return: """ - amp1 = amp / (1. 
+ 1./ratio12 + 1./ratio13) + amp1 = amp / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 - f_1 = self.chameleon.function(x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y) - f_2 = self.chameleon.function(x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y) - f_3 = self.chameleon.function(x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y) + f_1 = self.chameleon.function( + x, y, amp1, w_c1, w_t1, e11, e21, center_x, center_y + ) + f_2 = self.chameleon.function( + x, y, amp2, w_c2, w_t2, e12, e22, center_x, center_y + ) + f_3 = self.chameleon.function( + x, y, amp3, w_c3, w_t3, e13, e23, center_x, center_y + ) return f_1 + f_2 + f_3 - def light_3d(self, r, amp, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e12, e22, w_c3, w_t3, e13, e23, - center_x=0, center_y=0): + def light_3d( + self, + r, + amp, + ratio12, + ratio13, + w_c1, + w_t1, + e11, + e21, + w_c2, + w_t2, + e12, + e22, + w_c3, + w_t3, + e13, + e23, + center_x=0, + center_y=0, + ): """ :param r: 3d light radius @@ -201,7 +384,7 @@ def light_3d(self, r, amp, ratio12, ratio13, w_c1, w_t1, e11, e21, w_c2, w_t2, e :param center_y: :return: """ - amp1 = amp / (1. + 1./ratio12 + 1./ratio13) + amp1 = amp / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 f_1 = self.chameleon.light_3d(r, amp1, w_c1, w_t1, e11, e21, center_x, center_y) diff --git a/lenstronomy/LightModel/Profiles/ellipsoid.py b/lenstronomy/LightModel/Profiles/ellipsoid.py index d104f5c84..7c0da1511 100644 --- a/lenstronomy/LightModel/Profiles/ellipsoid.py +++ b/lenstronomy/LightModel/Profiles/ellipsoid.py @@ -1,20 +1,33 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to make an Ellipsoid import numpy as np from lenstronomy.Util import param_util -__all__ = ['Ellipsoid'] +__all__ = ["Ellipsoid"] class Ellipsoid(object): - """ - class for an universal surface brightness within an ellipsoid - """ + """Class for an universal surface brightness within an ellipsoid.""" + def __init__(self): - self.param_names = ['amp', 'radius', 'e1', 'e2', 'center_x', 'center_y'] - self.lower_limit_default = {'amp': 0, 'radius': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - self.upper_limit_default = {'amp': 1000, 'radius': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + self.param_names = ["amp", "radius", "e1", "e2", "center_x", "center_y"] + self.lower_limit_default = { + "amp": 0, + "radius": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + self.upper_limit_default = { + "amp": 1000, + "radius": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, amp, radius, e1, e2, center_x, center_y): """ @@ -29,22 +42,22 @@ def function(self, x, y, amp, radius, e1, e2, center_x, center_y): :param center_y: center :return: surface brightness """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) r2 = x_**2 + y_**2 flux = np.zeros_like(x) flux[r2 <= radius**2] = 1 - area = np.pi * radius ** 2 + area = np.pi * radius**2 return amp / area * flux def function(x, y, amp, sigma, center_x, center_y): - """ - returns torus (ellipse with constant surface brightness) profile - """ + """Returns torus (ellipse with constant surface brightness) profile.""" x_shift = x - center_x y_shift = y - center_y area = np.pi * sigma**2 dist = (x_shift / sigma) ** 2 + (y_shift / 
sigma) ** 2 torus = np.zeros_like(x) torus[dist <= 1] = 1 - return amp/area * torus + return amp / area * torus diff --git a/lenstronomy/LightModel/Profiles/gaussian.py b/lenstronomy/LightModel/Profiles/gaussian.py index ccafb8bb1..ac1b89521 100644 --- a/lenstronomy/LightModel/Profiles/gaussian.py +++ b/lenstronomy/LightModel/Profiles/gaussian.py @@ -2,25 +2,35 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Gaussian(object): - """ - class for Gaussian light profile - The two-dimensional Gaussian profile amplitude is defined such that the 2D integral leads to the 'amp' value. + """Class for Gaussian light profile The two-dimensional Gaussian profile amplitude + is defined such that the 2D integral leads to the 'amp' value. profile name in LightModel module: 'GAUSSIAN' """ + def __init__(self): - self.param_names = ['amp', 'sigma', 'center_x', 'center_y'] - self.lower_limit_default = {'amp': 0, 'sigma': 0, 'center_x': -100, 'center_y': -100} - self.upper_limit_default = {'amp': 1000, 'sigma': 100, 'center_x': 100, 'center_y': 100} + self.param_names = ["amp", "sigma", "center_x", "center_y"] + self.lower_limit_default = { + "amp": 0, + "sigma": 0, + "center_x": -100, + "center_y": -100, + } + self.upper_limit_default = { + "amp": 1000, + "sigma": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - surface brightness per angular unit + """Surface brightness per angular unit. :param x: coordinate on the sky :param y: coordinate on the sky @@ -32,11 +42,10 @@ def function(self, x, y, amp, sigma, center_x=0, center_y=0): """ c = amp / (2 * np.pi * sigma**2) r2 = (x - center_x) ** 2 / sigma**2 + (y - center_y) ** 2 / sigma**2 - return c * np.exp(-r2 / 2.) + return c * np.exp(-r2 / 2.0) def total_flux(self, amp, sigma, center_x=0, center_y=0): - """ - integrated flux of the profile + """Integrated flux of the profile. :param amp: amplitude, such that 2D integral leads to this value :param sigma: sigma of Gaussian in each direction @@ -47,8 +56,7 @@ def total_flux(self, amp, sigma, center_x=0, center_y=0): return amp def light_3d(self, r, amp, sigma): - """ - 3D brightness per angular volume element + """3D brightness per angular volume element. :param r: 3d distance from center of profile :param amp: amplitude, such that 2D integral leads to this value @@ -62,14 +70,28 @@ def light_3d(self, r, amp, sigma): @export class GaussianEllipse(object): - """ - class for Gaussian light profile with ellipticity + """Class for Gaussian light profile with ellipticity. 
profile name in LightModel module: 'GAUSSIAN_ELLIPSE' """ - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 1000, 'sigma': 100, 'e1': -0.5, 'e2': -0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 1000, + "sigma": 100, + "e1": -0.5, + "e2": -0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.gaussian = Gaussian() @@ -87,12 +109,15 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): :param center_y: center of profile :return: surface brightness at (x, y) """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) return self.gaussian.function(x_, y_, amp, sigma, center_x=0, center_y=0) - def total_flux(self, amp, sigma=None, e1=None, e2=None, center_x=None, center_y=None): - """ - total integrated flux of profile + def total_flux( + self, amp, sigma=None, e1=None, e2=None, center_x=None, center_y=None + ): + """Total integrated flux of profile. :param amp: amplitude, such that 2D integral leads to this value :param sigma: sigma of Gaussian in each direction @@ -105,8 +130,7 @@ def total_flux(self, amp, sigma=None, e1=None, e2=None, center_x=None, center_y= return self.gaussian.total_flux(amp, sigma, center_x, center_y) def light_3d(self, r, amp, sigma, e1=0, e2=0): - """ - 3D brightness per angular volume element + """3D brightness per angular volume element. :param r: 3d distance from center of profile :param amp: amplitude, such that 2D integral leads to this value @@ -120,21 +144,35 @@ def light_3d(self, r, amp, sigma, e1=0, e2=0): @export class MultiGaussian(object): - """ - class for elliptical pseudo Jaffe lens light (2d projected light/mass distribution + """Class for elliptical pseudo Jaffe lens light (2d projected light/mass + distribution. profile name in LightModel module: 'MULTI_GAUSSIAN' """ - param_names = ['amp', 'sigma', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 1000, 'sigma': 100, 'e1': -0.5, 'e2': -0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "sigma", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 1000, + "sigma": 100, + "e1": -0.5, + "e2": -0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.gaussian = Gaussian() def function(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - surface brightness per angular unit + """Surface brightness per angular unit. :param x: coordinate on the sky :param y: coordinate on the sky @@ -150,8 +188,7 @@ def function(self, x, y, amp, sigma, center_x=0, center_y=0): return f_ def total_flux(self, amp, sigma, center_x=0, center_y=0): - """ - total integrated flux of profile + """Total integrated flux of profile. 
:param amp: list of amplitudes of individual Gaussian profiles :param sigma: list of widths of individual Gaussian profiles @@ -165,8 +202,7 @@ def total_flux(self, amp, sigma, center_x=0, center_y=0): return flux def function_split(self, x, y, amp, sigma, center_x=0, center_y=0): - """ - split surface brightness in individual components + """Split surface brightness in individual components. :param x: coordinate on the sky :param y: coordinate on the sky @@ -178,12 +214,13 @@ def function_split(self, x, y, amp, sigma, center_x=0, center_y=0): """ f_list = [] for i in range(len(amp)): - f_list.append(self.gaussian.function(x, y, amp[i], sigma[i], center_x, center_y)) + f_list.append( + self.gaussian.function(x, y, amp[i], sigma[i], center_x, center_y) + ) return f_list def light_3d(self, r, amp, sigma): - """ - 3D brightness per angular volume element + """3D brightness per angular volume element. :param r: 3d distance from center of profile :param amp: list of amplitudes of individual Gaussian profiles @@ -198,21 +235,34 @@ def light_3d(self, r, amp, sigma): @export class MultiGaussianEllipse(object): - """ - class for elliptical multi Gaussian profile + """Class for elliptical multi Gaussian profile. profile name in LightModel module: 'MULTI_GAUSSIAN_ELLIPSE' """ - param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 1000, 'sigma': 100, 'e1': -0.5, 'e2': -0.5, 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "sigma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "sigma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 1000, + "sigma": 100, + "e1": -0.5, + "e2": -0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.gaussian = Gaussian() def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - surface brightness per angular unit + """Surface brightness per angular unit. :param x: coordinate on the sky :param y: coordinate on the sky @@ -224,16 +274,19 @@ def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): :param center_y: center of profile :return: surface brightness at (x, y) """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) f_ = np.zeros_like(x) for i in range(len(amp)): - f_ += self.gaussian.function(x_, y_, amp[i], sigma[i], center_x=0, center_y=0) + f_ += self.gaussian.function( + x_, y_, amp[i], sigma[i], center_x=0, center_y=0 + ) return f_ def total_flux(self, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - total integrated flux of profile + """Total integrated flux of profile. :param amp: list of amplitudes of individual Gaussian profiles :param sigma: list of widths of individual Gaussian profiles @@ -249,8 +302,7 @@ def total_flux(self, amp, sigma, e1, e2, center_x=0, center_y=0): return flux def function_split(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): - """ - split surface brightness in individual components + """Split surface brightness in individual components. 
:param x: coordinate on the sky :param y: coordinate on the sky @@ -262,15 +314,18 @@ def function_split(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0): :param center_y: center of profile :return: list of arrays of surface brightness """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) f_list = [] for i in range(len(amp)): - f_list.append(self.gaussian.function(x_, y_, amp[i], sigma[i], center_x=0, center_y=0)) + f_list.append( + self.gaussian.function(x_, y_, amp[i], sigma[i], center_x=0, center_y=0) + ) return f_list def light_3d(self, r, amp, sigma, e1=0, e2=0): - """ - 3D brightness per angular volume element + """3D brightness per angular volume element. :param r: 3d distance from center of profile :param amp: list of amplitudes of individual Gaussian profiles diff --git a/lenstronomy/LightModel/Profiles/hernquist.py b/lenstronomy/LightModel/Profiles/hernquist.py index ba8abecd5..7a33bb4a6 100644 --- a/lenstronomy/LightModel/Profiles/hernquist.py +++ b/lenstronomy/LightModel/Profiles/hernquist.py @@ -1,18 +1,28 @@ import lenstronomy.Util.param_util as param_util -__all__ = ['Hernquist', 'HernquistEllipse'] +__all__ = ["Hernquist", "HernquistEllipse"] class Hernquist(object): - """ - class for pseudo Jaffe lens light (2d projected light/mass distribution - """ + """Class for pseudo Jaffe lens light (2d projected light/mass distribution.""" + def __init__(self): from lenstronomy.LensModel.Profiles.hernquist import Hernquist as Hernquist_lens + self.lens = Hernquist_lens() - self.param_names = ['amp', 'Rs', 'center_x', 'center_y'] - self.lower_limit_default = {'amp': 0, 'Rs': 0, 'center_x': -100, 'center_y': -100} - self.upper_limit_default = {'amp': 100, 'Rs': 100, 'center_x': 100, 'center_y': 100} + self.param_names = ["amp", "Rs", "center_x", "center_y"] + self.lower_limit_default = { + "amp": 0, + "Rs": 0, + "center_x": -100, + "center_y": -100, + } + self.upper_limit_default = { + "amp": 100, + "Rs": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, amp, Rs, center_x=0, center_y=0): """ @@ -41,15 +51,30 @@ def light_3d(self, r, amp, Rs): class HernquistEllipse(object): - """ - class for elliptical pseudo Jaffe lens light (2d projected light/mass distribution - """ - param_names = ['amp', 'Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'Rs': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + """Class for elliptical pseudo Jaffe lens light (2d projected light/mass + distribution.""" + + param_names = ["amp", "Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "Rs": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): from lenstronomy.LensModel.Profiles.hernquist import Hernquist as Hernquist_lens + self.lens = Hernquist_lens() self.spherical = Hernquist() @@ -66,7 +91,9 @@ def function(self, x, y, amp, Rs, e1, e2, center_x=0, center_y=0): :param center_y: :return: """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) return self.spherical.function(x_, y_, amp, Rs) 
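Aside (not part of the patch): several of the elliptical light profiles touched in these hunks — HernquistEllipse, GaussianEllipse, MultiGaussianEllipse, Ellipsoid — share the same pattern: map (x, y) through param_util.transform_e1e2_product_average and evaluate the corresponding spherical profile on the transformed coordinates. The sketch below illustrates that pattern under the common (e1, e2) -> (position angle, axis ratio) convention with a product-average scaling; the helper names and the exact normalization are assumptions for illustration, not the lenstronomy API (see lenstronomy.Util.param_util for the authoritative version).

```python
import numpy as np

def ellipticity2phi_q(e1, e2):
    # common convention: position angle from the eccentricity components,
    # axis ratio from their modulus (assumed here, not copied from lenstronomy)
    phi = np.arctan2(e2, e1) / 2.0
    c = np.minimum(np.sqrt(e1**2 + e2**2), 0.9999)  # guard against q -> 0
    q = (1.0 - c) / (1.0 + c)
    return phi, q

def transform_product_average_sketch(x, y, e1, e2, center_x=0.0, center_y=0.0):
    # rotate into the major-axis frame, then rescale the axes such that the
    # product of the semi-axes is preserved (illustrative normalization)
    phi, q = ellipticity2phi_q(e1, e2)
    cos_phi, sin_phi = np.cos(phi), np.sin(phi)
    x_shift, y_shift = x - center_x, y - center_y
    x_rot = cos_phi * x_shift + sin_phi * y_shift
    y_rot = -sin_phi * x_shift + cos_phi * y_shift
    return x_rot * np.sqrt(q), y_rot / np.sqrt(q)

# usage: evaluate any circular profile f(x_, y_) on the transformed coordinates
x_, y_ = transform_product_average_sketch(np.array([0.3]), np.array([-0.1]), e1=0.1, e2=0.05)
```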
def light_3d(self, r, amp, Rs, e1=0, e2=0): diff --git a/lenstronomy/LightModel/Profiles/interpolation.py b/lenstronomy/LightModel/Profiles/interpolation.py index 2c438aba1..d1c6414ee 100644 --- a/lenstronomy/LightModel/Profiles/interpolation.py +++ b/lenstronomy/LightModel/Profiles/interpolation.py @@ -1,33 +1,46 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import scipy.interpolate import numpy as np import lenstronomy.Util.util as util -__all__ = ['Interpol'] +__all__ = ["Interpol"] class Interpol(object): + """Class which uses an interpolation of an image to compute the surface brightness. + + parameters are 'image': 2d numpy array of surface brightness per square arc second + (not integrated flux per pixel!) 'center_x': coordinate of center of image in + angular units (i.e. arc seconds) 'center_y': coordinate of center of image in + angular units (i.e. arc seconds) 'phi_G': rotation of image relative to the + rectangular ra-to-dec orientation 'scale': arcseconds per pixel of the image to be + interpolated """ - class which uses an interpolation of an image to compute the surface brightness - parameters are - 'image': 2d numpy array of surface brightness per square arc second (not integrated flux per pixel!) - 'center_x': coordinate of center of image in angular units (i.e. arc seconds) - 'center_y': coordinate of center of image in angular units (i.e. arc seconds) - 'phi_G': rotation of image relative to the rectangular ra-to-dec orientation - 'scale': arcseconds per pixel of the image to be interpolated - - """ - param_names = ['image', 'amp', 'center_x', 'center_y', 'phi_G', 'scale'] - lower_limit_default = {'amp': 0, 'center_x': -1000, 'center_y': -1000, 'scale': 0.000000001, 'phi_G': -np.pi} - upper_limit_default = {'amp': 1000000, 'center_x': 1000, 'center_y': 1000, 'scale': 10000000000, 'phi_G': np.pi} + param_names = ["image", "amp", "center_x", "center_y", "phi_G", "scale"] + lower_limit_default = { + "amp": 0, + "center_x": -1000, + "center_y": -1000, + "scale": 0.000000001, + "phi_G": -np.pi, + } + upper_limit_default = { + "amp": 1000000, + "center_x": 1000, + "center_y": 1000, + "scale": 10000000000, + "phi_G": np.pi, + } def __init__(self): pass - def function(self, x, y, image=None, amp=1, center_x=0, center_y=0, phi_G=0, scale=1): + def function( + self, x, y, image=None, amp=1, center_x=0, center_y=0, phi_G=0, scale=1 + ): """ :param x: x-coordinate to evaluate surface brightness @@ -46,7 +59,7 @@ def function(self, x, y, image=None, amp=1, center_x=0, center_y=0, phi_G=0, sca return amp * self.image_interp(x_, y_, image) def image_interp(self, x, y, image): - if not hasattr(self, '_image_interp'): + if not hasattr(self, "_image_interp"): # Setup the interpolator. # Note that 'x' and 'y' in this block only refer to first and second # image array axes. Outside this block it is more complicated. 
@@ -56,7 +69,9 @@ def image_interp(self, x, y, image): image_bounds[1:-1, 1:-1] = image x_grid = np.linspace(start=-(nx0 - 1) / 2, stop=(nx0 - 1) / 2, num=nx0) y_grid = np.linspace(start=-(ny0 - 1) / 2, stop=(ny0 - 1) / 2, num=ny0) - self._image_interp = scipy.interpolate.RectBivariateSpline(x_grid, y_grid, image_bounds, kx=1, ky=1, s=0) + self._image_interp = scipy.interpolate.RectBivariateSpline( + x_grid, y_grid, image_bounds, kx=1, ky=1, s=0 + ) # y and x must be flipped in call to interpolator # (try reversing, the unit tests will fail) @@ -64,14 +79,14 @@ def image_interp(self, x, y, image): @staticmethod def total_flux(image, scale, amp=1, center_x=0, center_y=0, phi_G=0): - """ - sums up all the image surface brightness (image pixels defined in surface brightness at the coordinate of the - pixel) times pixel area + """Sums up all the image surface brightness (image pixels defined in surface + brightness at the coordinate of the pixel) times pixel area. - :param image: pixelized surface brightness used to interpolate in units of surface brightness - (flux per square arc seconds, not flux per pixel!) + :param image: pixelized surface brightness used to interpolate in units of + surface brightness (flux per square arc seconds, not flux per pixel!) :param scale: scale of the pixel in units of angle - :param amp: linear scaling parameter of the surface brightness multiplicative with the initial image + :param amp: linear scaling parameter of the surface brightness multiplicative + with the initial image :param center_x: center of image in angular coordinates :param center_y: center of image in angular coordinates :param phi_G: rotation angle @@ -99,6 +114,6 @@ def coord2image_pixel(ra, dec, center_x, center_y, phi_G, scale): return x, y def delete_cache(self): - """delete the cached interpolated image""" - if hasattr(self, '_image_interp'): + """Delete the cached interpolated image.""" + if hasattr(self, "_image_interp"): del self._image_interp diff --git a/lenstronomy/LightModel/Profiles/moffat.py b/lenstronomy/LightModel/Profiles/moffat.py index 62e0c8f94..e31d3622e 100644 --- a/lenstronomy/LightModel/Profiles/moffat.py +++ b/lenstronomy/LightModel/Profiles/moffat.py @@ -1,29 +1,39 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to make a Moffat profile -__all__ = ['Moffat'] +__all__ = ["Moffat"] class Moffat(object): - """ - this class contains functions to evaluate a Moffat surface brightness profile + """This class contains functions to evaluate a Moffat surface brightness profile. .. math:: I(r) = I_0 * (1 + (r/\\alpha)^2)^{-\\beta} with :math:`I_0 = amp`. - """ + def __init__(self): - self.param_names = ['amp', 'alpha', 'beta', 'center_x', 'center_y'] - self.lower_limit_default = {'amp': 0, 'alpha': 0, 'beta': 0, 'center_x': -100, 'center_y': -100} - self.upper_limit_default = {'amp': 100, 'alpha': 10, 'beta': 10, 'center_x': 100, 'center_y': 100} + self.param_names = ["amp", "alpha", "beta", "center_x", "center_y"] + self.lower_limit_default = { + "amp": 0, + "alpha": 0, + "beta": 0, + "center_x": -100, + "center_y": -100, + } + self.upper_limit_default = { + "amp": 100, + "alpha": 10, + "beta": 10, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, amp, alpha, beta, center_x=0, center_y=0): - """ - 2D Moffat profile + """2D Moffat profile. 
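# Sketch of the flux bookkeeping for the Interpol profile shown above (assumption:
# total_flux multiplies the summed surface brightness by the pixel area and by amp,
# as its docstring states; values are illustrative):
import numpy as np
from lenstronomy.LightModel.Profiles.interpolation import Interpol

image = np.ones((20, 20))   # surface brightness per square arcsecond
scale = 0.1                 # arcseconds per pixel
interp = Interpol()
flux = interp.total_flux(image=image, scale=scale, amp=2.0)
# expected per the docstring: 2.0 * image.sum() * scale**2 = 8.0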
:param x: x-position (angle) :param y: y-position (angle) @@ -37,4 +47,4 @@ def function(self, x, y, amp, alpha, beta, center_x=0, center_y=0): x_shift = x - center_x y_shift = y - center_y - return amp * (1. + (x_shift**2+y_shift**2)/alpha**2)**(-beta) + return amp * (1.0 + (x_shift**2 + y_shift**2) / alpha**2) ** (-beta) diff --git a/lenstronomy/LightModel/Profiles/nie.py b/lenstronomy/LightModel/Profiles/nie.py index 0ff4bf304..aabcdcdfb 100644 --- a/lenstronomy/LightModel/Profiles/nie.py +++ b/lenstronomy/LightModel/Profiles/nie.py @@ -2,18 +2,31 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.LightModel.Profiles.profile_base import LightProfileBase -__all__ = ['NIE'] +__all__ = ["NIE"] class NIE(LightProfileBase): - """ - non-divergent isothermal ellipse (projected) - This is effectively the convergence profile of the NIE lens model with an amplitude 'amp' rather than an Einstein - radius 'theta_E' - """ - param_names = ['amp', 'e1', 'e2', 's_scale', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'e1': -0.5, 'e2': -0.5, 's_scale': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'e1': 0.5, 'e2': 0.5, 's_scale': 100, 'center_x': 100, 'center_y': 100} + """Non-divergent isothermal ellipse (projected) This is effectively the convergence + profile of the NIE lens model with an amplitude 'amp' rather than an Einstein radius + 'theta_E'.""" + + param_names = ["amp", "e1", "e2", "s_scale", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "e1": -0.5, + "e2": -0.5, + "s_scale": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "e1": 0.5, + "e2": 0.5, + "s_scale": 100, + "center_x": 100, + "center_y": 100, + } def function(self, x, y, amp, e1, e2, s_scale, center_x=0, center_y=0): """ @@ -30,13 +43,14 @@ def function(self, x, y, amp, e1, e2, s_scale, center_x=0, center_y=0): """ # phi_G, q = param_util.ellipticity2phi_q(e1, e2) # s = s_scale * np.sqrt((1 + q ** 2) / (2 * q ** 2)) - x__, y__ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) - f_ = amp / 2. * (s_scale**2 + x__ ** 2 + y__ ** 2) ** (-1. / 2) + x__, y__ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) + f_ = amp / 2.0 * (s_scale**2 + x__**2 + y__**2) ** (-1.0 / 2) return f_ def light_3d(self, r, amp, e1, e2, s_scale, center_x=0, center_y=0): - """ - 3d light distribution (in spherical regime) + """3d light distribution (in spherical regime) :param r: 3d radius :param amp: surface brightness normalization @@ -52,8 +66,8 @@ def light_3d(self, r, amp, e1, e2, s_scale, center_x=0, center_y=0): @staticmethod def _amp2rho(amp): - """ - converts surface brightness normalization 'amp' into 3d density normalization rho + """Converts surface brightness normalization 'amp' into 3d density normalization + rho. 
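# Quick numerical check of the Moffat return value against the formula quoted in
# its class docstring, I(r) = amp * (1 + (r/alpha)**2)**(-beta) (illustrative values):
import numpy as np
from lenstronomy.LightModel.Profiles.moffat import Moffat

moffat = Moffat()
x, y = np.array([0.3]), np.array([0.4])   # r = 0.5
amp, alpha, beta = 2.0, 1.0, 3.0
value = moffat.function(x, y, amp, alpha, beta)
expected = amp * (1.0 + 0.5**2 / alpha**2) ** (-beta)
assert np.allclose(value, expected)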
:param amp: :return: rho diff --git a/lenstronomy/LightModel/Profiles/p_jaffe.py b/lenstronomy/LightModel/Profiles/p_jaffe.py index 89ad4ea11..c7910f411 100644 --- a/lenstronomy/LightModel/Profiles/p_jaffe.py +++ b/lenstronomy/LightModel/Profiles/p_jaffe.py @@ -1,18 +1,30 @@ import lenstronomy.Util.param_util as param_util -__all__ = ['PJaffe', 'PJaffeEllipse'] +__all__ = ["PJaffe", "PJaffeEllipse"] class PJaffe(object): - """ - class for pseudo Jaffe lens light (2d projected light/mass distribution) - """ - param_names = ['amp', 'Ra', 'Rs', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'Ra': 0, 'Rs': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'Ra': 100, 'Rs': 100, 'center_x': 100, 'center_y': 100} + """Class for pseudo Jaffe lens light (2d projected light/mass distribution)""" + + param_names = ["amp", "Ra", "Rs", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "Ra": 0, + "Rs": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "Ra": 100, + "Rs": 100, + "center_x": 100, + "center_y": 100, + } def __init__(self): from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe as PJaffe_lens + self.lens = PJaffe_lens() def function(self, x, y, amp, Ra, Rs, center_x=0, center_y=0): @@ -44,15 +56,31 @@ def light_3d(self, r, amp, Ra, Rs): class PJaffeEllipse(object): - """ - calss for elliptical pseudo Jaffe lens light - """ - param_names = ['amp', 'Ra', 'Rs', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'Ra': 0, 'Rs': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'Ra': 100, 'Rs': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + """Calss for elliptical pseudo Jaffe lens light.""" + + param_names = ["amp", "Ra", "Rs", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "Ra": 0, + "Rs": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "Ra": 100, + "Rs": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe as PJaffe_lens + self.lens = PJaffe_lens() self.spherical = PJaffe() @@ -68,7 +96,9 @@ def function(self, x, y, amp, Ra, Rs, e1, e2, center_x=0, center_y=0): :param center_y: :return: """ - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x, center_y + ) return self.spherical.function(x_, y_, amp, Ra, Rs) def light_3d(self, r, amp, Ra, Rs, e1=0, e2=0): diff --git a/lenstronomy/LightModel/Profiles/power_law.py b/lenstronomy/LightModel/Profiles/power_law.py index 291a3034e..8f08c1ded 100644 --- a/lenstronomy/LightModel/Profiles/power_law.py +++ b/lenstronomy/LightModel/Profiles/power_law.py @@ -3,17 +3,29 @@ import numpy as np import scipy.special as special -__all__ = ['PowerLaw'] +__all__ = ["PowerLaw"] class PowerLaw(object): - """ - class for power-law elliptical light distribution + """Class for power-law elliptical light distribution.""" - """ - param_names = ['amp', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'gamma': 1, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'gamma': 3, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} + param_names = ["amp", "gamma", "e1", "e2", "center_x", "center_y"] + lower_limit_default = 
{ + "amp": 0, + "gamma": 1, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "gamma": 3, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } def __init__(self): self.lens = SPP() @@ -31,11 +43,13 @@ def function(self, x, y, amp, gamma, e1, e2, center_x=0, center_y=0): :param center_y: center :return: projected flux """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x, center_y + ) _s = 0.0001 - a = x_ ** 2 + y_ ** 2 + _s**2 + a = x_**2 + y_**2 + _s**2 - sigma = amp * a ** ((1. - gamma)/2.) + sigma = amp * a ** ((1.0 - gamma) / 2.0) return sigma def light_3d(self, r, amp, gamma, e1=0, e2=0): @@ -49,7 +63,7 @@ def light_3d(self, r, amp, gamma, e1=0, e2=0): :return: """ rho0 = self._amp2rho(amp, gamma) - rho = rho0 / r ** gamma + rho = rho0 / r**gamma return rho @staticmethod @@ -60,5 +74,9 @@ def _amp2rho(amp, gamma): :param gamma: :return: """ - factor = np.sqrt(np.pi) * special.gamma(1./2*(-1+gamma))/special.gamma(gamma/2.) + factor = ( + np.sqrt(np.pi) + * special.gamma(1.0 / 2 * (-1 + gamma)) + / special.gamma(gamma / 2.0) + ) return amp / factor diff --git a/lenstronomy/LightModel/Profiles/profile_base.py b/lenstronomy/LightModel/Profiles/profile_base.py index b3da6f6a8..c6cf0186e 100644 --- a/lenstronomy/LightModel/Profiles/profile_base.py +++ b/lenstronomy/LightModel/Profiles/profile_base.py @@ -1,11 +1,9 @@ - -__all__ = ['LightProfileBase'] +__all__ = ["LightProfileBase"] class LightProfileBase(object): - """ - base class of all light profiles - """ + """Base class of all light profiles.""" + def __init__(self): pass @@ -17,7 +15,7 @@ def function(self, *args, **kwargs): :param kwargs: keyword arguments of profile :return: surface brightness, raise as definition is not defined """ - raise ValueError('function definition not defined in the light profile.') + raise ValueError("function definition not defined in the light profile.") def light_3d(self, *args, **kwargs): """ @@ -26,4 +24,4 @@ def light_3d(self, *args, **kwargs): :param kwargs: keyword arguments of profile :return: 3d light profile, raise as definition is not defined """ - raise ValueError('light_3d definition not defined in the light profile.') + raise ValueError("light_3d definition not defined in the light profile.") diff --git a/lenstronomy/LightModel/Profiles/sersic.py b/lenstronomy/LightModel/Profiles/sersic.py index ab840274d..e7d9863b2 100644 --- a/lenstronomy/LightModel/Profiles/sersic.py +++ b/lenstronomy/LightModel/Profiles/sersic.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class to make a Sersic profile @@ -6,13 +6,13 @@ from lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Sersic(SersicUtil): - """ - this class contains functions to evaluate a spherical Sersic function + """This class contains functions to evaluate a spherical Sersic function. .. 
math:: I(R) = I_0 \\exp \\left[ -b_n (R/R_{\\rm Sersic})^{\\frac{1}{n}}\\right] @@ -20,14 +20,27 @@ class Sersic(SersicUtil): with :math:`I_0 = amp` and with :math:`b_{n}\\approx 1.999n-0.327` - """ - param_names = ['amp', 'R_sersic', 'n_sersic', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'R_sersic': 100, 'n_sersic': 8, 'center_x': 100, 'center_y': 100} - - def function(self, x, y, amp, R_sersic, n_sersic, center_x=0, center_y=0, max_R_frac=1000.0): + param_names = ["amp", "R_sersic", "n_sersic", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "R_sersic": 100, + "n_sersic": 8, + "center_x": 100, + "center_y": 100, + } + + def function( + self, x, y, amp, R_sersic, n_sersic, center_x=0, center_y=0, max_R_frac=1000.0 + ): """ :param x: @@ -40,15 +53,16 @@ def function(self, x, y, amp, R_sersic, n_sersic, center_x=0, center_y=0, max_R_ :param max_R_frac: maximum window outside which the mass is zeroed, in units of R_sersic (float) :return: Sersic profile value at (x, y) """ - R = self.get_distance_from_center(x, y, e1=0, e2=0, center_x=center_x, center_y=center_y) + R = self.get_distance_from_center( + x, y, e1=0, e2=0, center_x=center_x, center_y=center_y + ) result = self._r_sersic(R, R_sersic, n_sersic, max_R_frac) return amp * result @export class SersicElliptic(SersicUtil): - """ - this class contains functions to evaluate an elliptical Sersic function + """This class contains functions to evaluate an elliptical Sersic function. .. math:: @@ -58,15 +72,41 @@ class SersicElliptic(SersicUtil): :math:`R = \\sqrt{q \\theta^2_x + \\theta^2_y/q}` and with :math:`b_{n}\\approx 1.999n-0.327` - """ - param_names = ['amp', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, - 'center_y': -100} - upper_limit_default = {'amp': 100, 'R_sersic': 100, 'n_sersic': 8, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, - 'center_y': 100} - def function(self, x, y, amp, R_sersic, n_sersic, e1, e2, center_x=0, center_y=0, max_R_frac=1000.0): + param_names = ["amp", "R_sersic", "n_sersic", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "R_sersic": 100, + "n_sersic": 8, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + + def function( + self, + x, + y, + amp, + R_sersic, + n_sersic, + e1, + e2, + center_x=0, + center_y=0, + max_R_frac=1000.0, + ): """ :param x: @@ -90,8 +130,8 @@ def function(self, x, y, amp, R_sersic, n_sersic, e1, e2, center_x=0, center_y=0 @export class CoreSersic(SersicUtil): - """ - this class contains the Core-Sersic function introduced by e.g. Trujillo et al. 2004 + """This class contains the Core-Sersic function introduced by e.g. Trujillo et al. + 2004. .. math:: @@ -104,16 +144,56 @@ class CoreSersic(SersicUtil): I' = I_b 2^{-\\gamma/ \\alpha} \\exp \\left[b_n 2^{1 / (n\\alpha)} (R_b/R_e)^{1/n} \\right] where :math:`I_b` is the intensity at the break radius and :math:`R = \\sqrt{q \\theta^2_x + \\theta^2_y/q}`. 
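# Illustrative evaluation of the spherical and elliptical Sersic profiles defined
# above (assumption: the default constructor arguments of SersicUtil apply; b_n is
# computed internally from n_sersic):
import numpy as np
from lenstronomy.LightModel.Profiles.sersic import Sersic, SersicElliptic

x = np.linspace(-1.0, 1.0, 100)
y = np.zeros_like(x)
sersic = Sersic()
I_sph = sersic.function(x, y, amp=10.0, R_sersic=0.5, n_sersic=4.0)
sersic_ell = SersicElliptic()
I_ell = sersic_ell.function(x, y, amp=10.0, R_sersic=0.5, n_sersic=4.0, e1=0.2, e2=0.0)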
- """ - param_names = ['amp', 'R_sersic', 'Rb', 'n_sersic', 'gamma', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'Rb': 0, 'n_sersic': 0.5, 'gamma': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, - 'center_y': -100} - upper_limit_default = {'amp': 100, 'Rb': 100, 'n_sersic': 8, 'gamma': 10, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, - 'center_y': 100} - - def function(self, x, y, amp, R_sersic, Rb, n_sersic, gamma, e1, e2, center_x=0, center_y=0, alpha=3.0, - max_R_frac=1000.0): + + param_names = [ + "amp", + "R_sersic", + "Rb", + "n_sersic", + "gamma", + "e1", + "e2", + "center_x", + "center_y", + ] + lower_limit_default = { + "amp": 0, + "Rb": 0, + "n_sersic": 0.5, + "gamma": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "Rb": 100, + "n_sersic": 8, + "gamma": 10, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + + def function( + self, + x, + y, + amp, + R_sersic, + Rb, + n_sersic, + gamma, + e1, + e2, + center_x=0, + center_y=0, + alpha=3.0, + max_R_frac=1000.0, + ): """ :param x: :param y: @@ -134,6 +214,16 @@ def function(self, x, y, amp, R_sersic, Rb, n_sersic, gamma, e1, e2, center_x=0, R_ = self.get_distance_from_center(x, y, e1, e2, center_x, center_y) R = self._R_stable(R_) bn = self.b_n(n_sersic) - result = amp * (1 + (Rb / R) ** alpha) ** (gamma / alpha) * \ - np.exp(-bn * (((R ** alpha + Rb ** alpha) / R_sersic ** alpha) ** (1. / (alpha * n_sersic)) - 1.)) + result = ( + amp + * (1 + (Rb / R) ** alpha) ** (gamma / alpha) + * np.exp( + -bn + * ( + ((R**alpha + Rb**alpha) / R_sersic**alpha) + ** (1.0 / (alpha * n_sersic)) + - 1.0 + ) + ) + ) return np.nan_to_num(result) diff --git a/lenstronomy/LightModel/Profiles/shapelets.py b/lenstronomy/LightModel/Profiles/shapelets.py index 746f9f5fe..6887efb16 100644 --- a/lenstronomy/LightModel/Profiles/shapelets.py +++ b/lenstronomy/LightModel/Profiles/shapelets.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.polynomial.hermite as hermite @@ -7,13 +7,13 @@ import lenstronomy.Util.util as util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Shapelets(object): - """ - class for 2d cartesian Shapelets. + """Class for 2d cartesian Shapelets. Sources: Refregier 2003: Shapelets: I. A Method for Image Analysis https://arxiv.org/abs/astro-ph/0105178 @@ -42,22 +42,42 @@ class for 2d cartesian Shapelets. .. math:: B_{\\bf n}({\\bf x};\\beta) \\equiv \\beta^{-1/2} \\phi_{\\bf n}(\\beta^{-1}{\\bf x}). 
- """ - param_names = ['amp', 'beta', 'n1', 'n2', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'beta': 0.01, 'n1': 0, 'n2': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'beta': 100, 'n1': 150, 'n2': 150, 'center_x': 100, 'center_y': 100} - def __init__(self, interpolation=False, precalc=False, stable_cut=True, cut_scale=5): - """ - load interpolation of the Hermite polynomials in a range [-30,30] in order n<= 150 - - :param interpolation: boolean; if True, uses interpolated pre-calculated shapelets in the evaluation - :param precalc: boolean; if True interprets as input (x, y) as pre-calculated normalized shapelets + param_names = ["amp", "beta", "n1", "n2", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "beta": 0.01, + "n1": 0, + "n2": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "beta": 100, + "n1": 150, + "n2": 150, + "center_x": 100, + "center_y": 100, + } + + def __init__( + self, interpolation=False, precalc=False, stable_cut=True, cut_scale=5 + ): + """Load interpolation of the Hermite polynomials in a range [-30,30] in order + n<= 150. + + :param interpolation: boolean; if True, uses interpolated pre-calculated + shapelets in the evaluation + :param precalc: boolean; if True interprets as input (x, y) as pre-calculated + normalized shapelets :param stable_cut: boolean; if True, sets the values outside of - :math:`\\sqrt\\left(n_{\\rm max} + 1 \\right) \\beta s_{\\rm cut scale} = 0`. - :param cut_scale: float, scaling parameter where to cut the shapelets. This is for numerical reasons such that - the polynomials in the Hermite function do not get unstable. + :math:`\\sqrt\\left(n_{\\rm max} + 1 \\right) \\beta s_{\\rm cut scale} = + 0`. + :param cut_scale: float, scaling parameter where to cut the shapelets. This is + for numerical reasons such that the polynomials in the Hermite function do + not get unstable. """ self._interpolation = interpolation @@ -69,7 +89,7 @@ def __init__(self, interpolation=False, precalc=False, stable_cut=True, cut_scal self.H_interp = [[] for _ in range(0, n_order)] self.x_grid = np.linspace(-50, 50, 6000) for k in range(0, n_order): - n_array = np.zeros(k+1) + n_array = np.zeros(k + 1) n_array[k] = 1 values = self.hermval(self.x_grid, n_array) self.H_interp[k] = values @@ -101,8 +121,7 @@ def hermval(self, x, n_array, tensor=True): return out def function(self, x, y, amp, beta, n1, n2, center_x, center_y): - """ - 2d cartesian shapelet + """2d cartesian shapelet. :param x: x-coordinate :param y: y-coordinate @@ -119,40 +138,42 @@ def function(self, x, y, amp, beta, n1, n2, center_x, center_y): return amp * x[n1] * y[n2] # / beta x_ = x - center_x y_ = y - center_y - return np.nan_to_num(amp * self.phi_n(n1, x_/beta) * self.phi_n(n2, y_/beta)) # /beta + return np.nan_to_num( + amp * self.phi_n(n1, x_ / beta) * self.phi_n(n2, y_ / beta) + ) # /beta def H_n(self, n, x): - """ - constructs the Hermite polynomial of order n at position x (dimensionless) + """Constructs the Hermite polynomial of order n at position x (dimensionless) :param n: The n'the basis function. :param x: 1-dim position (dimensionless) :type x: float or numpy array. - :returns: array-- H_n(x). + :returns: array-- H_n(x). 
""" if not self._interpolation: - n_array = np.zeros(n+1) + n_array = np.zeros(n + 1) n_array[n] = 1 - return self.hermval(x, n_array, tensor=False) # attention, this routine calculates every single hermite polynomial and multiplies it with zero (exept the right one) + return self.hermval( + x, n_array, tensor=False + ) # attention, this routine calculates every single hermite polynomial and multiplies it with zero (exept the right one) else: return np.interp(x, self.x_grid, self.H_interp[n]) def phi_n(self, n, x): - """ - constructs the 1-dim basis function (formula (1) in Refregier et al. 2001) + """Constructs the 1-dim basis function (formula (1) in Refregier et al. 2001) :param n: The n'the basis function. :type n: int. :param x: 1-dim position (dimensionless) :type x: float or numpy array. - :returns: array-- phi_n(x). + :returns: array-- phi_n(x). """ - prefactor = 1./np.sqrt(2**n*np.sqrt(np.pi)*math.factorial(n)) - return prefactor*self.H_n(n, x)*np.exp(-x**2/2.) + prefactor = 1.0 / np.sqrt(2**n * np.sqrt(np.pi) * math.factorial(n)) + return prefactor * self.H_n(n, x) * np.exp(-(x**2) / 2.0) def pre_calc(self, x, y, beta, n_order, center_x, center_y): - """ - calculates the H_n(x) and H_n(y) for a given x-array and y-array for the full order in the polynomials + """Calculates the H_n(x) and H_n(y) for a given x-array and y-array for the full + order in the polynomials. :param x: x-coordinates (numpy array) :param y: 7-coordinates (numpy array) @@ -167,27 +188,27 @@ def pre_calc(self, x, y, beta, n_order, center_x, center_y): n = len(np.atleast_1d(x)) H_x = np.empty((n_order + 1, n)) H_y = np.empty((n_order + 1, n)) - exp_x = np.exp(-(x_ / beta) ** 2 / 2.) - exp_y = np.exp(-(y_ / beta) ** 2 / 2.) + exp_x = np.exp(-((x_ / beta) ** 2) / 2.0) + exp_y = np.exp(-((y_ / beta) ** 2) / 2.0) if n_order > 170: - raise ValueError('polynomial order to large', n_order) - for n in range(0, n_order+1): - prefactor = 1./np.sqrt(2**n*np.sqrt(np.pi)*math.factorial(n)) - n_array = np.zeros(n+1) + raise ValueError("polynomial order to large", n_order) + for n in range(0, n_order + 1): + prefactor = 1.0 / np.sqrt(2**n * np.sqrt(np.pi) * math.factorial(n)) + n_array = np.zeros(n + 1) n_array[n] = 1 - H_x[n] = self.hermval(x_/beta, n_array, tensor=False) * prefactor * exp_x - H_y[n] = self.hermval(y_/beta, n_array, tensor=False) * prefactor * exp_y + H_x[n] = self.hermval(x_ / beta, n_array, tensor=False) * prefactor * exp_x + H_y[n] = self.hermval(y_ / beta, n_array, tensor=False) * prefactor * exp_y return H_x, H_y @export class ShapeletSet(object): - """ - class to operate on entire shapelet set limited by a maximal polynomial order n_max, such that n1 + n2 <= n_max - """ - param_names = ['amp', 'n_max', 'beta', 'center_x', 'center_y'] - lower_limit_default = {'beta': 0.01, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'beta': 100, 'center_x': 100, 'center_y': 100} + """Class to operate on entire shapelet set limited by a maximal polynomial order + n_max, such that n1 + n2 <= n_max.""" + + param_names = ["amp", "n_max", "beta", "center_x", "center_y"] + lower_limit_default = {"beta": 0.01, "center_x": -100, "center_y": -100} + upper_limit_default = {"beta": 100, "center_x": 100, "center_y": 100} def __init__(self): self.shapelets = Shapelets(precalc=True) @@ -204,13 +225,20 @@ def function(self, x, y, amp, n_max, beta, center_x=0, center_y=0): :param center_y: shapelet center :return: surface brightness of combined shapelet set """ - num_param = int((n_max+1)*(n_max+2)/2) + num_param = 
int((n_max + 1) * (n_max + 2) / 2) f_ = np.zeros(len(np.atleast_1d(x))) n1 = 0 n2 = 0 H_x, H_y = self.shapelets.pre_calc(x, y, beta, n_max, center_x, center_y) for i in range(num_param): - kwargs_source_shapelet = {'center_x': center_x, 'center_y': center_y, 'n1': n1, 'n2': n2, 'beta': beta, 'amp': amp[i]} + kwargs_source_shapelet = { + "center_x": center_x, + "center_y": center_y, + "n1": n1, + "n2": n2, + "beta": beta, + "amp": amp[i], + } out = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet) f_ += out if n1 == 0: @@ -226,8 +254,7 @@ def function(self, x, y, amp, n_max, beta, center_x=0, center_y=0): return np.nan_to_num(f_) def function_split(self, x, y, amp, n_max, beta, center_x=0, center_y=0): - """ - splits shapelet set in list of individual shapelet basis function responses + """Splits shapelet set in list of individual shapelet basis function responses. :param x: x-coordinates :param y: y-coordinates @@ -238,13 +265,20 @@ def function_split(self, x, y, amp, n_max, beta, center_x=0, center_y=0): :param center_y: shapelet center :return: list of individual shapelet basis function responses """ - num_param = int((n_max+1)*(n_max+2)/2) + num_param = int((n_max + 1) * (n_max + 2) / 2) A = [] n1 = 0 n2 = 0 H_x, H_y = self.shapelets.pre_calc(x, y, beta, n_max, center_x, center_y) for i in range(num_param): - kwargs_source_shapelet = {'center_x': center_x, 'center_y': center_y, 'n1': n1, 'n2': n2, 'beta': beta, 'amp': amp[i]} + kwargs_source_shapelet = { + "center_x": center_x, + "center_y": center_y, + "n1": n1, + "n2": n2, + "beta": beta, + "amp": amp[i], + } A.append(self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)) if n1 == 0: n1 = n2 + 1 @@ -254,7 +288,9 @@ def function_split(self, x, y, amp, n_max, beta, center_x=0, center_y=0): n2 += 1 return A - def shapelet_basis_2d(self, num_order, beta, numPix, deltaPix=1, center_x=0, center_y=0): + def shapelet_basis_2d( + self, num_order, beta, numPix, deltaPix=1, center_x=0, center_y=0 + ): """ :param num_order: max shapelet order @@ -262,14 +298,23 @@ def shapelet_basis_2d(self, num_order, beta, numPix, deltaPix=1, center_x=0, cen :param numPix: number of pixel of the grid :return: list of shapelets drawn on pixel grid, centered. """ - num_param = int((num_order+2)*(num_order+1)/2) + num_param = int((num_order + 2) * (num_order + 1) / 2) kernel_list = [] x_grid, y_grid = util.make_grid(numPix, deltapix=deltaPix, subgrid_res=1) n1 = 0 n2 = 0 - H_x, H_y = self.shapelets.pre_calc(x_grid, y_grid, beta, num_order, center_x=center_x, center_y=center_y) + H_x, H_y = self.shapelets.pre_calc( + x_grid, y_grid, beta, num_order, center_x=center_x, center_y=center_y + ) for i in range(num_param): - kwargs_source_shapelet = {'center_x': 0, 'center_y': 0, 'n1': n1, 'n2': n2, 'beta': beta, 'amp': 1} + kwargs_source_shapelet = { + "center_x": 0, + "center_y": 0, + "n1": n1, + "n2": n2, + "beta": beta, + "amp": 1, + } kernel = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet) kernel = util.array2image(kernel) kernel_list.append(kernel) @@ -282,8 +327,8 @@ def shapelet_basis_2d(self, num_order, beta, numPix, deltaPix=1, center_x=0, cen return kernel_list def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y=0): - """ - decomposes an image into the shapelet coefficients in same order as for the function call + """Decomposes an image into the shapelet coefficients in same order as for the + function call. 
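# The shapelet set above is indexed by all pairs (n1, n2) with n1 + n2 <= n_max,
# hence num_param = (n_max + 1)(n_max + 2)/2 coefficients (n_max = 6 gives 28).
# A minimal call sketch, using util.make_grid as elsewhere in this diff:
import numpy as np
from lenstronomy.LightModel.Profiles.shapelets import ShapeletSet
import lenstronomy.Util.util as util

n_max, beta = 6, 0.5
num_param = (n_max + 1) * (n_max + 2) // 2    # 28
amp = np.zeros(num_param)
amp[0] = 1.0                                  # only the (0, 0) Gaussian-like term
x, y = util.make_grid(numPix=50, deltapix=0.1)
shapelet_set = ShapeletSet()
flux = shapelet_set.function(x, y, amp, n_max, beta, center_x=0, center_y=0)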
:param image: :param x: @@ -294,16 +339,23 @@ def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y :param center_y: :return: """ - num_param = int((n_max+1)*(n_max+2)/2) + num_param = int((n_max + 1) * (n_max + 2) / 2) param_list = np.zeros(num_param) - amp_norm = 1./beta**2*deltaPix**2 + amp_norm = 1.0 / beta**2 * deltaPix**2 n1 = 0 n2 = 0 H_x, H_y = self.shapelets.pre_calc(x, y, beta, n_max, center_x, center_y) for i in range(num_param): - kwargs_source_shapelet = {'center_x': center_x, 'center_y': center_y, 'n1': n1, 'n2': n2, 'beta': beta, 'amp': amp_norm} + kwargs_source_shapelet = { + "center_x": center_x, + "center_y": center_y, + "n1": n1, + "n2": n2, + "beta": beta, + "amp": amp_norm, + } base = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet) - param = np.sum(image*base) + param = np.sum(image * base) param_list[i] = param if n1 == 0: n1 = n2 + 1 diff --git a/lenstronomy/LightModel/Profiles/shapelets_ellipse.py b/lenstronomy/LightModel/Profiles/shapelets_ellipse.py index 4c5977a28..0bb4f59ff 100644 --- a/lenstronomy/LightModel/Profiles/shapelets_ellipse.py +++ b/lenstronomy/LightModel/Profiles/shapelets_ellipse.py @@ -3,13 +3,23 @@ class ShapeletSetEllipse(object): - """ - cartesian shapelets with elliptical axis ratios + """Cartesian shapelets with elliptical axis ratios.""" - """ - param_names = ['amp', 'n_max', 'beta', 'e1', 'e2', 'center_x', 'center_y'] - lower_limit_default = {'beta': 0.01, 'e1': -0.6, 'e2': -0.6, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'beta': 100, 'e1': 0.6, 'e2': 0.6, 'center_x': 100, 'center_y': 100} + param_names = ["amp", "n_max", "beta", "e1", "e2", "center_x", "center_y"] + lower_limit_default = { + "beta": 0.01, + "e1": -0.6, + "e2": -0.6, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "beta": 100, + "e1": 0.6, + "e2": 0.6, + "center_x": 100, + "center_y": 100, + } def __init__(self): self._shapelet_set = ShapeletSet() @@ -28,12 +38,13 @@ def function(self, x, y, amp, n_max, beta, e1, e2, center_x=0, center_y=0): :param center_y: shapelet center y :return: surface brightness of combined shapelet set """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=0, center_y=0) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=0, center_y=0 + ) return self._shapelet_set.function(x_, y_, amp, n_max, beta, center_x, center_y) def function_split(self, x, y, amp, n_max, beta, e1, e2, center_x=0, center_y=0): - """ - splits shapelet set in list of individual shapelet basis function responses + """Splits shapelet set in list of individual shapelet basis function responses. 
:param x: x-coordinates :param y: y-coordinates @@ -46,5 +57,9 @@ def function_split(self, x, y, amp, n_max, beta, e1, e2, center_x=0, center_y=0) :param center_y: shapelet center y :return: list of individual shapelet basis function responses """ - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=0, center_y=0) - return self._shapelet_set.function_split(x_, y_, amp, n_max, beta, center_x, center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=0, center_y=0 + ) + return self._shapelet_set.function_split( + x_, y_, amp, n_max, beta, center_x, center_y + ) diff --git a/lenstronomy/LightModel/Profiles/shapelets_polar.py b/lenstronomy/LightModel/Profiles/shapelets_polar.py index 1482cc80c..f6604f49f 100644 --- a/lenstronomy/LightModel/Profiles/shapelets_polar.py +++ b/lenstronomy/LightModel/Profiles/shapelets_polar.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import math @@ -7,24 +7,36 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class ShapeletsPolar(object): - """ - 2D polar Shapelets, see Massey & Refregier 2005 - """ - param_names = ['amp', 'beta', 'n', 'm', 'center_x', 'center_y'] - param_names_latex = {r'$I_0$', r'$\beta$', r'$n$', r'$m$', r'$x_0$', r'$y_0$'} - lower_limit_default = {'amp': 0, 'beta': 0, 'n': 0, 'm': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'beta': 100, 'n': 150, 'm': 150, 'center_x': 100, 'center_y': 100} + """2D polar Shapelets, see Massey & Refregier 2005.""" + + param_names = ["amp", "beta", "n", "m", "center_x", "center_y"] + param_names_latex = {r"$I_0$", r"$\beta$", r"$n$", r"$m$", r"$x_0$", r"$y_0$"} + lower_limit_default = { + "amp": 0, + "beta": 0, + "n": 0, + "m": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "beta": 100, + "n": 150, + "m": 150, + "center_x": 100, + "center_y": 100, + } def __init__(self): - """ - load interpolation of the Hermite polynomials in a range [-30,30] in order n<= 150 - :return: - """ + """Load interpolation of the Hermite polynomials in a range [-30,30] in order + n<= 150 :return:""" pass def function(self, x, y, amp, beta, n, m, complex_bool, center_x, center_y): @@ -58,17 +70,23 @@ def _chi_n_m(r, beta, n, m): :return: value of function (8) in Massey & Refregier, complex numbers """ m_abs = int(abs(m)) - p = int((n - m_abs)/2) - p2 = int((n + m_abs)/2) + p = int((n - m_abs) / 2) + p2 = int((n + m_abs) / 2) if p % 2 == 0: # if p is even prefac = 1 else: prefac = -1 - prefactor = prefac/beta**(m_abs + 1)*np.sqrt(math.factorial(p)/(np.pi*math.factorial(p2))) - poly = scipy.special.genlaguerre(n=p, alpha=m_abs) # lower part, upper part of definition in Massey & Refregier - r_ = (r/beta)**2 + prefactor = ( + prefac + / beta ** (m_abs + 1) + * np.sqrt(math.factorial(p) / (np.pi * math.factorial(p2))) + ) + poly = scipy.special.genlaguerre( + n=p, alpha=m_abs + ) # lower part, upper part of definition in Massey & Refregier + r_ = (r / beta) ** 2 l_n_alpha = poly(r_) - return prefactor*r**m_abs*l_n_alpha*np.exp(-(r/beta)**2/2) + return prefactor * r**m_abs * l_n_alpha * np.exp(-((r / beta) ** 2) / 2) @staticmethod def _index2n(index): @@ -86,8 +104,8 @@ def _index2n(index): return n def index2poly(self, index): - """ - manages the convention from an iterative index to the specific polynomial n, m, (real/imaginary part) + """Manages the convention from an iterative 
index to the specific polynomial n, + m, (real/imaginary part) :param index: int, index of list :return: n, m bool @@ -129,7 +147,7 @@ def poly2index(n, m, complex_bool): index = n * (n + 1) / 2 if complex_bool is True: if m == 0: - raise ValueError('m=0 can not have imaginary part!') + raise ValueError("m=0 can not have imaginary part!") if n % 2 == 0: if m % 2 == 0: if m == 0: @@ -139,7 +157,7 @@ def poly2index(n, m, complex_bool): if complex_bool is True: index += 1 else: - raise ValueError('m needs to be even for even n!') + raise ValueError("m needs to be even for even n!") else: if complex_bool is True: index += m + 1 @@ -154,23 +172,37 @@ def num_param(n_max): :param n_max: maximal polynomial order :return: number of basis components """ - return int((n_max+1)*(n_max+2)/2) + return int((n_max + 1) * (n_max + 2) / 2) @export class ShapeletsPolarExp(object): + """2D exponential shapelets, Berge et al. + + 2019 """ - 2D exponential shapelets, Berge et al. 2019 - """ - param_names = ['amp', 'beta', 'n', 'm', 'center_x', 'center_y'] - lower_limit_default = {'amp': 0, 'beta': 0, 'n': 0, 'm': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'amp': 100, 'beta': 100, 'n': 150, 'm': 150, 'center_x': 100, 'center_y': 100} + + param_names = ["amp", "beta", "n", "m", "center_x", "center_y"] + lower_limit_default = { + "amp": 0, + "beta": 0, + "n": 0, + "m": 0, + "center_x": -100, + "center_y": -100, + } + upper_limit_default = { + "amp": 100, + "beta": 100, + "n": 150, + "m": 150, + "center_x": 100, + "center_y": 100, + } def __init__(self): - """ - load interpolation of the Hermite polynomials in a range [-30,30] in order n<= 150 - :return: - """ + """Load interpolation of the Hermite polynomials in a range [-30,30] in order + n<= 150 :return:""" pass def function(self, x, y, amp, beta, n, m, complex_bool, center_x, center_y): @@ -210,11 +242,18 @@ def _chi_n_m(r, beta, n, m): prefac = 1 else: prefac = -1 - prefactor = prefac * np.sqrt(2./(beta*np.pi * (2*n + 1)**3) * math.factorial(p) / math.factorial(p2)) - poly = scipy.special.genlaguerre(n=p, alpha=2*m_abs) # lower part, upper part of definition in Massey & Refregier - x_ = 2. 
* r / (beta * (2*n + 1)) + prefactor = prefac * np.sqrt( + 2.0 + / (beta * np.pi * (2 * n + 1) ** 3) + * math.factorial(p) + / math.factorial(p2) + ) + poly = scipy.special.genlaguerre( + n=p, alpha=2 * m_abs + ) # lower part, upper part of definition in Massey & Refregier + x_ = 2.0 * r / (beta * (2 * n + 1)) l_n_alpha = poly(x_) - return prefactor * x_**m_abs * l_n_alpha * np.exp(-x_/2) / np.sqrt(beta) + return prefactor * x_**m_abs * l_n_alpha * np.exp(-x_ / 2) / np.sqrt(beta) @staticmethod def num_param(n_max): @@ -223,7 +262,7 @@ def num_param(n_max): :param n_max: maximal polynomial order :return: number of basis components """ - return int((n_max + 1)**2) + return int((n_max + 1) ** 2) @staticmethod def _index2n(index): @@ -253,7 +292,7 @@ def index2poly(self, index): m = int((delta - 1) / 2) complex_bool = False else: - m = int(delta/2) + m = int(delta / 2) complex_bool = True return n, m, complex_bool @@ -267,7 +306,7 @@ def poly2index(n, m, complex_bool): :return: index convention, integer """ index = n**2 - index += 2*m + index += 2 * m if complex_bool is True: index -= 1 return int(index) @@ -275,12 +314,11 @@ def poly2index(n, m, complex_bool): @export class ShapeletSetPolar(object): - """ - class to operate on entire shapelet set - """ - param_names = ['amp', 'n_max', 'beta', 'center_x', 'center_y'] - lower_limit_default = {'beta': 0, 'center_x': -100, 'center_y': -100} - upper_limit_default = {'beta': 100, 'center_x': 100, 'center_y': 100} + """Class to operate on entire shapelet set.""" + + param_names = ["amp", "n_max", "beta", "center_x", "center_y"] + lower_limit_default = {"beta": 0, "center_x": -100, "center_y": -100} + upper_limit_default = {"beta": 100, "center_x": 100, "center_y": 100} def __init__(self, exponential=False): if exponential is True: @@ -347,8 +385,8 @@ def _pre_calc(self, x, y, beta, n_max, center_x, center_y): # compute the Laguerre polynomials in n, m chi_n_m_list = [[0 for _ in range(n_max + 1)] for _ in range(n_max + 1)] - for n in range(n_max+1): - for m in range(n+1): + for n in range(n_max + 1): + for m in range(n + 1): if (n - m) % 2 == 0 or self._exponential is True: chi_n_m_list[n][m] = self.shapelets._chi_n_m(r, beta, n, m) @@ -364,8 +402,8 @@ def _pre_calc(self, x, y, beta, n_max, center_x, center_y): @staticmethod def _pre_calc_function(L_list, i): - """ - evaluates the shapelet function based on the pre-calculated components in _pre_calc() + """Evaluates the shapelet function based on the pre-calculated components in + _pre_calc() :param L_list: pre-calculated components :param i: index conventions of the sequence of basis components @@ -374,9 +412,9 @@ def _pre_calc_function(L_list, i): return L_list[i] def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y=0): - """ - decomposes an image into the shapelet coefficients in same order as for the function call - :param image: + """Decomposes an image into the shapelet coefficients in same order as for the + function call :param image: + :param x: :param y: :param n_max: @@ -387,11 +425,11 @@ def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y """ num_param = self.shapelets.num_param(n_max) param_list = np.zeros(num_param) - amp_norm = 1. 
* deltaPix**2 + amp_norm = 1.0 * deltaPix**2 L_list = self._pre_calc(x, y, beta, n_max, center_x, center_y) for i in range(num_param): base = self._pre_calc_function(L_list, i) * amp_norm - param = np.sum(image*base) + param = np.sum(image * base) n, m, complex_bool = self.shapelets.index2poly(i) if m != 0: param *= 2 diff --git a/lenstronomy/LightModel/Profiles/starlets.py b/lenstronomy/LightModel/Profiles/starlets.py index 2c926f06b..663d766c5 100644 --- a/lenstronomy/LightModel/Profiles/starlets.py +++ b/lenstronomy/LightModel/Profiles/starlets.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import numpy as np import warnings @@ -7,53 +7,96 @@ from lenstronomy.LightModel.Profiles.interpolation import Interpol from lenstronomy.Util import util -__all__ = ['SLIT_Starlets'] +__all__ = ["SLIT_Starlets"] class SLIT_Starlets(object): - """ - Decomposition of an image using the Isotropic Undecimated Walevet Transform, - also known as "starlet" or "B-spline", using the 'a trous' algorithm. + """Decomposition of an image using the Isotropic Undecimated Walevet Transform, also + known as "starlet" or "B-spline", using the 'a trous' algorithm. Astronomical data (galaxies, stars, ...) are often very sparsely represented in the starlet basis. Based on Starck et al. : https://ui.adsabs.harvard.edu/abs/2007ITIP...16..297S/abstract """ - param_names = ['amp', 'n_scales', 'n_pixels', 'scale', 'center_x', 'center_y'] - param_names_latex = {r'$I_0$', r'$n_{\rm scales}$', r'$n_{\rm pix}$', r'scale', r'$x_0$', r'$y_0$'} - lower_limit_default = {'amp': [0], 'n_scales': 2, 'n_pixels': 5, 'center_x': -1000, 'center_y': -1000, 'scale': 0.000000001} - upper_limit_default = {'amp': [1e8], 'n_scales': 20, 'n_pixels': 1e10, 'center_x': 1000, 'center_y': 1000, 'scale': 10000000000} - def __init__(self, thread_count=1, fast_inverse=True, second_gen=False, show_pysap_plots=False, - force_no_pysap=False): - """ - Load pySAP package if found, and initialize the Starlet transform. + param_names = ["amp", "n_scales", "n_pixels", "scale", "center_x", "center_y"] + param_names_latex = { + r"$I_0$", + r"$n_{\rm scales}$", + r"$n_{\rm pix}$", + r"scale", + r"$x_0$", + r"$y_0$", + } + lower_limit_default = { + "amp": [0], + "n_scales": 2, + "n_pixels": 5, + "center_x": -1000, + "center_y": -1000, + "scale": 0.000000001, + } + upper_limit_default = { + "amp": [1e8], + "n_scales": 20, + "n_pixels": 1e10, + "center_x": 1000, + "center_y": 1000, + "scale": 10000000000, + } + + def __init__( + self, + thread_count=1, + fast_inverse=True, + second_gen=False, + show_pysap_plots=False, + force_no_pysap=False, + ): + """Load pySAP package if found, and initialize the Starlet transform. :param thread_count: number of threads used for pySAP computations - :param fast_inverse: if True, reconstruction is simply the sum of each scale (only for 1st generation starlet transform) - :param second_gen: if True, uses the second generation of starlet transform - :param show_pysap_plots: if True, displays pySAP plots when calling the decomposition method - :param force_no_pysap: if True, does not load pySAP and computes starlet transforms in python. 
- """ + :param fast_inverse: if True, reconstruction is simply the sum of each scale + (only for 1st generation starlet transform) + :param second_gen: if True, uses the second generation of starlet transform + :param show_pysap_plots: if True, displays pySAP plots when calling the + decomposition method + :param force_no_pysap: if True, does not load pySAP and computes starlet + transforms in python. + """ self.use_pysap, pysap = self._load_pysap(force_no_pysap) if self.use_pysap: - self._transf_class = pysap.load_transform('BsplineWaveletTransformATrousAlgorithm') + self._transf_class = pysap.load_transform( + "BsplineWaveletTransformATrousAlgorithm" + ) else: - warnings.warn("The python package pySAP is not used for starlet operations. " - "They will be performed using (slower) python routines.") + warnings.warn( + "The python package pySAP is not used for starlet operations. " + "They will be performed using (slower) python routines." + ) self._fast_inverse = fast_inverse self._second_gen = second_gen self._show_pysap_plots = show_pysap_plots self.interpol = Interpol() self.thread_count = thread_count - def function(self, x, y, amp=None, n_scales=None, n_pixels=None, scale=1, center_x=0, center_y=0): - """ - 1D inverse starlet transform from starlet coefficients stored in coeffs + def function( + self, + x, + y, + amp=None, + n_scales=None, + n_pixels=None, + scale=1, + center_x=0, + center_y=0, + ): + """1D inverse starlet transform from starlet coefficients stored in coeffs Follows lenstronomy conventions for light profiles. - :param amp: decomposition coefficients ('amp' to follow conventions in other light profile) - This is an ndarray with shape (n_scales, sqrt(n_pixels), sqrt(n_pixels)) or (n_scales*n_pixels,) + :param amp: decomposition coefficients ('amp' to follow conventions in other + light profile) This is an ndarray with shape (n_scales, sqrt(n_pixels), + sqrt(n_pixels)) or (n_scales*n_pixels,) :param n_scales: number of decomposition scales :param n_pixels: number of pixels in a single scale :return: reconstructed signal as 1D array of shape (n_pixels,) @@ -63,33 +106,43 @@ def function(self, x, y, amp=None, n_scales=None, n_pixels=None, scale=1, center elif len(amp.shape) == 3: coeffs = amp else: - raise ValueError("Starlets 'amp' has not the right shape (1D or 3D arrays are supported)") + raise ValueError( + "Starlets 'amp' has not the right shape (1D or 3D arrays are supported)" + ) image = self.function_2d(coeffs, n_scales, n_pixels) - image = self.interpol.function(x, y, image=image, scale=scale, - center_x=center_x, center_y=center_y, - amp=1, phi_G=0) + image = self.interpol.function( + x, + y, + image=image, + scale=scale, + center_x=center_x, + center_y=center_y, + amp=1, + phi_G=0, + ) return image def function_2d(self, coeffs, n_scales, n_pixels): - """ - 2D inverse starlet transform from starlet coefficients stored in coeffs + """2D inverse starlet transform from starlet coefficients stored in coeffs. 
- :param coeffs: decomposition coefficients, - ndarray with shape (n_scales, sqrt(n_pixels), sqrt(n_pixels)) + :param coeffs: decomposition coefficients, ndarray with shape (n_scales, + sqrt(n_pixels), sqrt(n_pixels)) :param n_scales: number of decomposition scales - :return: reconstructed signal as 2D array of shape (sqrt(n_pixels), sqrt(n_pixels)) + :return: reconstructed signal as 2D array of shape (sqrt(n_pixels), + sqrt(n_pixels)) """ if self.use_pysap and not self._second_gen: return self._inverse_transform(coeffs, n_scales, n_pixels) else: - return starlets_util.inverse_transform(coeffs, fast=self._fast_inverse, - second_gen=self._second_gen) + return starlets_util.inverse_transform( + coeffs, fast=self._fast_inverse, second_gen=self._second_gen + ) def decomposition(self, image, n_scales): - """ - 1D starlet transform from starlet coefficients stored in coeffs + """1D starlet transform from starlet coefficients stored in coeffs. - :param image: 2D image to be decomposed, ndarray with shape (sqrt(n_pixels), sqrt(n_pixels)) + :param image: 2D image to be decomposed, ndarray with shape (sqrt(n_pixels), + sqrt(n_pixels)) :param n_scales: number of decomposition scales :return: reconstructed signal as 1D array of shape (n_scales*n_pixels,) """ @@ -98,28 +151,33 @@ def decomposition(self, image, n_scales): elif len(image.shape) == 2: image_2d = image else: - raise ValueError("image has not the right shape (1D or 2D arrays are supported for starlets decomposition)") + raise ValueError( + "image has not the right shape (1D or 2D arrays are supported for starlets decomposition)" + ) return util.cube2array(self.decomposition_2d(image_2d, n_scales)) def decomposition_2d(self, image, n_scales): - """ - 2D starlet transform from starlet coefficients stored in coeffs + """2D starlet transform from starlet coefficients stored in coeffs. 
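# Round-trip sketch for the starlet profile above: decompose an image into n_scales
# starlet planes and reconstruct it with the (fast) inverse transform
# (force_no_pysap=True selects the pure-python fallback, so pySAP is not required):
import numpy as np
from lenstronomy.LightModel.Profiles.starlets import SLIT_Starlets

starlets = SLIT_Starlets(force_no_pysap=True)
image = np.random.RandomState(0).rand(32, 32)
n_scales = 3
coeffs = starlets.decomposition_2d(image, n_scales)               # (n_scales, 32, 32)
image_rec = starlets.function_2d(coeffs, n_scales=n_scales, n_pixels=32 * 32)
assert np.allclose(image, image_rec)   # first-generation starlet scales sum back to the input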
- :param image: 2D image to be decomposed, ndarray with shape (sqrt(n_pixels), sqrt(n_pixels)) + :param image: 2D image to be decomposed, ndarray with shape (sqrt(n_pixels), + sqrt(n_pixels)) :param n_scales: number of decomposition scales - :return: reconstructed signal as 2D array of shape (n_scales, sqrt(n_pixels), sqrt(n_pixels)) + :return: reconstructed signal as 2D array of shape (n_scales, sqrt(n_pixels), + sqrt(n_pixels)) """ if self.use_pysap and not self._second_gen: coeffs = self._transform(image, n_scales) else: - coeffs = starlets_util.transform(image, n_scales, second_gen=self._second_gen) + coeffs = starlets_util.transform( + image, n_scales, second_gen=self._second_gen + ) return coeffs def _inverse_transform(self, coeffs, n_scales, n_pixels): - """reconstructs image from starlet coefficients""" + """Reconstructs image from starlet coefficients.""" self._check_transform_pysap(n_scales, n_pixels) if self._fast_inverse and not self._second_gen: - # for 1st gen starlet the reconstruction can be performed by summing all scales + # for 1st gen starlet the reconstruction can be performed by summing all scales image = np.sum(coeffs, axis=0) else: coeffs = self._coeffs2pysap(coeffs) @@ -131,7 +189,7 @@ def _inverse_transform(self, coeffs, n_scales, n_pixels): return image def _transform(self, image, n_scales): - """decomposes an image into starlets coefficients""" + """Decomposes an image into starlets coefficients.""" self._check_transform_pysap(n_scales, image.size) self._transf.data = image self._transf.analysis() @@ -142,26 +200,31 @@ def _transform(self, image, n_scales): return coeffs def _check_transform_pysap(self, n_scales, n_pixels): - """if needed, update the loaded pySAP transform to correct number of scales""" - if not hasattr(self, '_transf') or n_scales != self._n_scales or n_pixels != self._n_pixels: - self._transf = self._transf_class(nb_scale=n_scales, verbose=False, - nb_procs=self.thread_count) + """If needed, update the loaded pySAP transform to correct number of scales.""" + if ( + not hasattr(self, "_transf") + or n_scales != self._n_scales + or n_pixels != self._n_pixels + ): + self._transf = self._transf_class( + nb_scale=n_scales, verbose=False, nb_procs=self.thread_count + ) self._n_scales = n_scales self._n_pixels = n_pixels def _pysap2coeffs(self, coeffs): - """convert pySAP decomposition coefficients to numpy array""" + """Convert pySAP decomposition coefficients to numpy array.""" return np.asarray(coeffs) def _coeffs2pysap(self, coeffs): - """convert coefficients stored in numpy array to list required by pySAP""" + """Convert coefficients stored in numpy array to list required by pySAP.""" coeffs_list = [] for i in range(coeffs.shape[0]): coeffs_list.append(coeffs[i, :, :]) return coeffs_list def _load_pysap(self, force_no_pysap): - """load pySAP module""" + """Load pySAP module.""" if force_no_pysap: return False, None try: @@ -172,5 +235,5 @@ def _load_pysap(self, force_no_pysap): return True, pysap def delete_cache(self): - """delete the cached interpolated image""" + """Delete the cached interpolated image.""" self.interpol.delete_cache() diff --git a/lenstronomy/LightModel/Profiles/starlets_util.py b/lenstronomy/LightModel/Profiles/starlets_util.py index 89c647cce..d75fcb200 100644 --- a/lenstronomy/LightModel/Profiles/starlets_util.py +++ b/lenstronomy/LightModel/Profiles/starlets_util.py @@ -1,47 +1,49 @@ -__author__ = 'herjy', 'aymgal', 'sibirrer' +__author__ = "herjy", "aymgal", "sibirrer" import numpy as np from scipy import ndimage from 
lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def transform(img, n_scales, second_gen=False): - """ - Performs starlet decomposition of an 2D array. + """Performs starlet decomposition of an 2D array. :param img: input image :param n_scales: number of decomposition scales :param second_gen: if True, 'second generation' starlets are used """ - mode = 'nearest' - - lvl = n_scales-1 + mode = "nearest" + + lvl = n_scales - 1 sh = np.shape(img) n1 = sh[1] n2 = sh[1] - + # B-spline filter - h = [1./16, 1./4, 3./8, 1./4, 1./16] + h = [1.0 / 16, 1.0 / 4, 3.0 / 8, 1.0 / 4, 1.0 / 16] n = np.size(h) h = np.array(h) - + max_lvl = np.min((lvl, int(np.log2(n2)))) if lvl > max_lvl: - raise ValueError("Maximum decomposition level is {} (required: {})".format(max_lvl, lvl)) + raise ValueError( + "Maximum decomposition level is {} (required: {})".format(max_lvl, lvl) + ) elif lvl <= 0: raise ValueError("Number of decomposition level can not be non-positive") c = img # wavelet set of coefficients. - wave = np.zeros((lvl+1, n1, n2)) + wave = np.zeros((lvl + 1, n1, n2)) for i in range(lvl): - newh = np.zeros((1, n+(n-1)*(2**i-1))) - newh[0, np.linspace(0, np.size(newh)-1, len(h), dtype=int)] = h + newh = np.zeros((1, n + (n - 1) * (2**i - 1))) + newh[0, np.linspace(0, np.size(newh) - 1, len(h), dtype=int)] = h # H = np.dot(newh.T, newh) @@ -58,46 +60,46 @@ def transform(img, n_scales, second_gen=False): ###### hoh for g; Line convolution hc = ndimage.convolve1d(hc, newh[0, :], axis=1, mode=mode) - + ###### wj+1 = cj - hcj+1 wave[i, :, :] = c - hc - + else: ###### wj+1 = cj - cj+1 wave[i, :, :] = c - cnew c = cnew - - wave[i+1, :, :] = c + + wave[i + 1, :, :] = c return wave @export def inverse_transform(wave, fast=True, second_gen=False): - """ - Reconstructs an image fron its starlet decomposition coefficients + """Reconstructs an image fron its starlet decomposition coefficients. 
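# The 'a trous' dilation used in transform() above: at scale i the B-spline filter h
# is up-sampled by inserting 2**i - 1 zeros between its taps before the separable
# line/column convolutions (numpy-only illustration):
import numpy as np

h = np.array([1.0 / 16, 1.0 / 4, 3.0 / 8, 1.0 / 4, 1.0 / 16])
i = 2
n = h.size
newh = np.zeros(n + (n - 1) * (2**i - 1))
newh[np.linspace(0, newh.size - 1, n, dtype=int)] = h
# newh now holds the 5 taps of h separated by 3 zeros each (length 17)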
- :param wave: input coefficients, with shape (n_scales, np.sqrt(n_pixel), np.sqrt(n_pixel)) - :param fast: if True, and only with second_gen is False, simply sums up all scales to reconstruct the image + :param wave: input coefficients, with shape (n_scales, np.sqrt(n_pixel), + np.sqrt(n_pixel)) + :param fast: if True, and only with second_gen is False, simply sums up all scales + to reconstruct the image :param second_gen: if True, 'second generation' starlets are used """ if fast and not second_gen: # simply sum all scales, including the coarsest one return np.sum(wave, axis=0) - mode = 'nearest' - + mode = "nearest" + lvl, n1, n2 = np.shape(wave) - h = np.array([1./16, 1./4, 3./8, 1./4, 1./16]) + h = np.array([1.0 / 16, 1.0 / 4, 3.0 / 8, 1.0 / 4, 1.0 / 16]) n = np.size(h) - cJ = np.copy(wave[lvl-1, :, :]) + cJ = np.copy(wave[lvl - 1, :, :]) for i in range(1, lvl): - - newh = np.zeros((1, n+(n-1)*(2**(lvl-1-i)-1))) - newh[0, np.linspace(0, np.size(newh)-1, len(h), dtype=int)] = h + newh = np.zeros((1, n + (n - 1) * (2 ** (lvl - 1 - i) - 1))) + newh[0, np.linspace(0, np.size(newh) - 1, len(h), dtype=int)] = h H = np.dot(newh.T, newh) ###### Line convolution @@ -105,6 +107,6 @@ def inverse_transform(wave, fast=True, second_gen=False): ###### Column convolution cnew = ndimage.convolve1d(cnew, newh[0, :], axis=1, mode=mode) - cJ = cnew + wave[lvl-1-i, :, :] + cJ = cnew + wave[lvl - 1 - i, :, :] return np.reshape(cJ, (n1, n2)) diff --git a/lenstronomy/LightModel/Profiles/uniform.py b/lenstronomy/LightModel/Profiles/uniform.py index 8bf8c0722..11de5544c 100644 --- a/lenstronomy/LightModel/Profiles/uniform.py +++ b/lenstronomy/LightModel/Profiles/uniform.py @@ -1,17 +1,19 @@ import numpy as np -__all__ = ['Uniform'] +__all__ = ["Uniform"] class Uniform(object): - """ - uniform light profile. This profile can also compensate for an inaccurate background subtraction. + """Uniform light profile. + + This profile can also compensate for an inaccurate background subtraction. name for profile: 'UNIFORM' """ - param_names = ['amp'] - param_names_latex = {r'$I_0$'} - lower_limit_default = {'amp': -100} - upper_limit_default = {'amp': 100} + + param_names = ["amp"] + param_names_latex = {r"$I_0$"} + lower_limit_default = {"amp": -100} + upper_limit_default = {"amp": 100} def __init__(self): pass diff --git a/lenstronomy/LightModel/light_model.py b/lenstronomy/LightModel/light_model.py index 07ad0cd2c..9c8dfb647 100644 --- a/lenstronomy/LightModel/light_model.py +++ b/lenstronomy/LightModel/light_model.py @@ -1,28 +1,34 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LightModel.linear_basis import LinearBasis -__all__ = ['LightModel'] +__all__ = ["LightModel"] class LightModel(LinearBasis): + """Class to handle extended surface brightness profiles (for e.g. source and lens + light) + + all profiles come with a surface_brightness parameterization (in units per square + angle and independent of the pixel scale). The parameter 'amp' is the linear scaling + parameter of surface brightness. Some functional forms come with a total_flux() + definition that provide the integral of the surface brightness for a given set of + parameters. + + The SimulationAPI module allows to use astronomical magnitudes to be used and + translated into the surface brightness conventions of this module given a magnitude + zero point. """ - class to handle extended surface brightness profiles (for e.g. 
source and lens light) - all profiles come with a surface_brightness parameterization (in units per square angle and independent of - the pixel scale). - The parameter 'amp' is the linear scaling parameter of surface brightness. - Some functional forms come with a total_flux() definition that provide the integral of the surface brightness for a - given set of parameters. - - The SimulationAPI module allows to use astronomical magnitudes to be used and translated into the surface brightness - conventions of this module given a magnitude zero point. - - """ - - def __init__(self, light_model_list, deflection_scaling_list=None, source_redshift_list=None, - smoothing=0.001, sersic_major_axis=None): + def __init__( + self, + light_model_list, + deflection_scaling_list=None, + source_redshift_list=None, + smoothing=0.001, + sersic_major_axis=None, + ): """ :param light_model_list: list of light models @@ -36,7 +42,10 @@ def __init__(self, light_model_list, deflection_scaling_list=None, source_redshi half-light radius, if False, uses the product average of semi-major and semi-minor axis. If None, uses the convention in the lenstronomy yaml setting (which by default is =False) """ - super(LightModel, self).__init__(light_model_list=light_model_list, smoothing=smoothing, - sersic_major_axis=sersic_major_axis) + super(LightModel, self).__init__( + light_model_list=light_model_list, + smoothing=smoothing, + sersic_major_axis=sersic_major_axis, + ) self.deflection_scaling_list = deflection_scaling_list self.redshift_list = source_redshift_list diff --git a/lenstronomy/LightModel/light_model_base.py b/lenstronomy/LightModel/light_model_base.py index 79fd958b7..ece444a3b 100644 --- a/lenstronomy/LightModel/light_model_base.py +++ b/lenstronomy/LightModel/light_model_base.py @@ -1,28 +1,49 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class which describes the surface brightness of the light models import numpy as np from lenstronomy.Util.util import convert_bool_list from lenstronomy.Conf import config_loader + convention_conf = config_loader.conventions_conf() -sersic_major_axis_conf = convention_conf.get('sersic_major_axis', False) +sersic_major_axis_conf = convention_conf.get("sersic_major_axis", False) -__all__ = ['LightModelBase'] +__all__ = ["LightModelBase"] -_MODELS_SUPPORTED = ['GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'ELLIPSOID', 'MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE', - 'SERSIC', 'SERSIC_ELLIPSE', 'CORE_SERSIC', 'SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP', - 'SHAPELETS_ELLIPSE', - 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE', 'UNIFORM', 'POWER_LAW', 'NIE', - 'CHAMELEON', 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'INTERPOL', 'SLIT_STARLETS', - 'SLIT_STARLETS_GEN2'] +_MODELS_SUPPORTED = [ + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "ELLIPSOID", + "MULTI_GAUSSIAN", + "MULTI_GAUSSIAN_ELLIPSE", + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "SHAPELETS", + "SHAPELETS_POLAR", + "SHAPELETS_POLAR_EXP", + "SHAPELETS_ELLIPSE", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "UNIFORM", + "POWER_LAW", + "NIE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "INTERPOL", + "SLIT_STARLETS", + "SLIT_STARLETS_GEN2", +] class LightModelBase(object): - """ - class to handle source and lens light models - """ + """Class to handle source and lens light models.""" + def __init__(self, light_model_list, smoothing=0.001, sersic_major_axis=None): """ @@ -37,84 +58,127 @@ def __init__(self, light_model_list, smoothing=0.001, 
sersic_major_axis=None): if sersic_major_axis is None: sersic_major_axis = sersic_major_axis_conf for profile_type in light_model_list: - if profile_type == 'GAUSSIAN': + if profile_type == "GAUSSIAN": from lenstronomy.LightModel.Profiles.gaussian import Gaussian + self.func_list.append(Gaussian()) - elif profile_type == 'GAUSSIAN_ELLIPSE': + elif profile_type == "GAUSSIAN_ELLIPSE": from lenstronomy.LightModel.Profiles.gaussian import GaussianEllipse + self.func_list.append(GaussianEllipse()) - elif profile_type == 'ELLIPSOID': + elif profile_type == "ELLIPSOID": from lenstronomy.LightModel.Profiles.ellipsoid import Ellipsoid + self.func_list.append(Ellipsoid()) - elif profile_type == 'MULTI_GAUSSIAN': + elif profile_type == "MULTI_GAUSSIAN": from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian + self.func_list.append(MultiGaussian()) - elif profile_type == 'MULTI_GAUSSIAN_ELLIPSE': - from lenstronomy.LightModel.Profiles.gaussian import MultiGaussianEllipse + elif profile_type == "MULTI_GAUSSIAN_ELLIPSE": + from lenstronomy.LightModel.Profiles.gaussian import ( + MultiGaussianEllipse, + ) + self.func_list.append(MultiGaussianEllipse()) - elif profile_type == 'SERSIC': + elif profile_type == "SERSIC": from lenstronomy.LightModel.Profiles.sersic import Sersic + self.func_list.append(Sersic(smoothing=smoothing)) - elif profile_type == 'SERSIC_ELLIPSE': + elif profile_type == "SERSIC_ELLIPSE": from lenstronomy.LightModel.Profiles.sersic import SersicElliptic - self.func_list.append(SersicElliptic(smoothing=smoothing, sersic_major_axis=sersic_major_axis)) - elif profile_type == 'CORE_SERSIC': + + self.func_list.append( + SersicElliptic( + smoothing=smoothing, sersic_major_axis=sersic_major_axis + ) + ) + elif profile_type == "CORE_SERSIC": from lenstronomy.LightModel.Profiles.sersic import CoreSersic - self.func_list.append(CoreSersic(smoothing=smoothing, sersic_major_axis=sersic_major_axis)) - elif profile_type == 'SHAPELETS': + + self.func_list.append( + CoreSersic(smoothing=smoothing, sersic_major_axis=sersic_major_axis) + ) + elif profile_type == "SHAPELETS": from lenstronomy.LightModel.Profiles.shapelets import ShapeletSet + self.func_list.append(ShapeletSet()) - elif profile_type == 'SHAPELETS_ELLIPSE': - from lenstronomy.LightModel.Profiles.shapelets_ellipse import ShapeletSetEllipse + elif profile_type == "SHAPELETS_ELLIPSE": + from lenstronomy.LightModel.Profiles.shapelets_ellipse import ( + ShapeletSetEllipse, + ) + self.func_list.append(ShapeletSetEllipse()) - elif profile_type == 'SHAPELETS_POLAR': - from lenstronomy.LightModel.Profiles.shapelets_polar import ShapeletSetPolar + elif profile_type == "SHAPELETS_POLAR": + from lenstronomy.LightModel.Profiles.shapelets_polar import ( + ShapeletSetPolar, + ) + self.func_list.append(ShapeletSetPolar(exponential=False)) - elif profile_type == 'SHAPELETS_POLAR_EXP': - from lenstronomy.LightModel.Profiles.shapelets_polar import ShapeletSetPolar + elif profile_type == "SHAPELETS_POLAR_EXP": + from lenstronomy.LightModel.Profiles.shapelets_polar import ( + ShapeletSetPolar, + ) + self.func_list.append(ShapeletSetPolar(exponential=True)) - elif profile_type == 'HERNQUIST': + elif profile_type == "HERNQUIST": from lenstronomy.LightModel.Profiles.hernquist import Hernquist + self.func_list.append(Hernquist()) - elif profile_type == 'HERNQUIST_ELLIPSE': + elif profile_type == "HERNQUIST_ELLIPSE": from lenstronomy.LightModel.Profiles.hernquist import HernquistEllipse + self.func_list.append(HernquistEllipse()) - elif profile_type == 
'PJAFFE': + elif profile_type == "PJAFFE": from lenstronomy.LightModel.Profiles.p_jaffe import PJaffe + self.func_list.append(PJaffe()) - elif profile_type == 'PJAFFE_ELLIPSE': + elif profile_type == "PJAFFE_ELLIPSE": from lenstronomy.LightModel.Profiles.p_jaffe import PJaffeEllipse + self.func_list.append(PJaffeEllipse()) - elif profile_type == 'UNIFORM': + elif profile_type == "UNIFORM": from lenstronomy.LightModel.Profiles.uniform import Uniform + self.func_list.append(Uniform()) - elif profile_type == 'POWER_LAW': + elif profile_type == "POWER_LAW": from lenstronomy.LightModel.Profiles.power_law import PowerLaw + self.func_list.append(PowerLaw()) - elif profile_type == 'NIE': + elif profile_type == "NIE": from lenstronomy.LightModel.Profiles.nie import NIE + self.func_list.append(NIE()) - elif profile_type == 'CHAMELEON': + elif profile_type == "CHAMELEON": from lenstronomy.LightModel.Profiles.chameleon import Chameleon + self.func_list.append(Chameleon()) - elif profile_type == 'DOUBLE_CHAMELEON': + elif profile_type == "DOUBLE_CHAMELEON": from lenstronomy.LightModel.Profiles.chameleon import DoubleChameleon + self.func_list.append(DoubleChameleon()) - elif profile_type == 'TRIPLE_CHAMELEON': + elif profile_type == "TRIPLE_CHAMELEON": from lenstronomy.LightModel.Profiles.chameleon import TripleChameleon + self.func_list.append(TripleChameleon()) - elif profile_type == 'INTERPOL': + elif profile_type == "INTERPOL": from lenstronomy.LightModel.Profiles.interpolation import Interpol + self.func_list.append(Interpol()) - elif profile_type == 'SLIT_STARLETS': + elif profile_type == "SLIT_STARLETS": from lenstronomy.LightModel.Profiles.starlets import SLIT_Starlets - self.func_list.append(SLIT_Starlets(fast_inverse=True, second_gen=False)) - elif profile_type == 'SLIT_STARLETS_GEN2': + + self.func_list.append( + SLIT_Starlets(fast_inverse=True, second_gen=False) + ) + elif profile_type == "SLIT_STARLETS_GEN2": from lenstronomy.LightModel.Profiles.starlets import SLIT_Starlets + self.func_list.append(SLIT_Starlets(second_gen=True)) else: - raise ValueError('No light model of type %s found! Supported are the following models: %s' - % (profile_type, _MODELS_SUPPORTED)) + raise ValueError( + "No light model of type %s found! 
Supported are the following models: %s" + % (profile_type, _MODELS_SUPPORTED) + ) self._num_func = len(self.func_list) def surface_brightness(self, x, y, kwargs_list, k=None): @@ -133,42 +197,59 @@ def surface_brightness(self, x, y, kwargs_list, k=None): bool_list = self._bool_list(k=k) for i, func in enumerate(self.func_list): if bool_list[i] is True: - out = np.array(func.function(x, y, **kwargs_list_standard[i]), dtype=float) + out = np.array( + func.function(x, y, **kwargs_list_standard[i]), dtype=float + ) flux += out return flux def light_3d(self, r, kwargs_list, k=None): - """ - computes 3d density at radius r - :param r: 3d radius units of arcsec relative to the center of the light profile - :param kwargs_list: keyword argument list of light profile - :param k: integer or list of integers for selecting subsets of light profiles - """ + """Computes 3d density at radius r :param r: 3d radius units of arcsec relative + to the center of the light profile :param kwargs_list: keyword argument list of + light profile :param k: integer or list of integers for selecting subsets of + light profiles.""" kwargs_list_standard = self._transform_kwargs(kwargs_list) r = np.array(r, dtype=float) flux = np.zeros_like(r) bool_list = self._bool_list(k=k) for i, func in enumerate(self.func_list): if bool_list[i] is True: - kwargs = {k: v for k, v in kwargs_list_standard[i].items() if k not in ['center_x', 'center_y']} - if self.profile_type_list[i] in ['DOUBLE_CHAMELEON', 'CHAMELEON', 'HERNQUIST', 'HERNQUIST_ELLIPSE', - 'PJAFFE', 'PJAFFE_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', - 'MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE', 'NIE', 'POWER_LAW', - 'TRIPLE_CHAMELEON']: + kwargs = { + k: v + for k, v in kwargs_list_standard[i].items() + if k not in ["center_x", "center_y"] + } + if self.profile_type_list[i] in [ + "DOUBLE_CHAMELEON", + "CHAMELEON", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "MULTI_GAUSSIAN", + "MULTI_GAUSSIAN_ELLIPSE", + "NIE", + "POWER_LAW", + "TRIPLE_CHAMELEON", + ]: flux += func.light_3d(r, **kwargs) else: - raise ValueError('Light model %s does not support a 3d light distribution!' - % self.profile_type_list[i]) + raise ValueError( + "Light model %s does not support a 3d light distribution!" + % self.profile_type_list[i] + ) return flux def total_flux(self, kwargs_list, norm=False, k=None): - """ - Computes the total flux of each individual light profile. This allows to estimate the total flux as - well as lenstronomy amp to magnitude conversions. Not all models are supported. - The units are linked to the data to be modelled with associated noise properties (default is count/s). + """Computes the total flux of each individual light profile. This allows to + estimate the total flux as well as lenstronomy amp to magnitude conversions. Not + all models are supported. The units are linked to the data to be modelled with + associated noise properties (default is count/s). - :param kwargs_list: list of keyword arguments corresponding to the light profiles. The 'amp' parameter can be - missing. + :param kwargs_list: list of keyword arguments corresponding to the light + profiles. The 'amp' parameter can be missing. 
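# --- Editor's illustration (not part of the patch): minimal use of surface_brightness()
# and total_flux() for a single 'SERSIC' profile. The Sersic parameter names
# (amp, R_sersic, n_sersic, center_x, center_y) are assumed here rather than defined in
# this hunk; treat this as a sketch, not reference usage.
import numpy as np
from lenstronomy.LightModel.light_model import LightModel

light_model = LightModel(light_model_list=["SERSIC"])
kwargs_light = [{"amp": 1.0, "R_sersic": 0.5, "n_sersic": 2.0, "center_x": 0.0, "center_y": 0.0}]

x, y = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
flux_map = light_model.surface_brightness(x, y, kwargs_light)   # per square arcsec

# total flux for amp=1 (norm=True); rescaling 'amp' rescales the total flux linearly
flux_amp1 = light_model.total_flux(kwargs_light, norm=True)[0]
# --- end of editor's illustration ---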
:param norm: bool, if True, computes the flux for amp=1 :param k: int, if set, only evaluates the specific light model :return: list of (total) flux values attributed to each profile @@ -178,28 +259,40 @@ def total_flux(self, kwargs_list, norm=False, k=None): bool_list = self._bool_list(k=k) for i, model in enumerate(self.profile_type_list): if bool_list[i] is True: - if model in ['SERSIC', 'SERSIC_ELLIPSE', 'INTERPOL', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', - 'MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']: + if model in [ + "SERSIC", + "SERSIC_ELLIPSE", + "INTERPOL", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "MULTI_GAUSSIAN", + "MULTI_GAUSSIAN_ELLIPSE", + ]: kwargs_new = kwargs_list_standard[i].copy() if norm is True: - if model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']: - new = {'amp': np.array(kwargs_new['amp'])/kwargs_new['amp'][0]} + if model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"]: + new = { + "amp": np.array(kwargs_new["amp"]) + / kwargs_new["amp"][0] + } else: - new = {'amp': 1} + new = {"amp": 1} kwargs_new.update(new) norm_flux = self.func_list[i].total_flux(**kwargs_new) norm_flux_list.append(norm_flux) else: - raise ValueError("profile %s does not support flux normlization." % model) + raise ValueError( + "profile %s does not support flux normlization." % model + ) # TODO implement total flux for e.g. 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE', - # 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', 'DOUBLE_CHAMELEON' , + # 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', 'DOUBLE_CHAMELEON' , # 'TRIPLE_CHAMELEON', 'UNIFORM' return norm_flux_list def delete_interpol_caches(self): - """Call the delete_cache method of INTERPOL profiles""" + """Call the delete_cache method of INTERPOL profiles.""" for i, model in enumerate(self.profile_type_list): - if model in ['INTERPOL', 'SLIT_STARLETS', 'SLIT_STARLETS_GEN2']: + if model in ["INTERPOL", "SLIT_STARLETS", "SLIT_STARLETS_GEN2"]: self.func_list[i].delete_cache() def _transform_kwargs(self, kwargs_list): @@ -211,13 +304,11 @@ def _transform_kwargs(self, kwargs_list): return kwargs_list def _bool_list(self, k=None): - """ - returns a bool list of the length of the lens models - if k = None: returns bool list with True's - if k is int, returns bool list with False's but k'th is True - if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in the integers listed - and False elsewhere - if k is a boolean list, checks for size to match the numbers of models and returns it + """Returns a bool list of the length of the lens models if k = None: returns + bool list with True's if k is int, returns bool list with False's but k'th is + True if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in + the integers listed and False elsewhere if k is a boolean list, checks for size + to match the numbers of models and returns it. :param k: None, int, or list of ints :return: bool list diff --git a/lenstronomy/LightModel/light_param.py b/lenstronomy/LightModel/light_param.py index d3a511222..38180d4b3 100644 --- a/lenstronomy/LightModel/light_param.py +++ b/lenstronomy/LightModel/light_param.py @@ -1,15 +1,23 @@ from lenstronomy.LightModel.light_model import LightModel -__all__ = ['LightParam'] +__all__ = ["LightParam"] class LightParam(object): - """ - class manages the parameters corresponding to the LightModel() module. Also manages linear parameter handling. + """Class manages the parameters corresponding to the LightModel() module. 
+ + Also manages linear parameter handling. """ - def __init__(self, light_model_list, kwargs_fixed, kwargs_lower=None, kwargs_upper=None, param_type='light', - linear_solver=True): + def __init__( + self, + light_model_list, + kwargs_fixed, + kwargs_lower=None, + kwargs_upper=None, + param_type="light", + linear_solver=True, + ): """ :param light_model_list: list of light models @@ -41,9 +49,9 @@ def __init__(self, light_model_list, kwargs_fixed, kwargs_lower=None, kwargs_upp self.upper_limit = kwargs_upper # check that n_max is fixed for k, model in enumerate(self.model_list): - if model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP']: - if 'n_max' not in self.kwargs_fixed[k]: - Warning('n_max needs to be fixed in %s.' % model) + if model in ["SHAPELETS", "SHAPELETS_POLAR", "SHAPELETS_POLAR_EXP"]: + if "n_max" not in self.kwargs_fixed[k]: + Warning("n_max needs to be fixed in %s." % model) @property def param_name_list(self): @@ -64,32 +72,44 @@ def get_params(self, args, i): param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP'] and name == 'amp': - if 'n_max' in kwargs_fixed: - n_max = kwargs_fixed['n_max'] + if ( + model in ["SHAPELETS", "SHAPELETS_POLAR", "SHAPELETS_POLAR_EXP"] + and name == "amp" + ): + if "n_max" in kwargs_fixed: + n_max = kwargs_fixed["n_max"] else: - raise ValueError('n_max needs to be fixed in %s.' % model) - if model in ['SHAPELETS_POLAR_EXP']: + raise ValueError("n_max needs to be fixed in %s." % model) + if model in ["SHAPELETS_POLAR_EXP"]: num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) - kwargs['amp'] = args[i:i + num_param] + kwargs["amp"] = args[i : i + num_param] i += num_param - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE'] and name == 'amp': - if 'sigma' in kwargs_fixed: - num_param = len(kwargs_fixed['sigma']) + elif ( + model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"] + and name == "amp" + ): + if "sigma" in kwargs_fixed: + num_param = len(kwargs_fixed["sigma"]) else: - raise ValueError('sigma needs to be fixed in %s.' % model) - kwargs['amp'] = args[i:i + num_param] + raise ValueError("sigma needs to be fixed in %s." % model) + kwargs["amp"] = args[i : i + num_param] i += num_param - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2'] and name == 'amp': - if 'n_scales' in kwargs_fixed and 'n_pixels' in kwargs_fixed: - n_scales = kwargs_fixed['n_scales'] - n_pixels = kwargs_fixed['n_pixels'] + elif ( + model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"] + and name == "amp" + ): + if "n_scales" in kwargs_fixed and "n_pixels" in kwargs_fixed: + n_scales = kwargs_fixed["n_scales"] + n_pixels = kwargs_fixed["n_pixels"] else: - raise ValueError("'n_scales' and 'n_pixels' both need to be fixed in %s." % model) + raise ValueError( + "'n_scales' and 'n_pixels' both need to be fixed in %s." 
+ % model + ) num_param = n_scales * n_pixels - kwargs['amp'] = args[i:i + num_param] + kwargs["amp"] = args[i : i + num_param] i += num_param else: kwargs[name] = args[i] @@ -114,35 +134,62 @@ def set_params(self, kwargs_list): param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP'] and name == 'amp': - n_max = kwargs_fixed.get('n_max', kwargs['n_max']) - if model in ['SHAPELETS_POLAR_EXP']: + if ( + model in ["SHAPELETS", "SHAPELETS_POLAR", "SHAPELETS_POLAR_EXP"] + and name == "amp" + ): + n_max = kwargs_fixed.get("n_max", kwargs["n_max"]) + if model in ["SHAPELETS_POLAR_EXP"]: num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) for i in range(num_param): args.append(kwargs[name][i]) - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2'] and name == 'amp': - if 'n_scales' in kwargs_fixed: - n_scales = kwargs_fixed['n_scales'] + elif ( + model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"] + and name == "amp" + ): + if "n_scales" in kwargs_fixed: + n_scales = kwargs_fixed["n_scales"] else: - raise ValueError("'n_scales' for SLIT_STARLETS not found in kwargs_fixed") - if 'n_pixels' in kwargs_fixed: - n_pixels = kwargs_fixed['n_pixels'] + raise ValueError( + "'n_scales' for SLIT_STARLETS not found in kwargs_fixed" + ) + if "n_pixels" in kwargs_fixed: + n_pixels = kwargs_fixed["n_pixels"] else: - raise ValueError("'n_pixels' for SLIT_STARLETS not found in kwargs_fixed") + raise ValueError( + "'n_pixels' for SLIT_STARLETS not found in kwargs_fixed" + ) num_param = n_scales * n_pixels for i in range(num_param): args.append(kwargs[name][i]) - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2'] and name in ['n_scales', 'n_pixels', 'scale', - 'center_x', 'center_y']: - raise ValueError("'{}' must be a fixed keyword argument for STARLETS-like models".format(name)) - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE'] and name == 'amp': - num_param = len(kwargs['sigma']) + elif model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"] and name in [ + "n_scales", + "n_pixels", + "scale", + "center_x", + "center_y", + ]: + raise ValueError( + "'{}' must be a fixed keyword argument for STARLETS-like models".format( + name + ) + ) + elif ( + model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"] + and name == "amp" + ): + num_param = len(kwargs["sigma"]) for i in range(num_param): args.append(kwargs[name][i]) - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE'] and name == 'sigma': - raise ValueError("'sigma' must be a fixed keyword argument for MULTI_GAUSSIAN") + elif ( + model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"] + and name == "sigma" + ): + raise ValueError( + "'sigma' must be a fixed keyword argument for MULTI_GAUSSIAN" + ) else: args.append(kwargs[name]) return args @@ -159,34 +206,50 @@ def num_param(self, latex_style=False): param_names = self._param_name_list[k] for name in param_names: if name not in kwargs_fixed: - if model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP'] and name == 'amp': - if 'n_max' not in kwargs_fixed: - raise ValueError("n_max needs to be fixed in this configuration!") - n_max = kwargs_fixed['n_max'] - if model in ['SHAPELETS_POLAR_EXP']: + if ( + model in ["SHAPELETS", "SHAPELETS_POLAR", "SHAPELETS_POLAR_EXP"] + and name == "amp" + ): + if "n_max" not in kwargs_fixed: + raise ValueError( + "n_max needs to be fixed in this configuration!" 
+ ) + n_max = kwargs_fixed["n_max"] + if model in ["SHAPELETS_POLAR_EXP"]: num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) num += num_param for i in range(num_param): - name_list.append(str(name + '_' + self._type + str(k))) - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2'] and name == 'amp': - if 'n_scales' not in kwargs_fixed or 'n_pixels' not in kwargs_fixed: - raise ValueError("n_scales and n_pixels need to be fixed when using STARLETS-like models!") - n_scales = kwargs_fixed['n_scales'] - n_pixels = kwargs_fixed['n_pixels'] + name_list.append(str(name + "_" + self._type + str(k))) + elif ( + model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"] + and name == "amp" + ): + if ( + "n_scales" not in kwargs_fixed + or "n_pixels" not in kwargs_fixed + ): + raise ValueError( + "n_scales and n_pixels need to be fixed when using STARLETS-like models!" + ) + n_scales = kwargs_fixed["n_scales"] + n_pixels = kwargs_fixed["n_pixels"] num_param = n_scales * n_pixels num += num_param for i in range(num_param): - name_list.append(str(name + '_' + self._type + str(k))) - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE'] and name == 'amp': - num_param = len(kwargs_fixed['sigma']) + name_list.append(str(name + "_" + self._type + str(k))) + elif ( + model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"] + and name == "amp" + ): + num_param = len(kwargs_fixed["sigma"]) num += num_param for i in range(num_param): - name_list.append(str(name + '_' + self._type + str(k))) + name_list.append(str(name + "_" + self._type + str(k))) else: num += 1 - name_list.append(str(name + '_' + self._type + str(k))) + name_list.append(str(name + "_" + self._type + str(k))) return num, name_list def num_param_linear(self): diff --git a/lenstronomy/LightModel/linear_basis.py b/lenstronomy/LightModel/linear_basis.py index 720878b43..ce595ad80 100644 --- a/lenstronomy/LightModel/linear_basis.py +++ b/lenstronomy/LightModel/linear_basis.py @@ -1,17 +1,15 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" # this file contains a class which describes the surface brightness of the light models import numpy as np from lenstronomy.LightModel.light_model_base import LightModelBase -__all__ = ['LinearBasis'] +__all__ = ["LinearBasis"] class LinearBasis(LightModelBase): - """ - class to handle source and lens light models - """ + """Class to handle source and lens light models.""" def __init__(self, **kwargs): """ @@ -22,8 +20,7 @@ def __init__(self, **kwargs): @property def param_name_list(self): - """ - returns the list of all parameter names + """Returns the list of all parameter names. :return: list of list of strings (for each light model separately) """ @@ -34,22 +31,20 @@ def param_name_list(self): @property def param_name_list_latex(self): - """ - returns the list of all parameter names in LateX style + """Returns the list of all parameter names in LateX style. :return: list of list of strings (for each light model separately) """ name_list = [] for i, func in enumerate(self.func_list): - if hasattr(func, 'param_names_latex'): + if hasattr(func, "param_names_latex"): name_list.append(func.param_names_latex) else: name_list.append(func.param_names) return name_list def functions_split(self, x, y, kwargs_list, k=None): - """ - split model in different components + """Split model in different components. 
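# --- Editor's illustration (not part of the patch): the flatten/unflatten round trip
# handled by LightParam (light_param.py above). set_params() turns a keyword list into a
# flat sampling vector and get_params() rebuilds it; with 'amp' held in kwargs_fixed it is
# supplied by the linear solver rather than sampled. Sersic parameter names and the
# get_params() return signature are assumptions; a sketch only.
from lenstronomy.LightModel.light_param import LightParam

light_param = LightParam(light_model_list=["SERSIC"], kwargs_fixed=[{"amp": 1}])
kwargs_light = [{"amp": 1, "R_sersic": 0.5, "n_sersic": 2.0, "center_x": 0.0, "center_y": 0.0}]
args = light_param.set_params(kwargs_light)           # [R_sersic, n_sersic, center_x, center_y]
kwargs_back, _ = light_param.get_params(args, i=0)    # reconstructed keyword argument list
# --- end of editor's illustration ---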
:param x: coordinate in units of arcsec relative to the center of the image :param y: coordinate in units of arcsec relative to the center of the image @@ -61,38 +56,60 @@ def functions_split(self, x, y, kwargs_list, k=None): bool_list = self._bool_list(k=k) for i, model in enumerate(self.profile_type_list): if bool_list[i] is True: - if model in ['SERSIC', 'SERSIC_ELLIPSE', 'CORE_SERSIC', 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', - 'PJAFFE_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', - 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'UNIFORM', 'INTERPOL', 'ELLIPSOID']: + if model in [ + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "POWER_LAW", + "NIE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "UNIFORM", + "INTERPOL", + "ELLIPSOID", + ]: kwargs_new = kwargs_list[i].copy() - new = {'amp': 1} + new = {"amp": 1} kwargs_new.update(new) response += [self.func_list[i].function(x, y, **kwargs_new)] n += 1 - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']: - num = len(kwargs_list[i]['amp']) - new = {'amp': np.ones(num)} + elif model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"]: + num = len(kwargs_list[i]["amp"]) + new = {"amp": np.ones(num)} kwargs_new = kwargs_list[i].copy() kwargs_new.update(new) response += self.func_list[i].function_split(x, y, **kwargs_new) n += num - elif model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP', - 'SHAPELETS_ELLIPSE']: + elif model in [ + "SHAPELETS", + "SHAPELETS_POLAR", + "SHAPELETS_POLAR_EXP", + "SHAPELETS_ELLIPSE", + ]: kwargs = kwargs_list[i] - n_max = kwargs['n_max'] - if model in ['SHAPELETS_POLAR_EXP']: - num_param = int((n_max+1)**2) + n_max = kwargs["n_max"] + if model in ["SHAPELETS_POLAR_EXP"]: + num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) - new = {'amp': np.ones(num_param)} + new = {"amp": np.ones(num_param)} kwargs_new = kwargs_list[i].copy() kwargs_new.update(new) response += self.func_list[i].function_split(x, y, **kwargs_new) n += num_param - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2']: - raise ValueError("'{}' model does not support function split".format(model)) + elif model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"]: + raise ValueError( + "'{}' model does not support function split".format(model) + ) else: - raise ValueError('model type %s not valid!' % model) + raise ValueError("model type %s not valid!" % model) return response, n def num_param_linear(self, kwargs_list, list_return=False): @@ -108,36 +125,58 @@ def num_param_linear(self, kwargs_list, list_return=False): return n_list def num_param_linear_list(self, kwargs_list): - """ - returns the list (in order of the light profiles) of the number of linear components per model + """Returns the list (in order of the light profiles) of the number of linear + components per model. 
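# --- Editor's illustration (not part of the patch): what num_param_linear_list() returns
# for a mixed model list, following the branches below: one linear 'amp' per simple
# profile, one per sigma for MULTI_GAUSSIAN, and (n_max + 1)(n_max + 2)/2 shapelet
# coefficients. A sketch only.
from lenstronomy.LightModel.light_model import LightModel

model = LightModel(light_model_list=["SERSIC", "MULTI_GAUSSIAN", "SHAPELETS"])
kwargs_list = [
    {},                              # SERSIC: a single 'amp'
    {"sigma": [0.1, 0.5, 1.0]},      # MULTI_GAUSSIAN: one 'amp' per sigma
    {"n_max": 4},                    # SHAPELETS: (4 + 1)(4 + 2)/2 = 15 coefficients
]
assert model.num_param_linear_list(kwargs_list) == [1, 3, 15]
# --- end of editor's illustration ---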
:param kwargs_list: list of keyword arguments of the light profiles :return: number of linear basis set coefficients """ n_list = [] for i, model in enumerate(self.profile_type_list): - if model in ['SERSIC', 'SERSIC_ELLIPSE', 'CORE_SERSIC', 'HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', - 'PJAFFE_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', - 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'UNIFORM', 'INTERPOL', 'ELLIPSOID']: + if model in [ + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "POWER_LAW", + "NIE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "UNIFORM", + "INTERPOL", + "ELLIPSOID", + ]: n_list += [1] - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']: - num = len(kwargs_list[i]['sigma']) + elif model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"]: + num = len(kwargs_list[i]["sigma"]) n_list += [num] - elif model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP', - 'SHAPELETS_ELLIPSE']: - n_max = kwargs_list[i]['n_max'] - if model in ['SHAPELETS_POLAR_EXP']: - num_param = int((n_max+1)**2) + elif model in [ + "SHAPELETS", + "SHAPELETS_POLAR", + "SHAPELETS_POLAR_EXP", + "SHAPELETS_ELLIPSE", + ]: + n_max = kwargs_list[i]["n_max"] + if model in ["SHAPELETS_POLAR_EXP"]: + num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) n_list += [num_param] - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2']: - n_scales = kwargs_list[i]['n_scales'] - n_pixels = kwargs_list[i]['n_pixels'] + elif model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"]: + n_scales = kwargs_list[i]["n_scales"] + n_pixels = kwargs_list[i]["n_pixels"] num_param = int(n_scales * n_pixels) - n_list += [num_param] # TODO : find a way to make it the number of source pixels + n_list += [ + num_param + ] # TODO : find a way to make it the number of source pixels else: - raise ValueError('model type %s not valid!' % model) + raise ValueError("model type %s not valid!" 
% model) return n_list def update_linear(self, param, i, kwargs_list): @@ -150,32 +189,52 @@ def update_linear(self, param, i, kwargs_list): :return: kwargs list with over-written or added 'amp' parameters according to the coefficients in param """ for k, model in enumerate(self.profile_type_list): - if model in ['SERSIC', 'SERSIC_ELLIPSE', 'CORE_SERSIC', 'HERNQUIST', 'PJAFFE', 'PJAFFE_ELLIPSE', - 'HERNQUIST_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', - 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON', 'UNIFORM', 'INTERPOL', 'ELLIPSOID']: - kwargs_list[k]['amp'] = param[i] + if model in [ + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "HERNQUIST", + "PJAFFE", + "PJAFFE_ELLIPSE", + "HERNQUIST_ELLIPSE", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "POWER_LAW", + "NIE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + "UNIFORM", + "INTERPOL", + "ELLIPSOID", + ]: + kwargs_list[k]["amp"] = param[i] i += 1 - elif model in ['MULTI_GAUSSIAN', 'MULTI_GAUSSIAN_ELLIPSE']: - num_param = len(kwargs_list[k]['sigma']) - kwargs_list[k]['amp'] = param[i:i + num_param] + elif model in ["MULTI_GAUSSIAN", "MULTI_GAUSSIAN_ELLIPSE"]: + num_param = len(kwargs_list[k]["sigma"]) + kwargs_list[k]["amp"] = param[i : i + num_param] i += num_param - elif model in ['SHAPELETS', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP', - 'SHAPELETS_ELLIPSE']: - n_max = kwargs_list[k]['n_max'] - if model in ['SHAPELETS_POLAR_EXP']: - num_param = int((n_max+1)**2) + elif model in [ + "SHAPELETS", + "SHAPELETS_POLAR", + "SHAPELETS_POLAR_EXP", + "SHAPELETS_ELLIPSE", + ]: + n_max = kwargs_list[k]["n_max"] + if model in ["SHAPELETS_POLAR_EXP"]: + num_param = int((n_max + 1) ** 2) else: num_param = int((n_max + 1) * (n_max + 2) / 2) - kwargs_list[k]['amp'] = param[i:i+num_param] + kwargs_list[k]["amp"] = param[i : i + num_param] i += num_param - elif model in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2']: - n_scales = kwargs_list[k]['n_scales'] - n_pixels = kwargs_list[k]['n_pixels'] + elif model in ["SLIT_STARLETS", "SLIT_STARLETS_GEN2"]: + n_scales = kwargs_list[k]["n_scales"] + n_pixels = kwargs_list[k]["n_pixels"] num_param = int(n_scales * n_pixels) - kwargs_list[k]['amp'] = param[i:i+num_param] + kwargs_list[k]["amp"] = param[i : i + num_param] i += num_param else: - raise ValueError('model type %s not valid!' % model) + raise ValueError("model type %s not valid!" % model) return kwargs_list, i def add_fixed_linear(self, kwargs_fixed_list): @@ -187,14 +246,14 @@ def add_fixed_linear(self, kwargs_fixed_list): for k, model in enumerate(self.profile_type_list): kwargs_fixed = kwargs_fixed_list[k] param_names = self.param_name_list[k] - if 'amp' in param_names: - if 'amp' not in kwargs_fixed: - kwargs_fixed['amp'] = 1 + if "amp" in param_names: + if "amp" not in kwargs_fixed: + kwargs_fixed["amp"] = 1 return kwargs_fixed_list def linear_param_from_kwargs(self, kwargs_list): - """ - inverse function of update_linear() returning the linear amplitude list for the keyword argument list + """Inverse function of update_linear() returning the linear amplitude list for + the keyword argument list. 
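# --- Editor's illustration (not part of the patch): the inverse relation between
# update_linear() and linear_param_from_kwargs(). A flat coefficient vector is written
# into the 'amp' entries and can be read back unchanged; keyword entries other than
# 'amp' and 'sigma' are omitted because these two methods do not touch them. A sketch only.
from lenstronomy.LightModel.light_model import LightModel

model = LightModel(light_model_list=["SERSIC", "MULTI_GAUSSIAN"])
kwargs_list = [
    {"R_sersic": 0.5, "n_sersic": 2.0},      # SERSIC: one linear 'amp'
    {"sigma": [0.1, 0.5]},                   # MULTI_GAUSSIAN: two linear 'amp's
]
param = [10.0, 1.0, 2.0]
kwargs_list, _ = model.update_linear(param, i=0, kwargs_list=kwargs_list)
assert model.linear_param_from_kwargs(kwargs_list) == param
# --- end of editor's illustration ---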
:param kwargs_list: model parameters including the linear amplitude parameters :type kwargs_list: list of keyword arguments @@ -205,28 +264,39 @@ def linear_param_from_kwargs(self, kwargs_list): for k, model in enumerate(self.profile_type_list): kwargs_ = kwargs_list[k] param_names = self.param_name_list[k] - if 'amp' in param_names: - amp = kwargs_['amp'] + if "amp" in param_names: + amp = kwargs_["amp"] amp_list = np.atleast_1d(amp) for a in amp_list: param.append(a) return param def check_positive_flux_profile(self, kwargs_list): - """ - check whether linear amplitude parameter are non-negative for specified list of lens models that have a - physical amplitude interpretation + """Check whether linear amplitude parameter are non-negative for specified list + of lens models that have a physical amplitude interpretation. :param kwargs_list: light model parameter keyword argument list :return: bool, if True, no specified model has negative flux """ pos_bool = True for k, model in enumerate(self.profile_type_list): - if 'amp' in kwargs_list[k]: - if model in ['SERSIC', 'SERSIC_ELLIPSE', 'CORE_SERSIC', 'HERNQUIST', 'PJAFFE', 'PJAFFE_ELLIPSE', - 'HERNQUIST_ELLIPSE', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'POWER_LAW', 'NIE', 'CHAMELEON', - 'DOUBLE_CHAMELEON']: - if kwargs_list[k]['amp'] < 0: + if "amp" in kwargs_list[k]: + if model in [ + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "HERNQUIST", + "PJAFFE", + "PJAFFE_ELLIPSE", + "HERNQUIST_ELLIPSE", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "POWER_LAW", + "NIE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + ]: + if kwargs_list[k]["amp"] < 0: pos_bool = False break return pos_bool diff --git a/lenstronomy/Plots/chain_plot.py b/lenstronomy/Plots/chain_plot.py index 0609db844..3bfaed952 100644 --- a/lenstronomy/Plots/chain_plot.py +++ b/lenstronomy/Plots/chain_plot.py @@ -6,14 +6,15 @@ from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def plot_chain_list(chain_list, index=0, num_average=100): - """ - plots the output of a chain of samples (MCMC or PSO) with the some diagnostics of convergence. - This routine is an example and more tests might be appropriate to analyse a specific chain. + """Plots the output of a chain of samples (MCMC or PSO) with the some diagnostics of + convergence. This routine is an example and more tests might be appropriate to + analyse a specific chain. :param chain_list: list of chains with arguments [type string, samples etc...] 
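# --- Editor's illustration (not part of the patch): the chain_list structure that
# plot_chain_list() expects, inferred from the branches below. Each entry starts with a
# type string ('PSO', 'EMCEE', ...) followed by the sampler output; the array shapes and
# parameter names here are placeholders. A sketch only.
import numpy as np
from lenstronomy.Plots import chain_plot

param_names = ["theta_E", "gamma"]
samples = np.random.randn(5000, len(param_names))   # (n_samples, n_params) MCMC samples
log_l = np.random.randn(5000)                       # log-likelihood per sample
chain_list = [["EMCEE", samples, param_names, log_l]]
fig, axes = chain_plot.plot_chain_list(chain_list, index=0, num_average=100)
# --- end of editor's illustration ---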
:param index: index of chain to be plotted @@ -22,19 +23,19 @@ def plot_chain_list(chain_list, index=0, num_average=100): """ chain_i = chain_list[index] chain_type = chain_i[0] - if chain_type == 'PSO': + if chain_type == "PSO": chain, param = chain_i[1:] f, axes = plot_chain(chain, param) - elif chain_type in ['EMCEE', 'ZEUS']: + elif chain_type in ["EMCEE", "ZEUS"]: samples, param, dist = chain_i[1:] f, ax = plt.subplots(1, 1, figsize=(6, 6)) axes = plot_mcmc_behaviour(ax, samples, param, dist, num_average=num_average) - elif chain_type in ['MULTINEST', 'DYPOLYCHORD', 'DYNESTY']: + elif chain_type in ["MULTINEST", "DYPOLYCHORD", "DYNESTY"]: samples, param, dist = chain_i[1:4] f, ax = plt.subplots(1, 1, figsize=(6, 6)) axes = plot_mcmc_behaviour(ax, samples, param, dist, num_average=num_average) else: - raise ValueError('chain_type %s not supported for plotting' % chain_type) + raise ValueError("chain_type %s not supported for plotting" % chain_type) return f, axes @@ -45,7 +46,7 @@ def plot_chain(chain, param_list): f, axes = plt.subplots(1, 3, figsize=(18, 6)) ax = axes[0] ax.plot(np.log10(-np.array(chi2_list))) - ax.set_title('-logL') + ax.set_title("-logL") ax = axes[1] pos = np.array(pos_list) @@ -53,28 +54,31 @@ def plot_chain(chain, param_list): n_iter = len(pos) plt.figure() for i in range(0, len(pos[0])): - ax.plot((pos[:, i]-pos[n_iter-1, i]) / (pos[n_iter-1, i] + 1), label=param_list[i]) - ax.set_title('particle position') + ax.plot( + (pos[:, i] - pos[n_iter - 1, i]) / (pos[n_iter - 1, i] + 1), + label=param_list[i], + ) + ax.set_title("particle position") ax.legend() ax = axes[2] for i in range(0, len(vel[0])): - ax.plot(vel[:, i] / (pos[n_iter-1, i] + 1), label=param_list[i]) - ax.set_title('param velocity') + ax.plot(vel[:, i] / (pos[n_iter - 1, i] + 1), label=param_list[i]) + ax.set_title("param velocity") ax.legend() return f, axes @export def plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc=None, num_average=100): - """ - plots the MCMC behaviour and looks for convergence of the chain + """Plots the MCMC behaviour and looks for convergence of the chain. 
:param ax: matplotlib.axis instance :param samples_mcmc: parameters sampled 2d numpy array :param param_mcmc: list of parameters :param dist_mcmc: log likelihood of the chain - :param num_average: number of samples to average (should coincide with the number of samples in the emcee process) + :param num_average: number of samples to average (should coincide with the number of + samples in the emcee process) :return: """ num_samples = len(samples_mcmc[:, 0]) @@ -82,15 +86,23 @@ def plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc=None, num_averag n_points = int((num_samples - num_samples % num_average) / num_average) for i, param_name in enumerate(param_mcmc): samples = samples_mcmc[:, i] - samples_averaged = np.average(samples[:int(n_points * num_average)].reshape(n_points, num_average), axis=1) + samples_averaged = np.average( + samples[: int(n_points * num_average)].reshape(n_points, num_average), + axis=1, + ) end_point = np.mean(samples_averaged) samples_renormed = (samples_averaged - end_point) / np.std(samples_averaged) ax.plot(samples_renormed, label=param_name) if dist_mcmc is not None: - dist_averaged = -np.max(dist_mcmc[:int(n_points * num_average)].reshape(n_points, num_average), axis=1) - dist_normed = (dist_averaged - np.max(dist_averaged)) / (np.max(dist_averaged) - np.min(dist_averaged)) - ax.plot(dist_normed, label="logL", color='k', linewidth=2) + dist_averaged = -np.max( + dist_mcmc[: int(n_points * num_average)].reshape(n_points, num_average), + axis=1, + ) + dist_normed = (dist_averaged - np.max(dist_averaged)) / ( + np.max(dist_averaged) - np.min(dist_averaged) + ) + ax.plot(dist_normed, label="logL", color="k", linewidth=2) ax.legend() return ax @@ -103,73 +115,108 @@ def psf_iteration_compare(kwargs_psf, **kwargs): :param kwargs: kwargs to send to matplotlib.pyplot.matshow() :return: """ - psf_out = kwargs_psf['kernel_point_source'] - psf_in = kwargs_psf['kernel_point_source_init'] + psf_out = kwargs_psf["kernel_point_source"] + psf_in = kwargs_psf["kernel_point_source_init"] # psf_error_map = kwargs_psf.get('psf_error_map', None) from lenstronomy.Data.psf import PSF + psf = PSF(**kwargs_psf) # psf_out = psf.kernel_point_source psf_error_map = psf.psf_error_map n_kernel = len(psf_in) - delta_x = n_kernel/20. - delta_y = n_kernel/10. 
+ delta_x = n_kernel / 20.0 + delta_y = n_kernel / 10.0 - if 'cmap' not in kwargs: - kwargs['cmap'] = 'seismic' + if "cmap" not in kwargs: + kwargs["cmap"] = "seismic" n = 3 if psf_error_map is not None: n += 1 - f, axes = plt.subplots(1, n, figsize=(5*n, 5)) + f, axes = plt.subplots(1, n, figsize=(5 * n, 5)) ax = axes[0] - im = ax.matshow(np.log10(psf_in), origin='lower', **kwargs) + im = ax.matshow(np.log10(psf_in), origin="lower", **kwargs) v_min, v_max = im.get_clim() - if 'vmin' not in kwargs: - kwargs['vmin'] = v_min - if 'vmax' not in kwargs: - kwargs['vmax'] = v_max + if "vmin" not in kwargs: + kwargs["vmin"] = v_min + if "vmax" not in kwargs: + kwargs["vmax"] = v_max divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) - ax.text(delta_x, n_kernel-delta_y, "Initial PSF model", color="k", fontsize=20, backgroundcolor='w') + ax.text( + delta_x, + n_kernel - delta_y, + "Initial PSF model", + color="k", + fontsize=20, + backgroundcolor="w", + ) ax = axes[1] - im = ax.matshow(np.log10(psf_out), origin='lower', **kwargs) + im = ax.matshow(np.log10(psf_out), origin="lower", **kwargs) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) - ax.text(delta_x, n_kernel-delta_y, "iterative reconstruction", color="k", fontsize=20, backgroundcolor='w') + ax.text( + delta_x, + n_kernel - delta_y, + "iterative reconstruction", + color="k", + fontsize=20, + backgroundcolor="w", + ) ax = axes[2] kwargs_new = copy.deepcopy(kwargs) - del kwargs_new['vmin'] - del kwargs_new['vmax'] + del kwargs_new["vmin"] + del kwargs_new["vmax"] - im = ax.matshow(psf_out-psf_in, origin='lower', vmin=-10**-3, vmax=10**-3, **kwargs_new) + im = ax.matshow( + psf_out - psf_in, origin="lower", vmin=-(10**-3), vmax=10**-3, **kwargs_new + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) - ax.text(delta_x, n_kernel-delta_y, "difference", color="k", fontsize=20, backgroundcolor='w') + ax.text( + delta_x, + n_kernel - delta_y, + "difference", + color="k", + fontsize=20, + backgroundcolor="w", + ) if psf_error_map is not None: ax = axes[3] - im = ax.matshow(np.log10(psf_error_map*psf.kernel_point_source**2), origin='lower', **kwargs) + im = ax.matshow( + np.log10(psf_error_map * psf.kernel_point_source**2), + origin="lower", + **kwargs + ) n_kernel = len(psf_error_map) - delta_x = n_kernel / 20. - delta_y = n_kernel / 10. 
+ delta_x = n_kernel / 20.0 + delta_y = n_kernel / 10.0 divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) - ax.text(delta_x, n_kernel - delta_y, "psf error map", color="k", fontsize=20, backgroundcolor='w') + ax.text( + delta_x, + n_kernel - delta_y, + "psf error map", + color="k", + fontsize=20, + backgroundcolor="w", + ) f.tight_layout() return f, axes diff --git a/lenstronomy/Plots/lens_plot.py b/lenstronomy/Plots/lens_plot.py index 434d67a98..e03bb780f 100644 --- a/lenstronomy/Plots/lens_plot.py +++ b/lenstronomy/Plots/lens_plot.py @@ -1,4 +1,3 @@ - import lenstronomy.Util.util as util from lenstronomy.Util.param_util import shear_cartesian2polar import lenstronomy.Util.simulation_util as sim_util @@ -14,40 +13,64 @@ from lenstronomy.Data.pixel_grid import PixelGrid from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() # TODO define coordinate grid beforehand, e.g. kwargs_data # TODO feed in PointSource instance? + @export -def lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourcePos_x=0, sourcePos_y=0, - point_source=False, with_caustics=False, with_convergence=True, coord_center_ra=0, - coord_center_dec=0, coord_inverse=False, fast_caustic=True, **kwargs): - """ - plots a lens model (convergence) and the critical curves and caustics +def lens_model_plot( + ax, + lensModel, + kwargs_lens, + numPix=500, + deltaPix=0.01, + sourcePos_x=0, + sourcePos_y=0, + point_source=False, + with_caustics=False, + with_convergence=True, + coord_center_ra=0, + coord_center_dec=0, + coord_inverse=False, + fast_caustic=True, + **kwargs +): + """Plots a lens model (convergence) and the critical curves and caustics. 
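# --- Editor's illustration (not part of the patch): a minimal lens_model_plot() call.
# The 'SIE' lens profile and its parameter names (theta_E, e1, e2, center_x, center_y)
# are assumptions not defined in this file; treat this as a sketch.
import matplotlib.pyplot as plt
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.Plots import lens_plot

lens_model = LensModel(lens_model_list=["SIE"])
kwargs_lens = [{"theta_E": 1.0, "e1": 0.1, "e2": 0.0, "center_x": 0.0, "center_y": 0.0}]
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
lens_plot.lens_model_plot(
    ax, lens_model, kwargs_lens,
    numPix=200, deltaPix=0.02,
    sourcePos_x=0.05, sourcePos_y=0.0,
    point_source=True, with_caustics=True,
)
plt.show()
# --- end of editor's illustration ---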
:param ax: matplotlib axis instance :param lensModel: LensModel() class instance :param kwargs_lens: lens model keyword argument list :param numPix: total number of pixels (for convergence map) :param deltaPix: width of pixel (total frame size is deltaPix x numPix) - :param sourcePos_x: float, x-position of point source (image positions computed by the lens equation) - :param sourcePos_y: float, y-position of point source (image positions computed by the lens equation) - :param point_source: bool, if True, illustrates and computes the image positions of the point source - :param with_caustics: bool, if True, illustrates the critical curve and caustics of the system + :param sourcePos_x: float, x-position of point source (image positions computed by + the lens equation) + :param sourcePos_y: float, y-position of point source (image positions computed by + the lens equation) + :param point_source: bool, if True, illustrates and computes the image positions of + the point source + :param with_caustics: bool, if True, illustrates the critical curve and caustics of + the system :param with_convergence: bool, if True, illustrates the convergence map :param coord_center_ra: float, x-coordinate of the center of the frame :param coord_center_dec: float, y-coordinate of the center of the frame - :param coord_inverse: bool, if True, inverts the x-coordinates to go from right-to-left - (effectively the RA definition) - :param fast_caustic: boolean, if True, uses faster but less precise caustic calculation - (might have troubles for the outer caustic (inner critical curve) + :param coord_inverse: bool, if True, inverts the x-coordinates to go from right-to- + left (effectively the RA definition) + :param fast_caustic: boolean, if True, uses faster but less precise caustic + calculation (might have troubles for the outer caustic (inner critical curve) :param with_convergence: boolean, if True, plots the convergence of the deflector :return: """ - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, center_ra=coord_center_ra, - center_dec=coord_center_dec, inverse=coord_inverse) + kwargs_data = sim_util.data_configure_simple( + numPix, + deltaPix, + center_ra=coord_center_ra, + center_dec=coord_center_dec, + inverse=coord_inverse, + ) data = ImageData(**kwargs_data) _coords = data _frame_size = numPix * deltaPix @@ -59,17 +82,38 @@ def lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourc extent = [ra0, ra0 + _frame_size, dec0, dec0 + _frame_size] if with_convergence: - kwargs_convergence = kwargs.get('kwargs_convergence', {}) - convergence_plot(ax, pixel_grid=_coords, lens_model=lensModel, kwargs_lens=kwargs_lens, extent=extent, - **kwargs_convergence) + kwargs_convergence = kwargs.get("kwargs_convergence", {}) + convergence_plot( + ax, + pixel_grid=_coords, + lens_model=lensModel, + kwargs_lens=kwargs_lens, + extent=extent, + **kwargs_convergence + ) if with_caustics is True: - kwargs_caustics = kwargs.get('kwargs_caustics', {}) - caustics_plot(ax, pixel_grid=_coords, lens_model=lensModel, kwargs_lens=kwargs_lens, fast_caustic=fast_caustic, - coord_inverse=coord_inverse, pixel_offset=True, **kwargs_caustics) + kwargs_caustics = kwargs.get("kwargs_caustics", {}) + caustics_plot( + ax, + pixel_grid=_coords, + lens_model=lensModel, + kwargs_lens=kwargs_lens, + fast_caustic=fast_caustic, + coord_inverse=coord_inverse, + pixel_offset=True, + **kwargs_caustics + ) if point_source: - kwargs_point_source = kwargs.get('kwargs_point_source', {}) - point_source_plot(ax, 
pixel_grid=_coords, lens_model=lensModel, kwargs_lens=kwargs_lens, - source_x=sourcePos_x, source_y=sourcePos_y, **kwargs_point_source) + kwargs_point_source = kwargs.get("kwargs_point_source", {}) + point_source_plot( + ax, + pixel_grid=_coords, + lens_model=lensModel, + kwargs_lens=kwargs_lens, + source_x=sourcePos_x, + source_y=sourcePos_y, + **kwargs_point_source + ) if coord_inverse: ax.set_xlim([ra0, ra0 - _frame_size]) else: @@ -81,12 +125,22 @@ def lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourc return ax -def convergence_plot(ax, pixel_grid, lens_model, kwargs_lens, extent=None, vmin=-1, vmax=1, cmap='Greys', **kwargs): - """ - plot convergence +def convergence_plot( + ax, + pixel_grid, + lens_model, + kwargs_lens, + extent=None, + vmin=-1, + vmax=1, + cmap="Greys", + **kwargs +): + """Plot convergence. :param ax: matplotlib axis instance - :param pixel_grid: lenstronomy PixelGrid() instance (or class with inheritance of PixelGrid() + :param pixel_grid: lenstronomy PixelGrid() instance (or class with inheritance of + PixelGrid() :param lens_model: LensModel() class instance :param kwargs_lens: lens model keyword argument list :param extent: [[min, max] [min, max]] of frame @@ -101,12 +155,31 @@ def convergence_plot(ax, pixel_grid, lens_model, kwargs_lens, extent=None, vmin= y_grid1d = util.image2array(y_grid) kappa_result = lens_model.kappa(x_grid1d, y_grid1d, kwargs_lens) kappa_result = util.array2image(kappa_result) - _ = ax.matshow(np.log10(kappa_result), origin='lower', extent=extent, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs) + _ = ax.matshow( + np.log10(kappa_result), + origin="lower", + extent=extent, + cmap=cmap, + vmin=vmin, + vmax=vmax, + **kwargs + ) return ax -def caustics_plot(ax, pixel_grid, lens_model, kwargs_lens, fast_caustic=True, coord_inverse=False, color_crit='r', - color_caustic='g', pixel_offset=False, *args, **kwargs): +def caustics_plot( + ax, + pixel_grid, + lens_model, + kwargs_lens, + fast_caustic=True, + coord_inverse=False, + color_crit="r", + color_caustic="g", + pixel_offset=False, + *args, + **kwargs +): """ :param ax: matplotlib axis instance @@ -132,37 +205,81 @@ def caustics_plot(ax, pixel_grid, lens_model, kwargs_lens, fast_caustic=True, co ra0, dec0 = pixel_grid.radec_at_xy_0 origin = [ra0, dec0] if fast_caustic: - ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lens_model_ext.critical_curve_caustics( - kwargs_lens, compute_window=frame_size, grid_scale=pixel_width, center_x=coord_center_ra, - center_y=coord_center_dec) + ( + ra_crit_list, + dec_crit_list, + ra_caustic_list, + dec_caustic_list, + ) = lens_model_ext.critical_curve_caustics( + kwargs_lens, + compute_window=frame_size, + grid_scale=pixel_width, + center_x=coord_center_ra, + center_y=coord_center_dec, + ) points_only = False else: # only supports individual points due to output of critical_curve_tiling definition points_only = True - ra_crit_list, dec_crit_list = lens_model_ext.critical_curve_tiling(kwargs_lens, compute_window=frame_size, - start_scale=pixel_width, max_order=10, - center_x=coord_center_ra, - center_y=coord_center_dec) - ra_caustic_list, dec_caustic_list = lens_model.ray_shooting(ra_crit_list, dec_crit_list, kwargs_lens) + ra_crit_list, dec_crit_list = lens_model_ext.critical_curve_tiling( + kwargs_lens, + compute_window=frame_size, + start_scale=pixel_width, + max_order=10, + center_x=coord_center_ra, + center_y=coord_center_dec, + ) + ra_caustic_list, dec_caustic_list = lens_model.ray_shooting( + ra_crit_list, 
dec_crit_list, kwargs_lens + ) # ra_crit_list, dec_crit_list = list(ra_crit_list), list(dec_crit_list) # ra_caustic_list, dec_caustic_list = list(ra_caustic_list), list(dec_caustic_list) - plot_util.plot_line_set(ax, pixel_grid, ra_caustic_list, dec_caustic_list, color=color_caustic, origin=origin, - flipped_x=coord_inverse, points_only=points_only, pixel_offset=pixel_offset, *args, - **kwargs) - plot_util.plot_line_set(ax, pixel_grid, ra_crit_list, dec_crit_list, color=color_crit, origin=origin, - flipped_x=coord_inverse, points_only=points_only, pixel_offset=pixel_offset, *args, - **kwargs) + plot_util.plot_line_set( + ax, + pixel_grid, + ra_caustic_list, + dec_caustic_list, + color=color_caustic, + origin=origin, + flipped_x=coord_inverse, + points_only=points_only, + pixel_offset=pixel_offset, + *args, + **kwargs + ) + plot_util.plot_line_set( + ax, + pixel_grid, + ra_crit_list, + dec_crit_list, + color=color_crit, + origin=origin, + flipped_x=coord_inverse, + points_only=points_only, + pixel_offset=pixel_offset, + *args, + **kwargs + ) return ax -def point_source_plot(ax, pixel_grid, lens_model, kwargs_lens, source_x, source_y, name_list=None, **kwargs): - """ - plots and illustrates images of a point source - The plotting routine orders the image labels according to the arrival time and illustrates a diamond shape of the - size of the magnification. The coordinates are chosen in pixel coordinates +def point_source_plot( + ax, + pixel_grid, + lens_model, + kwargs_lens, + source_x, + source_y, + name_list=None, + **kwargs +): + """Plots and illustrates images of a point source The plotting routine orders the + image labels according to the arrival time and illustrates a diamond shape of the + size of the magnification. The coordinates are chosen in pixel coordinates. 
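# --- Editor's illustration (not part of the patch): the image positions marked by
# point_source_plot() and arrival_time_surface() come from
# LensEquationSolver.image_position_from_source(), as in the calls below in this file.
# The 'SIS' profile and its parameters are assumptions; a sketch only.
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver

lens_model = LensModel(lens_model_list=["SIS"])
kwargs_lens = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}]
solver = LensEquationSolver(lens_model)
theta_x, theta_y = solver.image_position_from_source(
    0.1, 0.0, kwargs_lens, min_distance=0.05, search_window=5
)
magnifications = lens_model.magnification(theta_x, theta_y, kwargs_lens)  # sets marker sizes
# --- end of editor's illustration ---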
:param ax: matplotlib axis instance - :param pixel_grid: lenstronomy PixelGrid() instance (or class with inheritance of PixelGrid() + :param pixel_grid: lenstronomy PixelGrid() instance (or class with inheritance of + PixelGrid() :param lens_model: LensModel() class instance :param kwargs_lens: lens model keyword argument list :param source_x: x-position of source @@ -173,39 +290,68 @@ def point_source_plot(ax, pixel_grid, lens_model, kwargs_lens, source_x, source_ :return: matplotlib axis instance with figure """ from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver + solver = LensEquationSolver(lens_model) x_center, y_center = pixel_grid.center delta_pix = pixel_grid.pixel_width ra0, dec0 = pixel_grid.radec_at_xy_0 tranform = pixel_grid.transform_angle2pix - if np.linalg.det(tranform) < 0: # if coordiate transform has negative parity (#TODO temporary fix) + if ( + np.linalg.det(tranform) < 0 + ): # if coordiate transform has negative parity (#TODO temporary fix) delta_pix_x = -delta_pix else: delta_pix_x = delta_pix origin = [ra0, dec0] - theta_x, theta_y = solver.image_position_from_source(source_x, source_y, kwargs_lens, - search_window=np.max(pixel_grid.width), x_center=x_center, - y_center=y_center, min_distance=pixel_grid.pixel_width) + theta_x, theta_y = solver.image_position_from_source( + source_x, + source_y, + kwargs_lens, + search_window=np.max(pixel_grid.width), + x_center=x_center, + y_center=y_center, + min_distance=pixel_grid.pixel_width, + ) mag_images = lens_model.magnification(theta_x, theta_y, kwargs_lens) x_image, y_image = pixel_grid.map_coord2pix(theta_x, theta_y) if name_list is None: - name_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] + name_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"] for i in range(len(x_image)): x_ = (x_image[i] + 0.5) * delta_pix_x + origin[0] y_ = (y_image[i] + 0.5) * delta_pix + origin[1] - ax.plot(x_, y_, 'dk', markersize=4 * (1 + np.log(np.abs(mag_images[i]))), alpha=0.5) - ax.text(x_, y_, name_list[i], fontsize=20, color='k') + ax.plot( + x_, y_, "dk", markersize=4 * (1 + np.log(np.abs(mag_images[i]))), alpha=0.5 + ) + ax.text(x_, y_, name_list[i], fontsize=20, color="k") x_source, y_source = pixel_grid.map_coord2pix(source_x, source_y) - ax.plot((x_source + 0.5) * delta_pix_x + origin[0], (y_source + 0.5) * delta_pix + origin[1], '*k', markersize=10) + ax.plot( + (x_source + 0.5) * delta_pix_x + origin[0], + (y_source + 0.5) * delta_pix + origin[1], + "*k", + markersize=10, + ) return ax @export -def arrival_time_surface(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourcePos_x=0, sourcePos_y=0, - with_caustics=False, point_source=False, n_levels=10, kwargs_contours=None, - image_color_list=None, letter_font_size=20, name_list=None): +def arrival_time_surface( + ax, + lensModel, + kwargs_lens, + numPix=500, + deltaPix=0.01, + sourcePos_x=0, + sourcePos_y=0, + with_caustics=False, + point_source=False, + n_levels=10, + kwargs_contours=None, + image_color_list=None, + letter_font_size=20, + name_list=None, +): """ :param ax: matplotlib axis instance @@ -233,50 +379,90 @@ def arrival_time_surface(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, # kwargs_lens, compute_window=_frame_size, grid_scale=deltaPix/2.) 
x_grid1d = util.image2array(x_grid) y_grid1d = util.image2array(y_grid) - fermat_surface = lensModel.fermat_potential(x_grid1d, y_grid1d, kwargs_lens, sourcePos_x, sourcePos_y) + fermat_surface = lensModel.fermat_potential( + x_grid1d, y_grid1d, kwargs_lens, sourcePos_x, sourcePos_y + ) fermat_surface = util.array2image(fermat_surface) if kwargs_contours is None: kwargs_contours = {} # , cmap='Greys', vmin=-1, vmax=1) #, cmap=self._cmap, vmin=v_min, vmax=v_max) if with_caustics is True: - ra_crit_list, dec_crit_list = lensModelExt.critical_curve_tiling(kwargs_lens, compute_window=_frame_size, - start_scale=deltaPix/5, max_order=10) - ra_caustic_list, dec_caustic_list = lensModel.ray_shooting(ra_crit_list, dec_crit_list, kwargs_lens) - plot_util.plot_line_set(ax, _coords, ra_caustic_list, dec_caustic_list, origin=origin, color='g') - plot_util.plot_line_set(ax, _coords, ra_crit_list, dec_crit_list, origin=origin, color='r') + ra_crit_list, dec_crit_list = lensModelExt.critical_curve_tiling( + kwargs_lens, + compute_window=_frame_size, + start_scale=deltaPix / 5, + max_order=10, + ) + ra_caustic_list, dec_caustic_list = lensModel.ray_shooting( + ra_crit_list, dec_crit_list, kwargs_lens + ) + plot_util.plot_line_set( + ax, _coords, ra_caustic_list, dec_caustic_list, origin=origin, color="g" + ) + plot_util.plot_line_set( + ax, _coords, ra_crit_list, dec_crit_list, origin=origin, color="r" + ) if point_source is True: from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver + solver = LensEquationSolver(lensModel) - theta_x, theta_y = solver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=deltaPix, search_window=deltaPix*numPix) + theta_x, theta_y = solver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=deltaPix, + search_window=deltaPix * numPix, + ) fermat_pot_images = lensModel.fermat_potential(theta_x, theta_y, kwargs_lens) - _ = ax.contour(x_grid, y_grid, fermat_surface, origin='lower', # extent=[0, _frame_size, 0, _frame_size], - levels=np.sort(fermat_pot_images), **kwargs_contours) + _ = ax.contour( + x_grid, + y_grid, + fermat_surface, + origin="lower", # extent=[0, _frame_size, 0, _frame_size], + levels=np.sort(fermat_pot_images), + **kwargs_contours + ) # mag_images = lensModel.magnification(theta_x, theta_y, kwargs_lens) x_image, y_image = _coords.map_coord2pix(theta_x, theta_y) if name_list is None: - name_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] + name_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"] for i in range(len(x_image)): - x_ = (x_image[i] + 0.5) * deltaPix - _frame_size/2 - y_ = (y_image[i] + 0.5) * deltaPix - _frame_size/2 + x_ = (x_image[i] + 0.5) * deltaPix - _frame_size / 2 + y_ = (y_image[i] + 0.5) * deltaPix - _frame_size / 2 if image_color_list is None: - color = 'k' + color = "k" else: color = image_color_list[i] - ax.plot(x_, y_, 'x', markersize=10, alpha=1, color=color) + ax.plot(x_, y_, "x", markersize=10, alpha=1, color=color) # markersize=8*(1 + np.log(np.abs(mag_images[i]))) - ax.text(x_ + deltaPix, y_ + deltaPix, name_list[i], fontsize=letter_font_size, color='k') + ax.text( + x_ + deltaPix, + y_ + deltaPix, + name_list[i], + fontsize=letter_font_size, + color="k", + ) x_source, y_source = _coords.map_coord2pix(sourcePos_x, sourcePos_y) - ax.plot((x_source + 0.5) * deltaPix - _frame_size/2, (y_source + 0.5) * deltaPix - _frame_size/2, '*k', - markersize=20) + ax.plot( + (x_source + 0.5) * deltaPix - _frame_size / 2, + 
(y_source + 0.5) * deltaPix - _frame_size / 2, + "*k", + markersize=20, + ) else: vmin = np.min(fermat_surface) vmax = np.max(fermat_surface) levels = np.linspace(start=vmin, stop=vmax, num=n_levels) - im = ax.contour(x_grid, y_grid, fermat_surface, origin='lower', # extent=[0, _frame_size, 0, _frame_size], - levels=levels, **kwargs_contours) + im = ax.contour( + x_grid, + y_grid, + fermat_surface, + origin="lower", # extent=[0, _frame_size, 0, _frame_size], + levels=levels, + **kwargs_contours + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) @@ -284,7 +470,9 @@ def arrival_time_surface(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, @export -def curved_arc_illustration(ax, lensModel, kwargs_lens, with_centroid=True, stretch_scale=0.1, color='k'): +def curved_arc_illustration( + ax, lensModel, kwargs_lens, with_centroid=True, stretch_scale=0.1, color="k" +): """ :param ax: matplotlib axis instance @@ -301,22 +489,46 @@ def curved_arc_illustration(ax, lensModel, kwargs_lens, with_centroid=True, stre # check whether curved arc lens_model_list = lensModel.lens_model_list for i, lens_type in enumerate(lens_model_list): - if lens_type in ['CURVED_ARC', 'CURVED_ARC_SIS_MST', 'CURVED_ARC_CONST', 'CURVED_ARC_CONST_MST', - 'CURVED_ARC_SPT', 'CURVED_ARC_TAN_DIFF']: - plot_arc(ax, with_centroid=with_centroid, stretch_scale=stretch_scale, color=color, **kwargs_lens[i]) + if lens_type in [ + "CURVED_ARC", + "CURVED_ARC_SIS_MST", + "CURVED_ARC_CONST", + "CURVED_ARC_CONST_MST", + "CURVED_ARC_SPT", + "CURVED_ARC_TAN_DIFF", + ]: + plot_arc( + ax, + with_centroid=with_centroid, + stretch_scale=stretch_scale, + color=color, + **kwargs_lens[i] + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) # rectangular frame - ax.axis('scaled') + ax.axis("scaled") # plot coordinate frame and scale @export -def plot_arc(ax, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y, stretch_scale=0.1, - with_centroid=True, linewidth=1, color='k', dtan_dtan=0): +def plot_arc( + ax, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + stretch_scale=0.1, + with_centroid=True, + linewidth=1, + color="k", + dtan_dtan=0, +): """ :param ax: matplotlib.axes instance @@ -336,18 +548,40 @@ def plot_arc(ax, tangential_stretch, radial_stretch, curvature, direction, cente :return: """ # plot line to centroid - center_x_spp, center_y_spp = center_deflector(curvature, direction, center_x, center_y) + center_x_spp, center_y_spp = center_deflector( + curvature, direction, center_x, center_y + ) if with_centroid: - ax.plot([center_x, center_x_spp], [center_y, center_y_spp], '--', color=color, alpha=0.5, linewidth=linewidth) - ax.plot([center_x_spp], [center_y_spp], '*', color=color, alpha=0.5, linewidth=linewidth) + ax.plot( + [center_x, center_x_spp], + [center_y, center_y_spp], + "--", + color=color, + alpha=0.5, + linewidth=linewidth, + ) + ax.plot( + [center_x_spp], + [center_y_spp], + "*", + color=color, + alpha=0.5, + linewidth=linewidth, + ) # plot radial stretch to scale x_r = np.cos(direction) * radial_stretch * stretch_scale y_r = np.sin(direction) * radial_stretch * stretch_scale - ax.plot([center_x - x_r, center_x + x_r], [center_y - y_r, center_y + y_r], '--', color=color, linewidth=linewidth) + ax.plot( + [center_x - x_r, center_x + x_r], + [center_y - y_r, center_y + y_r], + "--", + color=color, + linewidth=linewidth, + ) # compute angle of size of the tangential stretch - r = 1. 
/ curvature + r = 1.0 / curvature # make sure tangential stretch * stretch_scale is not larger than r * 2pi such that the full circle is only # plotted once @@ -359,7 +593,7 @@ def plot_arc(ax, tangential_stretch, radial_stretch, curvature, direction, cente # plot points on circle x_curve = r * np.cos(phi) + center_x_spp y_curve = r * np.sin(phi) + center_y_spp - ax.plot(x_curve, y_curve, '--', color=color, linewidth=linewidth) + ax.plot(x_curve, y_curve, "--", color=color, linewidth=linewidth) # make round circle with start point to end to close the circle r_c, t_c = util.points_on_circle(radius=stretch_scale, num_points=200) @@ -367,15 +601,24 @@ def plot_arc(ax, tangential_stretch, radial_stretch, curvature, direction, cente phi_c = t_c * tangential_stretch_ / r_c + direction x_c = r_c * np.cos(phi_c) + center_x_spp y_c = r_c * np.sin(phi_c) + center_y_spp - ax.plot(x_c, y_c, '-', color=color, linewidth=linewidth) + ax.plot(x_c, y_c, "-", color=color, linewidth=linewidth) return ax # TODO add different colors for each quarter to identify parities @export -def distortions(lensModel, kwargs_lens, num_pix=100, delta_pix=0.05, center_ra=0, center_dec=0, - differential_scale=0.0001, smoothing_scale=None, **kwargs): +def distortions( + lensModel, + kwargs_lens, + num_pix=100, + delta_pix=0.05, + center_ra=0, + center_dec=0, + differential_scale=0.0001, + smoothing_scale=None, + **kwargs +): """ :param lensModel: LensModel instance @@ -388,7 +631,9 @@ def distortions(lensModel, kwargs_lens, num_pix=100, delta_pix=0.05, center_ra=0 :param smoothing_scale: float or None, Gaussian FWHM of a smoothing kernel applied before plotting :return: matplotlib instance with different panels """ - kwargs_grid = sim_util.data_configure_simple(num_pix, delta_pix, center_ra=center_ra, center_dec=center_dec) + kwargs_grid = sim_util.data_configure_simple( + num_pix, delta_pix, center_ra=center_ra, center_dec=center_dec + ) _coords = ImageData(**kwargs_grid) _frame_size = num_pix * delta_pix ra_grid, dec_grid = _coords.pixel_coordinates @@ -396,26 +641,79 @@ def distortions(lensModel, kwargs_lens, num_pix=100, delta_pix=0.05, center_ra=0 extensions = LensModelExtensions(lensModel=lensModel) ra_grid1d = util.image2array(ra_grid) dec_grid1d = util.image2array(dec_grid) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials( - ra_grid1d, dec_grid1d, kwargs_lens=kwargs_lens, center_x=center_ra, center_y=center_dec, smoothing_3rd=differential_scale, smoothing_2nd=None) - - lambda_rad2d, lambda_tan2d, orientation_angle2d, dlambda_tan_dtan2d, dlambda_tan_drad2d, dlambda_rad_drad2d, dlambda_rad_dtan2d, dphi_tan_dtan2d, dphi_tan_drad2d, dphi_rad_drad2d, dphi_rad_dtan2d = util.array2image(lambda_rad), \ - util.array2image(lambda_tan), util.array2image(orientation_angle), util.array2image(dlambda_tan_dtan), util.array2image(dlambda_tan_drad), util.array2image(dlambda_rad_drad), util.array2image(dlambda_rad_dtan), \ - util.array2image(dphi_tan_dtan), util.array2image(dphi_tan_drad), util.array2image(dphi_rad_drad), util.array2image(dphi_rad_dtan) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + ra_grid1d, + dec_grid1d, + kwargs_lens=kwargs_lens, + 
center_x=center_ra, + center_y=center_dec, + smoothing_3rd=differential_scale, + smoothing_2nd=None, + ) + + ( + lambda_rad2d, + lambda_tan2d, + orientation_angle2d, + dlambda_tan_dtan2d, + dlambda_tan_drad2d, + dlambda_rad_drad2d, + dlambda_rad_dtan2d, + dphi_tan_dtan2d, + dphi_tan_drad2d, + dphi_rad_drad2d, + dphi_rad_dtan2d, + ) = ( + util.array2image(lambda_rad), + util.array2image(lambda_tan), + util.array2image(orientation_angle), + util.array2image(dlambda_tan_dtan), + util.array2image(dlambda_tan_drad), + util.array2image(dlambda_rad_drad), + util.array2image(dlambda_rad_dtan), + util.array2image(dphi_tan_dtan), + util.array2image(dphi_tan_drad), + util.array2image(dphi_rad_drad), + util.array2image(dphi_rad_dtan), + ) if smoothing_scale is not None: - lambda_rad2d = ndimage.gaussian_filter(lambda_rad2d, sigma=smoothing_scale/delta_pix) - dlambda_rad_drad2d = ndimage.gaussian_filter(dlambda_rad_drad2d, sigma=smoothing_scale/delta_pix) + lambda_rad2d = ndimage.gaussian_filter( + lambda_rad2d, sigma=smoothing_scale / delta_pix + ) + dlambda_rad_drad2d = ndimage.gaussian_filter( + dlambda_rad_drad2d, sigma=smoothing_scale / delta_pix + ) lambda_tan2d = np.abs(lambda_tan2d) # the magnification cut is made to make a stable integral/convolution lambda_tan2d[lambda_tan2d > 100] = 100 - lambda_tan2d = ndimage.gaussian_filter(lambda_tan2d, sigma=smoothing_scale/delta_pix) + lambda_tan2d = ndimage.gaussian_filter( + lambda_tan2d, sigma=smoothing_scale / delta_pix + ) # the magnification cut is made to make a stable integral/convolution dlambda_tan_dtan2d[dlambda_tan_dtan2d > 100] = 100 dlambda_tan_dtan2d[dlambda_tan_dtan2d < -100] = -100 - dlambda_tan_dtan2d = ndimage.gaussian_filter(dlambda_tan_dtan2d, sigma=smoothing_scale/delta_pix) - orientation_angle2d = ndimage.gaussian_filter(orientation_angle2d, sigma=smoothing_scale/delta_pix) - dphi_tan_dtan2d = ndimage.gaussian_filter(dphi_tan_dtan2d, sigma=smoothing_scale/delta_pix) + dlambda_tan_dtan2d = ndimage.gaussian_filter( + dlambda_tan_dtan2d, sigma=smoothing_scale / delta_pix + ) + orientation_angle2d = ndimage.gaussian_filter( + orientation_angle2d, sigma=smoothing_scale / delta_pix + ) + dphi_tan_dtan2d = ndimage.gaussian_filter( + dphi_tan_dtan2d, sigma=smoothing_scale / delta_pix + ) def _plot_frame(ax, frame, vmin, vmax, text_string): """ @@ -429,44 +727,112 @@ def _plot_frame(ax, frame, vmin, vmax, text_string): """ font_size = 10 _arrow_size = 0.02 - im = ax.matshow(frame, extent=[0, _frame_size, 0, _frame_size], vmin=vmin, vmax=vmax) + im = ax.matshow( + frame, extent=[0, _frame_size, 0, _frame_size], vmin=vmin, vmax=vmax + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) - cb = plt.colorbar(im, cax=cax, orientation='vertical') + cb = plt.colorbar(im, cax=cax, orientation="vertical") # cb.set_label(text_string, fontsize=10) # plot_util.scale_bar(ax, _frame_size, dist=1, text='1"', font_size=font_size) - plot_util.text_description(ax, _frame_size, text=text_string, color="k", - backgroundcolor='w', font_size=font_size) + plot_util.text_description( + ax, + _frame_size, + text=text_string, + color="k", + backgroundcolor="w", + font_size=font_size, + ) # if 'no_arrow' not in kwargs or not kwargs['no_arrow']: # plot_util.coordinate_arrows(ax, _frame_size, _coords, # color='w', arrow_size=_arrow_size, # font_size=font_size) f, axes = plt.subplots(3, 4, figsize=(12, 8)) - _plot_frame(axes[0, 0], 
lambda_rad2d, vmin=0.6, vmax=1.4, text_string=r"$\lambda_{rad}$") - _plot_frame(axes[0, 1], lambda_tan2d, vmin=-20, vmax=20, text_string=r"$\lambda_{tan}$") - _plot_frame(axes[0, 2], orientation_angle2d, vmin=-np.pi / 10, vmax=np.pi / 10, text_string=r"$\phi$") - _plot_frame(axes[0, 3], util.array2image(lambda_tan * lambda_rad), vmin=-20, vmax=20, text_string='magnification') - _plot_frame(axes[1, 0], dlambda_rad_drad2d/lambda_rad2d, vmin=-.1, vmax=.1, text_string='dlambda_rad_drad') - _plot_frame(axes[1, 1], dlambda_tan_dtan2d/lambda_tan2d, vmin=-20, vmax=20, text_string='dlambda_tan_dtan') - _plot_frame(axes[1, 2], dlambda_tan_drad2d/lambda_tan2d, vmin=-20, vmax=20, text_string='dlambda_tan_drad') - _plot_frame(axes[1, 3], dlambda_rad_dtan2d/lambda_rad2d, vmin=-.1, vmax=.1, text_string='dlambda_rad_dtan') - - _plot_frame(axes[2, 0], dphi_rad_drad2d, vmin=-.1, vmax=.1, text_string='dphi_rad_drad') - _plot_frame(axes[2, 1], dphi_tan_dtan2d, vmin=0, vmax=20, text_string='dphi_tan_dtan: curvature radius') - _plot_frame(axes[2, 2], dphi_tan_drad2d, vmin=-.1, vmax=.1, text_string='dphi_tan_drad') - _plot_frame(axes[2, 3], dphi_rad_dtan2d, vmin=0, vmax=20, text_string='dphi_rad_dtan') + _plot_frame( + axes[0, 0], lambda_rad2d, vmin=0.6, vmax=1.4, text_string=r"$\lambda_{rad}$" + ) + _plot_frame( + axes[0, 1], lambda_tan2d, vmin=-20, vmax=20, text_string=r"$\lambda_{tan}$" + ) + _plot_frame( + axes[0, 2], + orientation_angle2d, + vmin=-np.pi / 10, + vmax=np.pi / 10, + text_string=r"$\phi$", + ) + _plot_frame( + axes[0, 3], + util.array2image(lambda_tan * lambda_rad), + vmin=-20, + vmax=20, + text_string="magnification", + ) + _plot_frame( + axes[1, 0], + dlambda_rad_drad2d / lambda_rad2d, + vmin=-0.1, + vmax=0.1, + text_string="dlambda_rad_drad", + ) + _plot_frame( + axes[1, 1], + dlambda_tan_dtan2d / lambda_tan2d, + vmin=-20, + vmax=20, + text_string="dlambda_tan_dtan", + ) + _plot_frame( + axes[1, 2], + dlambda_tan_drad2d / lambda_tan2d, + vmin=-20, + vmax=20, + text_string="dlambda_tan_drad", + ) + _plot_frame( + axes[1, 3], + dlambda_rad_dtan2d / lambda_rad2d, + vmin=-0.1, + vmax=0.1, + text_string="dlambda_rad_dtan", + ) + + _plot_frame( + axes[2, 0], dphi_rad_drad2d, vmin=-0.1, vmax=0.1, text_string="dphi_rad_drad" + ) + _plot_frame( + axes[2, 1], + dphi_tan_dtan2d, + vmin=0, + vmax=20, + text_string="dphi_tan_dtan: curvature radius", + ) + _plot_frame( + axes[2, 2], dphi_tan_drad2d, vmin=-0.1, vmax=0.1, text_string="dphi_tan_drad" + ) + _plot_frame( + axes[2, 3], dphi_rad_dtan2d, vmin=0, vmax=20, text_string="dphi_rad_dtan" + ) return f, axes -def stretch_plot(ax, lens_model, kwargs_lens, plot_grid=None, scale=1, ellipse_color='k', max_stretch=np.inf, - **patch_kwargs): - """ - Plots ellipses at each point on a grid, scaled corresponding to the local Jacobian eigenvalues +def stretch_plot( + ax, + lens_model, + kwargs_lens, + plot_grid=None, + scale=1, + ellipse_color="k", + max_stretch=np.inf, + **patch_kwargs +): + """Plots ellipses at each point on a grid, scaled corresponding to the local + Jacobian eigenvalues. 
:param ax: matplotib axis instance :param lens_model: LensModel instance @@ -481,34 +847,50 @@ def stretch_plot(ax, lens_model, kwargs_lens, plot_grid=None, scale=1, ellipse_c if plot_grid is None: # define default ellipse grid (20x20 spanning from -2 to 2) - plot_grid = PixelGrid(20, 20, np.array([[1, 0], [0, 1]])*0.2, -2, -2) + plot_grid = PixelGrid(20, 20, np.array([[1, 0], [0, 1]]) * 0.2, -2, -2) lme = LensModelExtensions(lens_model) x_grid, y_grid = plot_grid.pixel_coordinates x = util.image2array(x_grid) y = util.image2array(y_grid) w1, w2, v11, v12, v21, v22 = lme.hessian_eigenvectors(x, y, kwargs_lens) - stretch_1 = np.abs(1. / w1) # stretch in direction of first eigenvalue (unsorted) - stretch_2 = np.abs(1. / w2) - stretch_direction = np.arctan2(v12, v11) # Direction of first eigenvector. Other eigenvector is orthogonal. + stretch_1 = np.abs(1.0 / w1) # stretch in direction of first eigenvalue (unsorted) + stretch_2 = np.abs(1.0 / w2) + stretch_direction = np.arctan2( + v12, v11 + ) # Direction of first eigenvector. Other eigenvector is orthogonal. for i in range(len(stretch_direction)): stretch_1_amount = np.minimum(stretch_1[i], max_stretch) stretch_2_amount = np.minimum(stretch_2[i], max_stretch) - ell = patches.Ellipse((x[i], y[i]), stretch_1_amount * scale/40, # 40 arbitrarily chosen - stretch_2_amount * scale/40, - angle=stretch_direction[i] * 180 / np.pi, - linewidth=1, fill=False, color=ellipse_color, **patch_kwargs) + ell = patches.Ellipse( + (x[i], y[i]), + stretch_1_amount * scale / 40, # 40 arbitrarily chosen + stretch_2_amount * scale / 40, + angle=stretch_direction[i] * 180 / np.pi, + linewidth=1, + fill=False, + color=ellipse_color, + **patch_kwargs + ) ax.add_patch(ell) ax.set_xlim(np.min(x), np.max(x)) ax.set_ylim(np.min(y), np.max(y)) return ax -def shear_plot(ax, lens_model, kwargs_lens, plot_grid=None, scale=5, color='k', max_stretch=np.inf, **kwargs): - """ - Plots combined internal+external shear at each point on a grid, - represented by pseudovectors in the direction of local shear - with length corresponding to shear magnitude. +def shear_plot( + ax, + lens_model, + kwargs_lens, + plot_grid=None, + scale=5, + color="k", + max_stretch=np.inf, + **kwargs +): + """Plots combined internal+external shear at each point on a grid, represented by + pseudovectors in the direction of local shear with length corresponding to shear + magnitude. 
:param ax: matplotib axis instance :param lens_model: LensModel instance @@ -523,7 +905,7 @@ def shear_plot(ax, lens_model, kwargs_lens, plot_grid=None, scale=5, color='k', if plot_grid is None: # define default ellipse grid (20x20 spanning from -2 to 2) - plot_grid = PixelGrid(20, 20, np.array([[1, 0], [0, 1]])*0.2, -2, -2) + plot_grid = PixelGrid(20, 20, np.array([[1, 0], [0, 1]]) * 0.2, -2, -2) x_grid, y_grid = plot_grid.pixel_coordinates g1, g2 = lens_model.gamma(x_grid, y_grid, kwargs_lens) @@ -532,8 +914,22 @@ def shear_plot(ax, lens_model, kwargs_lens, plot_grid=None, scale=5, color='k', shear = np.minimum(shear, max_stretch_array) arrow_x = shear * np.cos(phi) arrow_y = shear * np.sin(phi) - ax.quiver(x_grid, y_grid, arrow_x, arrow_y, headaxislength=0, headlength=0, - pivot='middle', scale=scale, linewidth=.5, units='xy', width=.02, headwidth=1, color=color, **kwargs) + ax.quiver( + x_grid, + y_grid, + arrow_x, + arrow_y, + headaxislength=0, + headlength=0, + pivot="middle", + scale=scale, + linewidth=0.5, + units="xy", + width=0.02, + headwidth=1, + color=color, + **kwargs + ) # , headwidth=0, headlength=0) ax.set_xlim(np.min(x_grid), np.max(x_grid)) ax.set_ylim(np.min(y_grid), np.max(y_grid)) diff --git a/lenstronomy/Plots/model_band_plot.py b/lenstronomy/Plots/model_band_plot.py index a2e9af051..67f4cd5a1 100644 --- a/lenstronomy/Plots/model_band_plot.py +++ b/lenstronomy/Plots/model_band_plot.py @@ -1,4 +1,3 @@ - import lenstronomy.Util.util as util import matplotlib.pyplot as plt import numpy as np @@ -9,17 +8,27 @@ from lenstronomy.Analysis.image_reconstruction import ModelBand from lenstronomy.LensModel.lens_model import LensModel -__all__ = ['ModelBandPlot'] +__all__ = ["ModelBandPlot"] class ModelBandPlot(ModelBand): - """ - class to plot a single band given the modeling results - - """ - def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, param, kwargs_params, - likelihood_mask_list=None, band_index=0, arrow_size=0.02, cmap_string="gist_heat", - fast_caustic=True): + """Class to plot a single band given the modeling results.""" + + def __init__( + self, + multi_band_list, + kwargs_model, + model, + error_map, + cov_param, + param, + kwargs_params, + likelihood_mask_list=None, + band_index=0, + arrow_size=0.02, + cmap_string="gist_heat", + fast_caustic=True, + ): """ :param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] @@ -36,8 +45,18 @@ def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, p :param cmap_string: string of color map (or cmap matplotlib object) :param fast_caustic: boolean; if True, uses fast (but less accurate) caustic calculation method """ - ModelBand.__init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, param, kwargs_params, - image_likelihood_mask_list=likelihood_mask_list, band_index=band_index) + ModelBand.__init__( + self, + multi_band_list, + kwargs_model, + model, + error_map, + cov_param, + param, + kwargs_params, + image_likelihood_mask_list=likelihood_mask_list, + band_index=band_index, + ) self._lensModel = self._bandmodel.LensModel self._lensModelExt = LensModelExtensions(self._lensModel) @@ -59,37 +78,63 @@ def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, p self._fast_caustic = fast_caustic def _critical_curves(self): - if not hasattr(self, '_ra_crit_list') or not hasattr(self, '_dec_crit_list'): - #self._ra_crit_list, self._dec_crit_list, self._ra_caustic_list, self._dec_caustic_list 
= self._lensModelExt.critical_curve_caustics( + if not hasattr(self, "_ra_crit_list") or not hasattr(self, "_dec_crit_list"): + # self._ra_crit_list, self._dec_crit_list, self._ra_caustic_list, self._dec_caustic_list = self._lensModelExt.critical_curve_caustics( # self._kwargs_lens_partial, compute_window=self._frame_size, grid_scale=self._deltaPix / 5., # center_x=self._x_center, center_y=self._y_center) if self._fast_caustic: - self._ra_crit_list, self._dec_crit_list, self._ra_caustic_list, self._dec_caustic_list = self._lensModelExt.critical_curve_caustics( - self._kwargs_lens_partial, compute_window=self._frame_size, grid_scale=self._deltaPix, - center_x=self._x_center, center_y=self._y_center) + ( + self._ra_crit_list, + self._dec_crit_list, + self._ra_caustic_list, + self._dec_caustic_list, + ) = self._lensModelExt.critical_curve_caustics( + self._kwargs_lens_partial, + compute_window=self._frame_size, + grid_scale=self._deltaPix, + center_x=self._x_center, + center_y=self._y_center, + ) self._caustic_points_only = False else: # only supports individual points due to output of critical_curve_tiling definition self._caustic_points_only = True - self._ra_crit_list, self._dec_crit_list = self._lensModelExt.critical_curve_tiling( + ( + self._ra_crit_list, + self._dec_crit_list, + ) = self._lensModelExt.critical_curve_tiling( self._kwargs_lens_partial, compute_window=self._frame_size, - start_scale=self._deltaPix / 5., + start_scale=self._deltaPix / 5.0, max_order=10, center_x=self._x_center, - center_y=self._y_center) - self._ra_caustic_list, self._dec_caustic_list = self._lensModel.ray_shooting(self._ra_crit_list, - self._dec_crit_list, - self._kwargs_lens_partial) + center_y=self._y_center, + ) + ( + self._ra_caustic_list, + self._dec_caustic_list, + ) = self._lensModel.ray_shooting( + self._ra_crit_list, self._dec_crit_list, self._kwargs_lens_partial + ) return self._ra_crit_list, self._dec_crit_list def _caustics(self): - if not hasattr(self, '_ra_caustic_list') or not hasattr(self, '_dec_caustic_list'): + if not hasattr(self, "_ra_caustic_list") or not hasattr( + self, "_dec_caustic_list" + ): _, _ = self._critical_curves() return self._ra_caustic_list, self._dec_caustic_list - def data_plot(self, ax, v_min=None, v_max=None, text='Observed', - font_size=15, colorbar_label=r'log$_{10}$ flux', **kwargs): + def data_plot( + self, + ax, + v_min=None, + v_max=None, + text="Observed", + font_size=15, + colorbar_label=r"log$_{10}$ flux", + **kwargs + ): """ :param ax: @@ -99,30 +144,58 @@ def data_plot(self, ax, v_min=None, v_max=None, text='Observed', v_min = self._v_min_default if v_max is None: v_max = self._v_max_default - im = ax.matshow(np.log10(self._data), origin='lower', - extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap, vmin=v_min, vmax=v_max) # , vmin=0, vmax=2 + im = ax.matshow( + np.log10(self._data), + origin="lower", + extent=[0, self._frame_size, 0, self._frame_size], + cmap=self._cmap, + vmin=v_min, + vmax=v_max, + ) # , vmin=0, vmax=2 ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="w", - backgroundcolor='k', font_size=font_size) - - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='w', - arrow_size=self._arrow_size, font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, 
dist=1, text='1"', font_size=font_size + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="w", + backgroundcolor="k", + font_size=font_size, + ) + + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="w", + arrow_size=self._arrow_size, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) - cb = plt.colorbar(im, cax=cax, orientation='vertical') + cb = plt.colorbar(im, cax=cax, orientation="vertical") cb.set_label(colorbar_label, fontsize=font_size) return ax - def model_plot(self, ax, v_min=None, v_max=None, image_names=False, - colorbar_label=r'log$_{10}$ flux', - font_size=15, text='Reconstructed', **kwargs): + def model_plot( + self, + ax, + v_min=None, + v_max=None, + image_names=False, + colorbar_label=r"log$_{10}$ flux", + font_size=15, + text="Reconstructed", + **kwargs + ): """ :param ax: matplotib axis instance @@ -134,73 +207,137 @@ def model_plot(self, ax, v_min=None, v_max=None, image_names=False, v_min = self._v_min_default if v_max is None: v_max = self._v_max_default - im = ax.matshow(np.log10(self._model), origin='lower', vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap) + im = ax.matshow( + np.log10(self._model), + origin="lower", + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + cmap=self._cmap, + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="w", - backgroundcolor='k', font_size=font_size) - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, - color='w', arrow_size=self._arrow_size, - font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', font_size=font_size + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="w", + backgroundcolor="k", + font_size=font_size, + ) + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="w", + arrow_size=self._arrow_size, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) cb.set_label(colorbar_label, fontsize=font_size) - #plot_line_set(ax, self._coords, self._ra_caustic_list, self._dec_caustic_list, color='b') - #plot_line_set(ax, self._coords, self._ra_crit_list, self._dec_crit_list, color='r') + # plot_line_set(ax, self._coords, self._ra_caustic_list, self._dec_caustic_list, color='b') + # plot_line_set(ax, self._coords, self._ra_crit_list, self._dec_crit_list, color='r') if image_names is True: - ra_image, dec_image = self._bandmodel.PointSource.image_position(self._kwargs_ps_partial, - self._kwargs_lens_partial, - original_position=kwargs.get('original_position', True)) - plot_util.image_position_plot(ax, self._coords, ra_image, dec_image, - image_name_list=kwargs.get('image_name_list', None), - plot_out_of_image=False) - #source_position_plot(ax, self._coords, self._kwargs_source) - - def convergence_plot(self, ax, text='Convergence', v_min=None, v_max=None, - font_size=15, colorbar_label=r'$\log_{10}\ \kappa$', - **kwargs): + ra_image, dec_image = 
self._bandmodel.PointSource.image_position( + self._kwargs_ps_partial, + self._kwargs_lens_partial, + original_position=kwargs.get("original_position", True), + ) + plot_util.image_position_plot( + ax, + self._coords, + ra_image, + dec_image, + image_name_list=kwargs.get("image_name_list", None), + plot_out_of_image=False, + ) + # source_position_plot(ax, self._coords, self._kwargs_source) + + def convergence_plot( + self, + ax, + text="Convergence", + v_min=None, + v_max=None, + font_size=15, + colorbar_label=r"$\log_{10}\ \kappa$", + **kwargs + ): """ :param ax: matplotib axis instance :return: convergence plot in ax instance """ - if not 'cmap' in kwargs: - kwargs['cmap'] = self._cmap - - kappa_result = util.array2image(self._lensModel.kappa(self._x_grid, self._y_grid, self._kwargs_lens_partial)) - im = ax.matshow(np.log10(kappa_result), origin='lower', - extent=[0, self._frame_size, 0, self._frame_size], - cmap=kwargs['cmap'], vmin=v_min, vmax=v_max) + if not "cmap" in kwargs: + kwargs["cmap"] = self._cmap + + kappa_result = util.array2image( + self._lensModel.kappa(self._x_grid, self._y_grid, self._kwargs_lens_partial) + ) + im = ax.matshow( + np.log10(kappa_result), + origin="lower", + extent=[0, self._frame_size, 0, self._frame_size], + cmap=kwargs["cmap"], + vmin=v_min, + vmax=v_max, + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='w', font_size=font_size) - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='w', - arrow_size=self._arrow_size, font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, - color="w", backgroundcolor='k', flipped=False, - font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="w", font_size=font_size + ) + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="w", + arrow_size=self._arrow_size, + font_size=font_size, + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="w", + backgroundcolor="k", + flipped=False, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) cb.set_label(colorbar_label, fontsize=font_size) return ax - def substructure_plot(self, ax, index_macromodel, text='Substructure convergence', subtract_mean=True, - v_min=-0.05, v_max=0.05, font_size=15, colorbar_label=r'$\kappa - \kappa_{\rm{macro}}$', - cmap='bwr', with_critical_curves=False, crit_curve_color='k', image_name_list=None, - **kwargs): - """ - - Plots the convergence of a full lens model minus the convergence from a few specified lens models to - more clearly show the presence of substructure + def substructure_plot( + self, + ax, + index_macromodel, + text="Substructure convergence", + subtract_mean=True, + v_min=-0.05, + v_max=0.05, + font_size=15, + colorbar_label=r"$\kappa - \kappa_{\rm{macro}}$", + cmap="bwr", + with_critical_curves=False, + crit_curve_color="k", + image_name_list=None, + **kwargs + ): + """Plots the convergence of a full lens model minus the convergence from a few + specified lens models to more clearly show the presence of substructure. 
:param ax: matplotib axis instance :param index_macromodel: a list of indexes corresponding to the lens models with convergence to be subtracted @@ -236,37 +373,80 @@ def substructure_plot(self, ax, index_macromodel, text='Substructure convergence if multi_plane: lens_redshift_list_macro.append(lens_redshift_list[idx]) - lens_model_macro = LensModel(lens_model_list_macro, multi_plane=multi_plane, lens_redshift_list=lens_redshift_list_macro, - z_source=z_source, cosmo=cosmo) - kappa_full = util.array2image(self._lensModel.kappa(self._x_grid, self._y_grid, self._kwargs_lens_partial)) - kappa_macro = util.array2image(lens_model_macro.kappa(self._x_grid, self._y_grid, kwargs_lens_macro)) + lens_model_macro = LensModel( + lens_model_list_macro, + multi_plane=multi_plane, + lens_redshift_list=lens_redshift_list_macro, + z_source=z_source, + cosmo=cosmo, + ) + kappa_full = util.array2image( + self._lensModel.kappa(self._x_grid, self._y_grid, self._kwargs_lens_partial) + ) + kappa_macro = util.array2image( + lens_model_macro.kappa(self._x_grid, self._y_grid, kwargs_lens_macro) + ) residual_kappa = kappa_full - kappa_macro if subtract_mean: mean_kappa = np.mean(residual_kappa) residual_kappa -= mean_kappa - colorbar_label = r'$\kappa_{\rm{sub}} - \langle \kappa_{\rm{sub}} \rangle$' - im=ax.imshow(residual_kappa, origin='lower', vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], cmap=cmap) + colorbar_label = r"$\kappa_{\rm{sub}} - \langle \kappa_{\rm{sub}} \rangle$" + im = ax.imshow( + residual_kappa, + origin="lower", + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + cmap=cmap, + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='k', font_size=font_size) - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='k', - arrow_size=self._arrow_size, font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, - color="k", backgroundcolor='w', flipped=False, - font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="k", font_size=font_size + ) + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="k", + arrow_size=self._arrow_size, + font_size=font_size, + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="k", + backgroundcolor="w", + flipped=False, + font_size=font_size, + ) if with_critical_curves is True: ra_crit_list, dec_crit_list = self._critical_curves() - plot_util.plot_line_set(ax, self._coords, ra_crit_list, dec_crit_list, color=crit_curve_color, - points_only=self._caustic_points_only) - - ra_image, dec_image = self._bandmodel.PointSource.image_position(self._kwargs_ps_partial, - self._kwargs_lens_partial) - plot_util.image_position_plot(ax, self._coords, ra_image, dec_image, color='k', image_name_list=image_name_list, - plot_out_of_image=False) + plot_util.plot_line_set( + ax, + self._coords, + ra_crit_list, + dec_crit_list, + color=crit_curve_color, + points_only=self._caustic_points_only, + ) + + ra_image, dec_image = self._bandmodel.PointSource.image_position( + self._kwargs_ps_partial, self._kwargs_lens_partial + ) + plot_util.image_position_plot( + ax, + self._coords, + ra_image, + dec_image, + color="k", + image_name_list=image_name_list, + plot_out_of_image=False, + ) divider = 
make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) @@ -275,9 +455,18 @@ def substructure_plot(self, ax, index_macromodel, text='Substructure convergence return ax - def normalized_residual_plot(self, ax, v_min=-6, v_max=6, font_size=15, text="Normalized Residuals", - colorbar_label=r'(f${}_{\rm model}$ - f${}_{\rm data}$)/$\sigma$', - no_arrow=False, color_bar=True, **kwargs): + def normalized_residual_plot( + self, + ax, + v_min=-6, + v_max=6, + font_size=15, + text="Normalized Residuals", + colorbar_label=r"(f${}_{\rm model}$ - f${}_{\rm data}$)/$\sigma$", + no_arrow=False, + color_bar=True, + **kwargs + ): """ :param ax: @@ -287,50 +476,90 @@ def normalized_residual_plot(self, ax, v_min=-6, v_max=6, font_size=15, text="No :param color_bar: Option to display the color bar :return: """ - if not 'cmap' in kwargs: - kwargs['cmap'] = 'bwr' - im = ax.matshow(self._norm_residuals, vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], origin='lower', - **kwargs) + if not "cmap" in kwargs: + kwargs["cmap"] = "bwr" + im = ax.matshow( + self._norm_residuals, + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + origin="lower", + **kwargs + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='k', - font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="k", - backgroundcolor='w', font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="k", font_size=font_size + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="k", + backgroundcolor="w", + font_size=font_size, + ) if not no_arrow: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='w', - arrow_size=self._arrow_size, font_size=font_size) - if color_bar : + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="w", + arrow_size=self._arrow_size, + font_size=font_size, + ) + if color_bar: divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) - cb.set_label(colorbar_label, - fontsize=font_size) + cb.set_label(colorbar_label, fontsize=font_size) return ax - def absolute_residual_plot(self, ax, v_min=-1, v_max=1, font_size=15, - text="Residuals", - colorbar_label=r'(f$_{model}$-f$_{data}$)'): + def absolute_residual_plot( + self, + ax, + v_min=-1, + v_max=1, + font_size=15, + text="Residuals", + colorbar_label=r"(f$_{model}$-f$_{data}$)", + ): """ :param ax: :return: """ - im = ax.matshow(self._model - self._data, vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], cmap='bwr', origin='lower') + im = ax.matshow( + self._model - self._data, + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + cmap="bwr", + origin="lower", + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='k', - font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="k", - backgroundcolor='w', font_size=font_size) - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, - font_size=font_size, - color='k', - arrow_size=self._arrow_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="k", font_size=font_size + ) + plot_util.text_description( + ax, + self._frame_size, + 
text=text, + color="k", + backgroundcolor="w", + font_size=font_size, + ) + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + font_size=font_size, + color="k", + arrow_size=self._arrow_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) @@ -347,38 +576,62 @@ def source(self, numPix, deltaPix, center=None, image_orientation=True): """ if image_orientation is True: Mpix2coord = self._coords.transform_pix2angle * deltaPix / self._deltaPix - x_grid_source, y_grid_source = util.make_grid_transformed(numPix, Mpix2Angle=Mpix2coord) + x_grid_source, y_grid_source = util.make_grid_transformed( + numPix, Mpix2Angle=Mpix2coord + ) ra_at_xy_0, dec_at_xy_0 = x_grid_source[0], y_grid_source[0] else: - x_grid_source, y_grid_source, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix, deltaPix) + ( + x_grid_source, + y_grid_source, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform(numPix, deltaPix) center_x = 0 center_y = 0 if center is not None: center_x, center_y = center[0], center[1] elif len(self._kwargs_source_partial) > 0: - center_x = self._kwargs_source_partial[0]['center_x'] - center_y = self._kwargs_source_partial[0]['center_y'] + center_x = self._kwargs_source_partial[0]["center_x"] + center_y = self._kwargs_source_partial[0]["center_y"] x_grid_source += center_x y_grid_source += center_y - coords_source = Coordinates(transform_pix2angle=Mpix2coord, - ra_at_xy_0=ra_at_xy_0 + center_x, - dec_at_xy_0=dec_at_xy_0 + center_y) + coords_source = Coordinates( + transform_pix2angle=Mpix2coord, + ra_at_xy_0=ra_at_xy_0 + center_x, + dec_at_xy_0=dec_at_xy_0 + center_y, + ) - source = self._bandmodel.SourceModel.surface_brightness(x_grid_source, y_grid_source, - self._kwargs_source_partial) - source = util.array2image(source) * deltaPix ** 2 + source = self._bandmodel.SourceModel.surface_brightness( + x_grid_source, y_grid_source, self._kwargs_source_partial + ) + source = util.array2image(source) * deltaPix**2 return source, coords_source - def source_plot(self, ax, numPix, deltaPix_source, center=None, v_min=None, - v_max=None, with_caustics=False, caustic_color='yellow', - font_size=15, plot_scale='log', - scale_size=0.1, - text="Reconstructed source", - colorbar_label=r'log$_{10}$ flux', point_source_position=True, - **kwargs): + def source_plot( + self, + ax, + numPix, + deltaPix_source, + center=None, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + font_size=15, + plot_scale="log", + scale_size=0.1, + text="Reconstructed source", + colorbar_label=r"log$_{10}$ flux", + point_source_position=True, + **kwargs + ): """ :param ax: @@ -399,15 +652,26 @@ def source_plot(self, ax, numPix, deltaPix_source, center=None, v_min=None, v_max = self._v_max_default d_s = numPix * deltaPix_source source, coords_source = self.source(numPix, deltaPix_source, center=center) - if plot_scale == 'log': - source[source < 10**v_min] = 10**(v_min) # to remove weird shadow in plot + if plot_scale == "log": + source[source < 10**v_min] = 10 ** ( + v_min + ) # to remove weird shadow in plot source_scale = np.log10(source) - elif plot_scale == 'linear': + elif plot_scale == "linear": source_scale = source else: - raise ValueError('variable plot_scale needs to be "log" or "linear", not %s.' 
% plot_scale) - im = ax.matshow(source_scale, origin='lower', extent=[0, d_s, 0, d_s], - cmap=self._cmap, vmin=v_min, vmax=v_max) # source + raise ValueError( + 'variable plot_scale needs to be "log" or "linear", not %s.' + % plot_scale + ) + im = ax.matshow( + source_scale, + origin="lower", + extent=[0, d_s, 0, d_s], + cmap=self._cmap, + vmin=v_min, + vmax=v_max, + ) # source ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) @@ -418,80 +682,181 @@ def source_plot(self, ax, numPix, deltaPix_source, center=None, v_min=None, if with_caustics is True: ra_caustic_list, dec_caustic_list = self._caustics() - plot_util.plot_line_set(ax, coords_source, ra_caustic_list, dec_caustic_list, color=caustic_color, - points_only=self._caustic_points_only) - plot_util.plot_line_set(ax, coords_source, ra_caustic_list, dec_caustic_list, color=caustic_color, - points_only=self._caustic_points_only, **kwargs.get('kwargs_caustic', {})) + plot_util.plot_line_set( + ax, + coords_source, + ra_caustic_list, + dec_caustic_list, + color=caustic_color, + points_only=self._caustic_points_only, + ) + plot_util.plot_line_set( + ax, + coords_source, + ra_caustic_list, + dec_caustic_list, + color=caustic_color, + points_only=self._caustic_points_only, + **kwargs.get("kwargs_caustic", {}) + ) if scale_size > 0: - plot_util.scale_bar(ax, d_s, dist=scale_size, text='{:.1f}"'.format(scale_size), color='w', flipped=False, - font_size=font_size) - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='w', - arrow_size=self._arrow_size, font_size=font_size) - plot_util.text_description(ax, d_s, text=text, color="w", backgroundcolor='k', - flipped=False, font_size=font_size) + plot_util.scale_bar( + ax, + d_s, + dist=scale_size, + text='{:.1f}"'.format(scale_size), + color="w", + flipped=False, + font_size=font_size, + ) + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="w", + arrow_size=self._arrow_size, + font_size=font_size, + ) + plot_util.text_description( + ax, + d_s, + text=text, + color="w", + backgroundcolor="k", + flipped=False, + font_size=font_size, + ) if point_source_position is True: - ra_source, dec_source = self._bandmodel.PointSource.source_position(self._kwargs_ps_partial, self._kwargs_lens) + ra_source, dec_source = self._bandmodel.PointSource.source_position( + self._kwargs_ps_partial, self._kwargs_lens + ) plot_util.source_position_plot(ax, coords_source, ra_source, dec_source) return ax - def error_map_source_plot(self, ax, numPix, deltaPix_source, v_min=None, v_max=None, with_caustics=False, - font_size=15, point_source_position=True): - """ - plots the uncertainty in the surface brightness in the source from the linear inversion by taking the diagonal - elements of the covariance matrix of the inversion of the basis set to be propagated to the source plane. - #TODO illustration of the uncertainties in real space with the full covariance matrix is subtle. - # The best way is probably to draw realizations from the covariance matrix. 
+ def error_map_source_plot( + self, + ax, + numPix, + deltaPix_source, + v_min=None, + v_max=None, + with_caustics=False, + font_size=15, + point_source_position=True, + ): + """Plots the uncertainty in the surface brightness in the source from the linear + inversion by taking the diagonal elements of the covariance matrix of the + inversion of the basis set to be propagated to the source plane. #TODO + illustration of the uncertainties in real space with the full covariance matrix + is subtle. # The best way is probably to draw realizations from the covariance + matrix. :param ax: matplotlib axis instance :param numPix: number of pixels in plot per axis - :param deltaPix_source: pixel spacing in the source resolution illustrated in plot + :param deltaPix_source: pixel spacing in the source resolution illustrated in + plot :param v_min: minimum plotting scale of the map :param v_max: maximum plotting scale of the map - :param with_caustics: plot the caustics on top of the source reconstruction (may take some time) + :param with_caustics: plot the caustics on top of the source reconstruction (may + take some time) :param font_size: font size of labels - :param point_source_position: boolean, if True, plots a point at the position of the point source - :return: plot of source surface brightness errors in the reconstruction on the axis instance + :param point_source_position: boolean, if True, plots a point at the position of + the point source + :return: plot of source surface brightness errors in the reconstruction on the + axis instance """ - x_grid_source, y_grid_source = util.make_grid_transformed(numPix, - self._coords.transform_pix2angle * deltaPix_source / self._deltaPix) - x_center = self._kwargs_source_partial[0]['center_x'] - y_center = self._kwargs_source_partial[0]['center_y'] + x_grid_source, y_grid_source = util.make_grid_transformed( + numPix, self._coords.transform_pix2angle * deltaPix_source / self._deltaPix + ) + x_center = self._kwargs_source_partial[0]["center_x"] + y_center = self._kwargs_source_partial[0]["center_y"] x_grid_source += x_center y_grid_source += y_center - coords_source = Coordinates(self._coords.transform_pix2angle * deltaPix_source / self._deltaPix, ra_at_xy_0=x_grid_source[0], - dec_at_xy_0=y_grid_source[0]) - error_map_source = self._bandmodel.error_map_source(self._kwargs_source_partial, x_grid_source, y_grid_source, - self._cov_param, model_index_select=False) + coords_source = Coordinates( + self._coords.transform_pix2angle * deltaPix_source / self._deltaPix, + ra_at_xy_0=x_grid_source[0], + dec_at_xy_0=y_grid_source[0], + ) + error_map_source = self._bandmodel.error_map_source( + self._kwargs_source_partial, + x_grid_source, + y_grid_source, + self._cov_param, + model_index_select=False, + ) error_map_source = util.array2image(error_map_source) d_s = numPix * deltaPix_source - im = ax.matshow(error_map_source, origin='lower', extent=[0, d_s, 0, d_s], - cmap=self._cmap, vmin=v_min, vmax=v_max) # source + im = ax.matshow( + error_map_source, + origin="lower", + extent=[0, d_s, 0, d_s], + cmap=self._cmap, + vmin=v_min, + vmax=v_max, + ) # source ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) - cb.set_label(r'error variance', fontsize=font_size) + cb.set_label(r"error variance", fontsize=font_size) if with_caustics: ra_caustic_list, dec_caustic_list = self._caustics() - 
plot_util.plot_line_set(ax, coords_source, ra_caustic_list, dec_caustic_list, color='b', - points_only=self._caustic_points_only) - plot_util.scale_bar(ax, d_s, dist=0.1, text='0.1"', color='w', flipped=False, font_size=font_size) - plot_util.coordinate_arrows(ax, d_s, coords_source, arrow_size=self._arrow_size, color='w', font_size=font_size) - plot_util.text_description(ax, d_s, text="Error map in source", color="w", backgroundcolor='k', flipped=False, - font_size=font_size) + plot_util.plot_line_set( + ax, + coords_source, + ra_caustic_list, + dec_caustic_list, + color="b", + points_only=self._caustic_points_only, + ) + plot_util.scale_bar( + ax, + d_s, + dist=0.1, + text='0.1"', + color="w", + flipped=False, + font_size=font_size, + ) + plot_util.coordinate_arrows( + ax, + d_s, + coords_source, + arrow_size=self._arrow_size, + color="w", + font_size=font_size, + ) + plot_util.text_description( + ax, + d_s, + text="Error map in source", + color="w", + backgroundcolor="k", + flipped=False, + font_size=font_size, + ) if point_source_position is True: - ra_source, dec_source = self._bandmodel.PointSource.source_position(self._kwargs_ps_partial, self._kwargs_lens) + ra_source, dec_source = self._bandmodel.PointSource.source_position( + self._kwargs_ps_partial, self._kwargs_lens + ) plot_util.source_position_plot(ax, coords_source, ra_source, dec_source) return ax - def magnification_plot(self, ax, v_min=-10, v_max=10, - image_name_list=None, font_size=15, no_arrow=False, - text="Magnification model", - colorbar_label=r"$\det\ (\mathsf{A}^{-1})$", - **kwargs): + def magnification_plot( + self, + ax, + v_min=-10, + v_max=10, + image_name_list=None, + font_size=15, + no_arrow=False, + text="Magnification model", + colorbar_label=r"$\det\ (\mathsf{A}^{-1})$", + **kwargs + ): """ :param ax: matplotib axis instance @@ -500,55 +865,121 @@ def magnification_plot(self, ax, v_min=-10, v_max=10, :param kwargs: kwargs to send to matplotlib.pyplot.matshow() :return: """ - if 'cmap' not in kwargs: - kwargs['cmap'] = self._cmap - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.5 - mag_result = util.array2image(self._lensModel.magnification(self._x_grid, self._y_grid, self._kwargs_lens_partial)) - im = ax.matshow(mag_result, origin='lower', extent=[0, self._frame_size, 0, self._frame_size], - vmin=v_min, vmax=v_max, **kwargs) + if "cmap" not in kwargs: + kwargs["cmap"] = self._cmap + if "alpha" not in kwargs: + kwargs["alpha"] = 0.5 + mag_result = util.array2image( + self._lensModel.magnification( + self._x_grid, self._y_grid, self._kwargs_lens_partial + ) + ) + im = ax.matshow( + mag_result, + origin="lower", + extent=[0, self._frame_size, 0, self._frame_size], + vmin=v_min, + vmax=v_max, + **kwargs + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='k', font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="k", font_size=font_size + ) if not no_arrow: - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size, - font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="k", backgroundcolor='w', font_size=font_size) + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="k", + arrow_size=self._arrow_size, + font_size=font_size, + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="k", + backgroundcolor="w", + font_size=font_size, 
+ ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) cb.set_label(colorbar_label, fontsize=font_size) - ra_image, dec_image = self._bandmodel.PointSource.image_position(self._kwargs_ps_partial, self._kwargs_lens_partial) - plot_util.image_position_plot(ax, self._coords, ra_image, dec_image, color='k', image_name_list=image_name_list, - plot_out_of_image=False) + ra_image, dec_image = self._bandmodel.PointSource.image_position( + self._kwargs_ps_partial, self._kwargs_lens_partial + ) + plot_util.image_position_plot( + ax, + self._coords, + ra_image, + dec_image, + color="k", + image_name_list=image_name_list, + plot_out_of_image=False, + ) return ax - def deflection_plot(self, ax, v_min=None, v_max=None, axis=0, - with_caustics=False, image_name_list=None, - text="Deflection model", font_size=15, - colorbar_label=r'arcsec'): + def deflection_plot( + self, + ax, + v_min=None, + v_max=None, + axis=0, + with_caustics=False, + image_name_list=None, + text="Deflection model", + font_size=15, + colorbar_label=r"arcsec", + ): """ :return: """ - alpha1, alpha2 = self._lensModel.alpha(self._x_grid, self._y_grid, self._kwargs_lens_partial) + alpha1, alpha2 = self._lensModel.alpha( + self._x_grid, self._y_grid, self._kwargs_lens_partial + ) alpha1 = util.array2image(alpha1) alpha2 = util.array2image(alpha2) if axis == 0: alpha = alpha1 else: alpha = alpha2 - im = ax.matshow(alpha, origin='lower', extent=[0, self._frame_size, 0, self._frame_size], - vmin=v_min, vmax=v_max, cmap=self._cmap, alpha=0.5) + im = ax.matshow( + alpha, + origin="lower", + extent=[0, self._frame_size, 0, self._frame_size], + vmin=v_min, + vmax=v_max, + cmap=self._cmap, + alpha=0.5, + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', color='k', font_size=font_size) - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, color='k', arrow_size=self._arrow_size, - font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="k", backgroundcolor='w', font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', color="k", font_size=font_size + ) + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + color="k", + arrow_size=self._arrow_size, + font_size=font_size, + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="k", + backgroundcolor="w", + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) @@ -556,19 +987,48 @@ def deflection_plot(self, ax, v_min=None, v_max=None, axis=0, if with_caustics is True: ra_crit_list, dec_crit_list = self._critical_curves() ra_caustic_list, dec_caustic_list = self._caustics() - plot_util.plot_line_set(ax, self._coords, ra_caustic_list, dec_caustic_list, color='b', - points_only=self._caustic_points_only) - plot_util.plot_line_set(ax, self._coords, ra_crit_list, dec_crit_list, color='r', - points_only=self._caustic_points_only) - ra_image, dec_image = self._bandmodel.PointSource.image_position(self._kwargs_ps_partial, self._kwargs_lens_partial) - plot_util.image_position_plot(ax, self._coords, ra_image, dec_image, image_name_list=image_name_list, - plot_out_of_image=False) + plot_util.plot_line_set( + ax, + self._coords, + ra_caustic_list, + dec_caustic_list, + color="b", + points_only=self._caustic_points_only, + ) + 
plot_util.plot_line_set( + ax, + self._coords, + ra_crit_list, + dec_crit_list, + color="r", + points_only=self._caustic_points_only, + ) + ra_image, dec_image = self._bandmodel.PointSource.image_position( + self._kwargs_ps_partial, self._kwargs_lens_partial + ) + plot_util.image_position_plot( + ax, + self._coords, + ra_image, + dec_image, + image_name_list=image_name_list, + plot_out_of_image=False, + ) return ax - def decomposition_plot(self, ax, text='Reconstructed', v_min=None, v_max=None, - unconvolved=False, point_source_add=False, - font_size=15, - source_add=False, lens_light_add=False, **kwargs): + def decomposition_plot( + self, + ax, + text="Reconstructed", + v_min=None, + v_max=None, + unconvolved=False, + point_source_add=False, + font_size=15, + source_add=False, + lens_light_add=False, + **kwargs + ): """ :param ax: @@ -582,60 +1042,114 @@ def decomposition_plot(self, ax, text='Reconstructed', v_min=None, v_max=None, :param kwargs: kwargs to send matplotlib.pyplot.matshow() :return: """ - model = self._bandmodel._image(self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, - self._kwargs_ps_partial, unconvolved=unconvolved, source_add=source_add, - lens_light_add=lens_light_add, point_source_add=point_source_add) + model = self._bandmodel._image( + self._kwargs_lens_partial, + self._kwargs_source_partial, + self._kwargs_lens_light_partial, + self._kwargs_ps_partial, + unconvolved=unconvolved, + source_add=source_add, + lens_light_add=lens_light_add, + point_source_add=point_source_add, + ) if v_min is None: v_min = self._v_min_default if v_max is None: v_max = self._v_max_default - if 'cmap' not in kwargs: - kwargs['cmap'] = self._cmap - im = ax.matshow(np.log10(model), origin='lower', vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], **kwargs) + if "cmap" not in kwargs: + kwargs["cmap"] = self._cmap + im = ax.matshow( + np.log10(model), + origin="lower", + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + **kwargs + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="w", backgroundcolor='k') - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, arrow_size=self._arrow_size, - font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', font_size=font_size + ) + plot_util.text_description( + ax, self._frame_size, text=text, color="w", backgroundcolor="k" + ) + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + arrow_size=self._arrow_size, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) - cb.set_label(r'log$_{10}$ flux', fontsize=font_size) + cb.set_label(r"log$_{10}$ flux", fontsize=font_size) return ax - def subtract_from_data_plot(self, ax, text='Subtracted', v_min=None, - v_max=None, point_source_add=False, - source_add=False, lens_light_add=False, - font_size=15 - ): - model = self._bandmodel._image(self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, - self._kwargs_ps_partial, unconvolved=False, source_add=source_add, - lens_light_add=lens_light_add, point_source_add=point_source_add) + def subtract_from_data_plot( + self, + ax, + text="Subtracted", + v_min=None, + v_max=None, + 
point_source_add=False, + source_add=False, + lens_light_add=False, + font_size=15, + ): + model = self._bandmodel._image( + self._kwargs_lens_partial, + self._kwargs_source_partial, + self._kwargs_lens_light_partial, + self._kwargs_ps_partial, + unconvolved=False, + source_add=source_add, + lens_light_add=lens_light_add, + point_source_add=point_source_add, + ) if v_min is None: v_min = self._v_min_default if v_max is None: v_max = self._v_max_default - im = ax.matshow(np.log10(self._data - model), origin='lower', vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], cmap=self._cmap) + im = ax.matshow( + np.log10(self._data - model), + origin="lower", + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + cmap=self._cmap, + ) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.autoscale(False) - plot_util.scale_bar(ax, self._frame_size, dist=1, text='1"', font_size=font_size) - plot_util.text_description(ax, self._frame_size, text=text, color="w", backgroundcolor='k', font_size=font_size) - plot_util.coordinate_arrows(ax, self._frame_size, self._coords, arrow_size=self._arrow_size, - font_size=font_size) + plot_util.scale_bar( + ax, self._frame_size, dist=1, text='1"', font_size=font_size + ) + plot_util.text_description( + ax, + self._frame_size, + text=text, + color="w", + backgroundcolor="k", + font_size=font_size, + ) + plot_util.coordinate_arrows( + ax, + self._frame_size, + self._coords, + arrow_size=self._arrow_size, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb = plt.colorbar(im, cax=cax) - cb.set_label(r'log$_{10}$ flux', fontsize=font_size) + cb.set_label(r"log$_{10}$ flux", fontsize=font_size) return ax def plot_main(self, with_caustics=False): - """ - print the main plots together in a joint frame + """Print the main plots together in a joint frame. :return: """ @@ -644,51 +1158,89 @@ def plot_main(self, with_caustics=False): self.data_plot(ax=axes[0, 0]) self.model_plot(ax=axes[0, 1], image_names=True) self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6) - self.source_plot(ax=axes[1, 0], deltaPix_source=0.01, numPix=100, with_caustics=with_caustics) + self.source_plot( + ax=axes[1, 0], deltaPix_source=0.01, numPix=100, with_caustics=with_caustics + ) self.convergence_plot(ax=axes[1, 1], v_max=1) self.magnification_plot(ax=axes[1, 2]) f.tight_layout() - f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) + f.subplots_adjust( + left=None, bottom=None, right=None, top=None, wspace=0.0, hspace=0.05 + ) return f, axes def plot_separate(self): - """ - plot the different model components separately + """Plot the different model components separately. 
:return: """ f, axes = plt.subplots(2, 3, figsize=(16, 8)) - self.decomposition_plot(ax=axes[0, 0], text='Lens light', lens_light_add=True, unconvolved=True) - self.decomposition_plot(ax=axes[1, 0], text='Lens light convolved', lens_light_add=True) - self.decomposition_plot(ax=axes[0, 1], text='Source light', source_add=True, unconvolved=True) - self.decomposition_plot(ax=axes[1, 1], text='Source light convolved', source_add=True) - self.decomposition_plot(ax=axes[0, 2], text='All components', source_add=True, lens_light_add=True, - unconvolved=True) - self.decomposition_plot(ax=axes[1, 2], text='All components convolved', source_add=True, - lens_light_add=True, point_source_add=True) + self.decomposition_plot( + ax=axes[0, 0], text="Lens light", lens_light_add=True, unconvolved=True + ) + self.decomposition_plot( + ax=axes[1, 0], text="Lens light convolved", lens_light_add=True + ) + self.decomposition_plot( + ax=axes[0, 1], text="Source light", source_add=True, unconvolved=True + ) + self.decomposition_plot( + ax=axes[1, 1], text="Source light convolved", source_add=True + ) + self.decomposition_plot( + ax=axes[0, 2], + text="All components", + source_add=True, + lens_light_add=True, + unconvolved=True, + ) + self.decomposition_plot( + ax=axes[1, 2], + text="All components convolved", + source_add=True, + lens_light_add=True, + point_source_add=True, + ) f.tight_layout() - f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) + f.subplots_adjust( + left=None, bottom=None, right=None, top=None, wspace=0.0, hspace=0.05 + ) return f, axes def plot_subtract_from_data_all(self): - """ - subtract model components from data + """Subtract model components from data. :return: """ f, axes = plt.subplots(2, 3, figsize=(16, 8)) - self.subtract_from_data_plot(ax=axes[0, 0], text='Data') - self.subtract_from_data_plot(ax=axes[0, 1], text='Data - Point Source', point_source_add=True) - self.subtract_from_data_plot(ax=axes[0, 2], text='Data - Lens Light', lens_light_add=True) - self.subtract_from_data_plot(ax=axes[1, 0], text='Data - Source Light', source_add=True) - self.subtract_from_data_plot(ax=axes[1, 1], text='Data - Source Light - Point Source', source_add=True, - point_source_add=True) - self.subtract_from_data_plot(ax=axes[1, 2], text='Data - Lens Light - Point Source', lens_light_add=True, - point_source_add=True) + self.subtract_from_data_plot(ax=axes[0, 0], text="Data") + self.subtract_from_data_plot( + ax=axes[0, 1], text="Data - Point Source", point_source_add=True + ) + self.subtract_from_data_plot( + ax=axes[0, 2], text="Data - Lens Light", lens_light_add=True + ) + self.subtract_from_data_plot( + ax=axes[1, 0], text="Data - Source Light", source_add=True + ) + self.subtract_from_data_plot( + ax=axes[1, 1], + text="Data - Source Light - Point Source", + source_add=True, + point_source_add=True, + ) + self.subtract_from_data_plot( + ax=axes[1, 2], + text="Data - Lens Light - Point Source", + lens_light_add=True, + point_source_add=True, + ) f.tight_layout() - f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) + f.subplots_adjust( + left=None, bottom=None, right=None, top=None, wspace=0.0, hspace=0.05 + ) return f, axes def plot_extinction_map(self, ax, v_min=None, v_max=None, **kwargs): @@ -699,12 +1251,20 @@ def plot_extinction_map(self, ax, v_min=None, v_max=None, **kwargs): :param v_max: :return: """ - model = self._bandmodel._extinction_map(self._kwargs_extinction_partial, self._kwargs_special_partial) + model = 
self._bandmodel._extinction_map( + self._kwargs_extinction_partial, self._kwargs_special_partial + ) if v_min is None: v_min = 0 if v_max is None: v_max = 1 - _ = ax.matshow(model, origin='lower', vmin=v_min, vmax=v_max, - extent=[0, self._frame_size, 0, self._frame_size], **kwargs) + _ = ax.matshow( + model, + origin="lower", + vmin=v_min, + vmax=v_max, + extent=[0, self._frame_size, 0, self._frame_size], + **kwargs + ) return ax diff --git a/lenstronomy/Plots/model_plot.py b/lenstronomy/Plots/model_plot.py index 9ac5d6a02..0963478a6 100644 --- a/lenstronomy/Plots/model_plot.py +++ b/lenstronomy/Plots/model_plot.py @@ -5,21 +5,35 @@ from lenstronomy.Plots.model_band_plot import ModelBandPlot from lenstronomy.Analysis.image_reconstruction import check_solver_error -__all__ = ['ModelPlot'] +__all__ = ["ModelPlot"] class ModelPlot(object): + """Class that manages the summary plots of a lens model The class uses the same + conventions as being used in the FittingSequence and interfaces with the ImSim + module. + + The linear inversion is re-done given the likelihood settings in the init of this + class (make sure this is the same as you perform the FittingSequence) to make sure + the linear amplitude parameters are computed as they are not part of the output of + the FittingSequence results. """ - class that manages the summary plots of a lens model - The class uses the same conventions as being used in the FittingSequence and interfaces with the ImSim module. - The linear inversion is re-done given the likelihood settings in the init of this class (make sure this is the same - as you perform the FittingSequence) to make sure the linear amplitude parameters are computed as they are not part - of the output of the FittingSequence results. - """ - def __init__(self, multi_band_list, kwargs_model, kwargs_params, image_likelihood_mask_list=None, - bands_compute=None, multi_band_type='multi-linear', source_marg=False, linear_prior=None, - arrow_size=0.02, cmap_string="gist_heat", fast_caustic=True, linear_solver=True): + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + image_likelihood_mask_list=None, + bands_compute=None, + multi_band_type="multi-linear", + source_marg=False, + linear_prior=None, + arrow_size=0.02, + cmap_string="gist_heat", + fast_caustic=True, + linear_solver=True, + ): """ :param multi_band_list: list of [[kwargs_data, kwargs_psf, kwargs_numerics], [], ..] 
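
For orientation, a minimal usage sketch of the ModelPlot class whose reformatted signature appears above, assuming multi_band_list, kwargs_model and kwargs_result already hold the data configuration, model settings and best-fit parameters of a finished FittingSequence run (placeholder names, not defined in this sketch; the keyword defaults mirror the reformatted __init__):

from lenstronomy.Plots.model_plot import ModelPlot

# multi_band_list, kwargs_model and kwargs_result are placeholders for the
# outputs of an existing fitting setup; they are not constructed here.
model_plot = ModelPlot(
    multi_band_list,          # [[kwargs_data, kwargs_psf, kwargs_numerics], ...]
    kwargs_model,             # same model settings as used for the fit
    kwargs_result,            # best-fit kwargs_params from the FittingSequence
    arrow_size=0.02,
    cmap_string="gist_heat",
)

# joint summary figure: data, model, normalized residuals, source,
# convergence and magnification panels in one 2x3 grid
f, axes = model_plot.plot_main(with_caustics=True)
f.savefig("model_summary.png")
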
@@ -45,58 +59,88 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, image_likelihoo if bands_compute is None: bands_compute = [True] * len(multi_band_list) - if multi_band_type == 'single-band': - multi_band_type = 'multi-linear' # this makes sure that the linear inversion outputs are coming in a list - self._imageModel = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, - bands_compute=bands_compute, linear_solver=linear_solver, - image_likelihood_mask_list=image_likelihood_mask_list) + if multi_band_type == "single-band": + multi_band_type = "multi-linear" # this makes sure that the linear inversion outputs are coming in a list + self._imageModel = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=bands_compute, + linear_solver=linear_solver, + image_likelihood_mask_list=image_likelihood_mask_list, + ) kwargs_params_copy = copy.deepcopy(kwargs_params) - model, error_map, cov_param, param = self._imageModel.image_linear_solve(inv_bool=True, **kwargs_params_copy) + model, error_map, cov_param, param = self._imageModel.image_linear_solve( + inv_bool=True, **kwargs_params_copy + ) if linear_solver is False: if len(multi_band_list) > 1: - raise ValueError('plotting the solution without the linear solver currently only works with one band.') - - im_sim = class_creator.create_im_sim(multi_band_list, 'single-band', kwargs_model, - bands_compute=bands_compute, linear_solver=linear_solver, - image_likelihood_mask_list=image_likelihood_mask_list) + raise ValueError( + "plotting the solution without the linear solver currently only works with one band." + ) + + im_sim = class_creator.create_im_sim( + multi_band_list, + "single-band", + kwargs_model, + bands_compute=bands_compute, + linear_solver=linear_solver, + image_likelihood_mask_list=image_likelihood_mask_list, + ) # overwrite model with initial input without linear solver applied model[0] = im_sim.image(**kwargs_params) # retrieve amplitude parameters directly from kwargs_list - param[0] = im_sim.linear_param_from_kwargs(kwargs_params['kwargs_source'], - kwargs_params['kwargs_lens_light'], - kwargs_params['kwargs_ps']) + param[0] = im_sim.linear_param_from_kwargs( + kwargs_params["kwargs_source"], + kwargs_params["kwargs_lens_light"], + kwargs_params["kwargs_ps"], + ) else: # overwrite the keyword list with the linear solved 'amp' values for key in kwargs_params.keys(): kwargs_params[key] = kwargs_params_copy[key] check_solver_error(param) - log_l = self._imageModel.likelihood_data_given_model(source_marg=source_marg, linear_prior=linear_prior, - **kwargs_params) + log_l = self._imageModel.likelihood_data_given_model( + source_marg=source_marg, linear_prior=linear_prior, **kwargs_params + ) n_data = self._imageModel.num_data_evaluate if n_data > 0: - print(log_l * 2 / n_data, 'reduced X^2 of all evaluated imaging data combined ' - '(without degrees of freedom subtracted).') + print( + log_l * 2 / n_data, + "reduced X^2 of all evaluated imaging data combined " + "(without degrees of freedom subtracted).", + ) self._band_plot_list = [] self._index_list = [] index = 0 for i in range(len(multi_band_list)): if bands_compute[i] is True: - if multi_band_type == 'joint-linear': + if multi_band_type == "joint-linear": param_i = param cov_param_i = cov_param else: param_i = param[index] cov_param_i = cov_param[index] - bandplot = ModelBandPlot(multi_band_list, kwargs_model, model[index], error_map[index], cov_param_i, - param_i, copy.deepcopy(kwargs_params), - 
likelihood_mask_list=image_likelihood_mask_list, band_index=i, - arrow_size=arrow_size, cmap_string=cmap_string, fast_caustic=fast_caustic) + bandplot = ModelBandPlot( + multi_band_list, + kwargs_model, + model[index], + error_map[index], + cov_param_i, + param_i, + copy.deepcopy(kwargs_params), + likelihood_mask_list=image_likelihood_mask_list, + band_index=i, + arrow_size=arrow_size, + cmap_string=cmap_string, + fast_caustic=fast_caustic, + ) self._band_plot_list.append(bandplot) self._index_list.append(index) @@ -125,7 +169,8 @@ def reconstruction_all_bands(self, **kwargs): """ n_bands = len(self._band_plot_list) import matplotlib.pyplot as plt - f, axes = plt.subplots(n_bands, 3, figsize=(12, 4*n_bands)) + + f, axes = plt.subplots(n_bands, 3, figsize=(12, 4 * n_bands)) if n_bands == 1: # make sure axis can be called as 2d array _axes = np.empty((1, 3), dtype=object) _axes[:] = axes @@ -133,16 +178,19 @@ def reconstruction_all_bands(self, **kwargs): i = 0 for band_index in self._index_list: if band_index >= 0: - axes[i, 0].set_title('image ' + str(band_index)) + axes[i, 0].set_title("image " + str(band_index)) self.data_plot(ax=axes[i, 0], band_index=band_index, **kwargs) - self.model_plot(ax=axes[i, 1], image_names=True, band_index=band_index, **kwargs) - self.normalized_residual_plot(ax=axes[i, 2], v_min=-6, v_max=6, band_index=band_index, **kwargs) + self.model_plot( + ax=axes[i, 1], image_names=True, band_index=band_index, **kwargs + ) + self.normalized_residual_plot( + ax=axes[i, 2], v_min=-6, v_max=6, band_index=band_index, **kwargs + ) i += 1 return f, axes def data_plot(self, band_index=0, **kwargs): - """ - illustrates data + """Illustrates data. :param band_index: index of band :param kwargs: arguments of plotting @@ -152,8 +200,7 @@ def data_plot(self, band_index=0, **kwargs): return plot_band.data_plot(**kwargs) def model_plot(self, band_index=0, **kwargs): - """ - illustrates model + """Illustrates model. :param band_index: index of band :param kwargs: arguments of plotting @@ -163,8 +210,7 @@ def model_plot(self, band_index=0, **kwargs): return plot_band.model_plot(**kwargs) def convergence_plot(self, band_index=0, **kwargs): - """ - illustrates lensing convergence in data frame + """Illustrates lensing convergence in data frame. :param band_index: index of band :param kwargs: arguments of plotting @@ -174,8 +220,7 @@ def convergence_plot(self, band_index=0, **kwargs): return plot_band.convergence_plot(**kwargs) def substructure_plot(self, band_index=0, **kwargs): - """ - illustrates substructure in the lens system + """Illustrates substructure in the lens system. :param band_index: index of band :param kwargs: arguments of plotting @@ -185,8 +230,7 @@ def substructure_plot(self, band_index=0, **kwargs): return plot_band.substructure_plot(**kwargs) def normalized_residual_plot(self, band_index=0, **kwargs): - """ - illustrates normalized residuals between data and model fit + """Illustrates normalized residuals between data and model fit. :param band_index: index of band :param kwargs: arguments of plotting @@ -196,8 +240,7 @@ def normalized_residual_plot(self, band_index=0, **kwargs): return plot_band.normalized_residual_plot(**kwargs) def absolute_residual_plot(self, band_index=0, **kwargs): - """ - illustrates absolute residuals between data and model fit + """Illustrates absolute residuals between data and model fit. 
:param band_index: index of band :param kwargs: arguments of plotting @@ -207,8 +250,7 @@ def absolute_residual_plot(self, band_index=0, **kwargs): return plot_band.absolute_residual_plot(**kwargs) def source_plot(self, band_index=0, **kwargs): - """ - illustrates reconstructed source (de-lensed de-convolved) + """Illustrates reconstructed source (de-lensed de-convolved) :param band_index: index of band :param kwargs: arguments of plotting @@ -218,8 +260,8 @@ def source_plot(self, band_index=0, **kwargs): return plot_band.source_plot(**kwargs) def error_map_source_plot(self, band_index=0, **kwargs): - """ - illustrates surface brightness variance in the reconstruction in the source plane + """Illustrates surface brightness variance in the reconstruction in the source + plane. :param band_index: index of band :param kwargs: arguments of plotting @@ -229,8 +271,7 @@ def error_map_source_plot(self, band_index=0, **kwargs): return plot_band.error_map_source_plot(**kwargs) def magnification_plot(self, band_index=0, **kwargs): - """ - illustrates lensing magnification in the field of view of the data frame + """Illustrates lensing magnification in the field of view of the data frame. :param band_index: index of band :param kwargs: arguments of plotting @@ -240,8 +281,7 @@ def magnification_plot(self, band_index=0, **kwargs): return plot_band.magnification_plot(**kwargs) def deflection_plot(self, band_index=0, **kwargs): - """ - illustrates lensing deflections on the field of view of the data frame + """Illustrates lensing deflections on the field of view of the data frame. :param band_index: index of band :param kwargs: arguments of plotting @@ -251,8 +291,7 @@ def deflection_plot(self, band_index=0, **kwargs): return plot_band.deflection_plot(**kwargs) def decomposition_plot(self, band_index=0, **kwargs): - """ - illustrates decomposition of model components + """Illustrates decomposition of model components. :param band_index: index of band :param kwargs: arguments of plotting @@ -262,8 +301,7 @@ def decomposition_plot(self, band_index=0, **kwargs): return plot_band.decomposition_plot(**kwargs) def subtract_from_data_plot(self, band_index=0, **kwargs): - """ - subtracts individual model components from the data + """Subtracts individual model components from the data. :param band_index: index of band :param kwargs: arguments of plotting @@ -273,8 +311,7 @@ def subtract_from_data_plot(self, band_index=0, **kwargs): return plot_band.subtract_from_data_plot(**kwargs) def plot_main(self, band_index=0, **kwargs): - """ - plot a set of 'main' modelling diagnostics + """Plot a set of 'main' modelling diagnostics. :param band_index: index of band :param kwargs: arguments of plotting @@ -284,8 +321,7 @@ def plot_main(self, band_index=0, **kwargs): return plot_band.plot_main(**kwargs) def plot_separate(self, band_index=0): - """ - plot a set of 'main' modelling diagnostics + """Plot a set of 'main' modelling diagnostics. :param band_index: index of band :return: plot instance @@ -294,8 +330,7 @@ def plot_separate(self, band_index=0): return plot_band.plot_separate() def plot_subtract_from_data_all(self, band_index=0): - """ - plot a set of 'main' modelling diagnostics + """Plot a set of 'main' modelling diagnostics. 
:param band_index: index of band :return: plot instance diff --git a/lenstronomy/Plots/multi_patch_plot.py b/lenstronomy/Plots/multi_patch_plot.py index f0a47d0dd..f36358707 100644 --- a/lenstronomy/Plots/multi_patch_plot.py +++ b/lenstronomy/Plots/multi_patch_plot.py @@ -9,12 +9,20 @@ class MultiPatchPlot(MultiPatchReconstruction): - """ - this class illustrates the model of disconnected multi-patch modeling with 'joint-linear' option in one single - array. - """ - def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type='joint-linear', - kwargs_likelihood=None, kwargs_pixel_grid=None, verbose=True, cmap_string="gist_heat"): + """This class illustrates the model of disconnected multi-patch modeling with + 'joint-linear' option in one single array.""" + + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="joint-linear", + kwargs_likelihood=None, + kwargs_pixel_grid=None, + verbose=True, + cmap_string="gist_heat", + ): """ :param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]] @@ -31,11 +39,27 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type This can deactivated for speedup purposes (does not run linear inversion again), and reduces the number of prints. :param cmap_string: string of color map (or cmap matplotlib object) """ - MultiPatchReconstruction.__init__(self, multi_band_list, kwargs_model, kwargs_params, - multi_band_type=multi_band_type, kwargs_likelihood=kwargs_likelihood, - kwargs_pixel_grid=kwargs_pixel_grid, verbose=verbose) - self._image_joint, self._model_joint, self._norm_residuals_joint = self.image_joint() - self._kappa_joint, self._magnification_joint, self._alpha_x_joint, self._alpha_y_joint = self.lens_model_joint() + MultiPatchReconstruction.__init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type=multi_band_type, + kwargs_likelihood=kwargs_likelihood, + kwargs_pixel_grid=kwargs_pixel_grid, + verbose=verbose, + ) + ( + self._image_joint, + self._model_joint, + self._norm_residuals_joint, + ) = self.image_joint() + ( + self._kappa_joint, + self._magnification_joint, + self._alpha_x_joint, + self._alpha_y_joint, + ) = self.lens_model_joint() log_model = np.log10(self._model_joint) log_model[np.isnan(log_model)] = -5 @@ -43,47 +67,101 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type self._v_max_default = min(np.max(log_model), 10) self._cmap = plot_util.cmap_conf(cmap_string) - def data_plot(self, ax, log_scale=True, text='Observed', colorbar_label=r'log$_{10}$ flux', **kwargs): - """ - illustrates data + def data_plot( + self, + ax, + log_scale=True, + text="Observed", + colorbar_label=r"log$_{10}$ flux", + **kwargs + ): + """Illustrates data. 
:param ax: matplotlib axis instance :param kwargs: plotting keyword arguments :return: matplotlib instance """ - return self._plot(ax, image=self._image_joint, coords=self._pixel_grid_joint, log_scale=log_scale, text=text, - colorbar_label=colorbar_label, **kwargs) + return self._plot( + ax, + image=self._image_joint, + coords=self._pixel_grid_joint, + log_scale=log_scale, + text=text, + colorbar_label=colorbar_label, + **kwargs + ) - def model_plot(self, ax, log_scale=True, text='Reconstructed', colorbar_label=r'log$_{10}$ flux', **kwargs): - """ - illustrates model + def model_plot( + self, + ax, + log_scale=True, + text="Reconstructed", + colorbar_label=r"log$_{10}$ flux", + **kwargs + ): + """Illustrates model. :param ax: matplotlib axis instance :param kwargs: plotting keyword arguments :return: matplotlib instance """ - return self._plot(ax, image=self._model_joint, coords=self._pixel_grid_joint, log_scale=log_scale, text=text, - colorbar_label=colorbar_label, **kwargs) + return self._plot( + ax, + image=self._model_joint, + coords=self._pixel_grid_joint, + log_scale=log_scale, + text=text, + colorbar_label=colorbar_label, + **kwargs + ) - def source_plot(self, ax, delta_pix, num_pix, center=None, log_scale=True, text='Source', - colorbar_label=r'log$_{10}$ flux', dist_scale=0.1, **kwargs): - """ - illustrates source + def source_plot( + self, + ax, + delta_pix, + num_pix, + center=None, + log_scale=True, + text="Source", + colorbar_label=r"log$_{10}$ flux", + dist_scale=0.1, + **kwargs + ): + """Illustrates source. - :param ax: matplotlib axis instance - :param delta_pix scale of the pixel size of the source plot + :param ax: matplotlib axis instance :param delta_pix scale of the pixel size of + the source plot :param num_pix: number of pixels per axis of the source plot :param center: list with two entries [center_x, center_y] (optional) :param kwargs: plotting keyword arguments :return: matplotlib instance """ - source, coords = self.source(num_pix=num_pix, delta_pix=delta_pix, center=center) - return self._plot(ax, image=source, coords=coords, log_scale=log_scale, text=text, - colorbar_label=colorbar_label, dist_scale=dist_scale, **kwargs) + source, coords = self.source( + num_pix=num_pix, delta_pix=delta_pix, center=center + ) + return self._plot( + ax, + image=source, + coords=coords, + log_scale=log_scale, + text=text, + colorbar_label=colorbar_label, + dist_scale=dist_scale, + **kwargs + ) - def normalized_residual_plot(self, ax, v_min=-6, v_max=6, log_scale=False, text='Normalized Residuals', - colorbar_label=r'(f${}_{\rm model}$ - f${}_{\rm data}$)/$\sigma$', cmap='bwr', - white_on_black=False, **kwargs): + def normalized_residual_plot( + self, + ax, + v_min=-6, + v_max=6, + log_scale=False, + text="Normalized Residuals", + colorbar_label=r"(f${}_{\rm model}$ - f${}_{\rm data}$)/$\sigma$", + cmap="bwr", + white_on_black=False, + **kwargs + ): """ illustrates normalized residuals of (data - model) / error @@ -91,38 +169,82 @@ def normalized_residual_plot(self, ax, v_min=-6, v_max=6, log_scale=False, text= :param kwargs: plotting keyword arguments :return: matplotlib instance """ - return self._plot(ax, image=self._norm_residuals_joint, coords=self._pixel_grid_joint, v_min=v_min, v_max=v_max, - log_scale=log_scale, text=text, colorbar_label=colorbar_label, cmap=cmap, - white_on_black=white_on_black, **kwargs) + return self._plot( + ax, + image=self._norm_residuals_joint, + coords=self._pixel_grid_joint, + v_min=v_min, + v_max=v_max, + log_scale=log_scale, + text=text, + 
colorbar_label=colorbar_label, + cmap=cmap, + white_on_black=white_on_black, + **kwargs + ) - def convergence_plot(self, ax, log_scale=True, v_min=-2, v_max=0.2, text='Convergence', - colorbar_label=r'$\log_{10}\ \kappa$', **kwargs): - """ - illustrates lensing convergence + def convergence_plot( + self, + ax, + log_scale=True, + v_min=-2, + v_max=0.2, + text="Convergence", + colorbar_label=r"$\log_{10}\ \kappa$", + **kwargs + ): + """Illustrates lensing convergence. :param ax: matplotlib axis instance :param kwargs: plotting keyword arguments :return: matplotlib instance """ - return self._plot(ax, image=self._kappa_joint, coords=self._pixel_grid_joint, log_scale=log_scale, v_min=v_min, - v_max=v_max, text=text, colorbar_label=colorbar_label, **kwargs) + return self._plot( + ax, + image=self._kappa_joint, + coords=self._pixel_grid_joint, + log_scale=log_scale, + v_min=v_min, + v_max=v_max, + text=text, + colorbar_label=colorbar_label, + **kwargs + ) - def magnification_plot(self, ax, log_scale=False, v_min=-10, v_max=10, text="Magnification", - colorbar_label=r"$\det\ (\mathsf{A}^{-1})$", cmap='bwr', white_on_black=False, **kwargs): - """ - illustrates lensing convergence + def magnification_plot( + self, + ax, + log_scale=False, + v_min=-10, + v_max=10, + text="Magnification", + colorbar_label=r"$\det\ (\mathsf{A}^{-1})$", + cmap="bwr", + white_on_black=False, + **kwargs + ): + """Illustrates lensing convergence. :param ax: matplotlib axis instance :param kwargs: plotting keyword arguments :return: matplotlib instance """ - return self._plot(ax, image=self._magnification_joint, coords=self._pixel_grid_joint, log_scale=log_scale, v_min=v_min, - v_max=v_max, text=text, colorbar_label=colorbar_label, cmap=cmap, - white_on_black=white_on_black, **kwargs) + return self._plot( + ax, + image=self._magnification_joint, + coords=self._pixel_grid_joint, + log_scale=log_scale, + v_min=v_min, + v_max=v_max, + text=text, + colorbar_label=colorbar_label, + cmap=cmap, + white_on_black=white_on_black, + **kwargs + ) def plot_main(self, **kwargs): - """ - print the main plots together in a joint frame + """Print the main plots together in a joint frame. 
:return: """ @@ -131,21 +253,40 @@ def plot_main(self, **kwargs): self.data_plot(ax=axes[0, 0], **kwargs) self.model_plot(ax=axes[0, 1], image_names=True, **kwargs) kwargs_residuals = copy.deepcopy(kwargs) - if 'v_min' in kwargs_residuals: - kwargs_residuals.pop('v_min') - if 'v_max' in kwargs_residuals: - kwargs_residuals.pop('v_max') - self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6, **kwargs_residuals) + if "v_min" in kwargs_residuals: + kwargs_residuals.pop("v_min") + if "v_max" in kwargs_residuals: + kwargs_residuals.pop("v_max") + self.normalized_residual_plot( + ax=axes[0, 2], v_min=-6, v_max=6, **kwargs_residuals + ) self.source_plot(ax=axes[1, 0], delta_pix=0.01, num_pix=100, **kwargs) self.convergence_plot(ax=axes[1, 1], **kwargs) self.magnification_plot(ax=axes[1, 2], **kwargs) f.tight_layout() - f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05) + f.subplots_adjust( + left=None, bottom=None, right=None, top=None, wspace=0.0, hspace=0.05 + ) return f, axes - def _plot(self, ax, image, coords, log_scale=True, v_min=None, v_max=None, text='Observed', font_size=15, - colorbar_label=r'log$_{10}$ flux', arrow_size=0.02, cmap=None, dist_scale=1., white_on_black=True, - no_support=False, **kwargs): + def _plot( + self, + ax, + image, + coords, + log_scale=True, + v_min=None, + v_max=None, + text="Observed", + font_size=15, + colorbar_label=r"log$_{10}$ flux", + arrow_size=0.02, + cmap=None, + dist_scale=1.0, + white_on_black=True, + no_support=False, + **kwargs + ): """ :param ax: matplotlib axis instance @@ -155,11 +296,11 @@ def _plot(self, ax, image, coords, log_scale=True, v_min=None, v_max=None, text= :return: matplotlib axis instance """ if white_on_black: - text_k = 'w' - bkg_k = 'k' + text_k = "w" + bkg_k = "k" else: - text_k = 'k' - bkg_k = 'w' + text_k = "k" + bkg_k = "w" if cmap is None: cmap = self._cmap @@ -173,8 +314,14 @@ def _plot(self, ax, image, coords, log_scale=True, v_min=None, v_max=None, text= image_plot = np.log10(image) else: image_plot = image - im = ax.matshow(image_plot, origin='lower', extent=[0, frame_size, 0, frame_size], - cmap=cmap, vmin=v_min, vmax=v_max) # , vmin=0, vmax=2 + im = ax.matshow( + image_plot, + origin="lower", + extent=[0, frame_size, 0, frame_size], + cmap=cmap, + vmin=v_min, + vmax=v_max, + ) # , vmin=0, vmax=2 ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) @@ -182,16 +329,37 @@ def _plot(self, ax, image, coords, log_scale=True, v_min=None, v_max=None, text= if not no_support: text_dist = "{:.1f}".format(dist_scale) + '"' - if 'no_scale_bar' not in kwargs or not kwargs['no_scale_bar']: - plot_util.scale_bar(ax, frame_size, dist=dist_scale, text=text_dist, font_size=font_size, color=text_k) - if 'no_text' not in kwargs or not kwargs['no_text']: - plot_util.text_description(ax, frame_size, text=text, color=text_k, backgroundcolor=bkg_k, font_size=font_size) + if "no_scale_bar" not in kwargs or not kwargs["no_scale_bar"]: + plot_util.scale_bar( + ax, + frame_size, + dist=dist_scale, + text=text_dist, + font_size=font_size, + color=text_k, + ) + if "no_text" not in kwargs or not kwargs["no_text"]: + plot_util.text_description( + ax, + frame_size, + text=text, + color=text_k, + backgroundcolor=bkg_k, + font_size=font_size, + ) - if 'no_arrow' not in kwargs or not kwargs['no_arrow']: - plot_util.coordinate_arrows(ax, frame_size, coords, color=text_k, arrow_size=arrow_size, font_size=font_size) + if "no_arrow" not in kwargs or not kwargs["no_arrow"]: + 
plot_util.coordinate_arrows( + ax, + frame_size, + coords, + color=text_k, + arrow_size=arrow_size, + font_size=font_size, + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) - cb = plt.colorbar(im, cax=cax, orientation='vertical') + cb = plt.colorbar(im, cax=cax, orientation="vertical") cb.set_label(colorbar_label, fontsize=font_size) return ax diff --git a/lenstronomy/Plots/plot_quasar_images.py b/lenstronomy/Plots/plot_quasar_images.py index a8f034e80..3cb9ff5f4 100644 --- a/lenstronomy/Plots/plot_quasar_images.py +++ b/lenstronomy/Plots/plot_quasar_images.py @@ -3,17 +3,28 @@ from lenstronomy.LensModel.lens_model_extensions import LensModelExtensions -def plot_quasar_images(lens_model, x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source, - cosmo=None, grid_resolution=None, - grid_radius_arcsec=None, - source_light_model='SINGLE_GAUSSIAN', - dx=None, dy=None, size_scale=None, amp_scale=None - ): - """ - This function plots the surface brightness in the image plane of a background source modeled as either a single - Gaussian or two Gaussian light profiles. The flux is computed inside a circular aperture with radius - grid_radius_arcsec. If grid_radius_arcsec is not specified a default value will be assumed. +def plot_quasar_images( + lens_model, + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=None, + grid_resolution=None, + grid_radius_arcsec=None, + source_light_model="SINGLE_GAUSSIAN", + dx=None, + dy=None, + size_scale=None, + amp_scale=None, +): + """This function plots the surface brightness in the image plane of a background + source modeled as either a single Gaussian or two Gaussian light profiles. The flux + is computed inside a circular aperture with radius grid_radius_arcsec. If + grid_radius_arcsec is not specified a default value will be assumed. :param lens_model: an instance of LensModel :param x_image: a list or array of x coordinates [units arcsec] @@ -21,21 +32,22 @@ def plot_quasar_images(lens_model, x_image, y_image, source_x, source_y, kwargs_ :param kwargs_lens: keyword arguments for the lens model :param source_fwhm_parsec: the size of the background source [units parsec] :param z_source: the source redshift - :param cosmo: (optional) an instance of astropy.cosmology; if not specified, a default cosmology will be used - :param grid_resolution: the grid resolution in units arcsec/pixel; if not specified, an appropriate value will - be estimated from the source size - :param grid_radius_arcsec: (optional) the size of the ray tracing region in arcsec; if not specified, an appropriate value - will be estimated from the source size - :param source_light_model: the model for background source light; currently implemented are 'SINGLE_GAUSSIAN' and - 'DOUBLE_GAUSSIAN'. 
- :param dx: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source light profile from the first - [arcsec] - :param dy: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source light profile from the first - [arcsec] - :param size_scale: used with source model 'DOUBLE_GAUSSIAN', the size of the second source light profile relative - to the first - :param amp_scale: used with source model 'DOUBLE_GAUSSIAN', the peak brightness of the second source light profile - relative to the first + :param cosmo: (optional) an instance of astropy.cosmology; if not specified, a + default cosmology will be used + :param grid_resolution: the grid resolution in units arcsec/pixel; if not specified, + an appropriate value will be estimated from the source size + :param grid_radius_arcsec: (optional) the size of the ray tracing region in arcsec; + if not specified, an appropriate value will be estimated from the source size + :param source_light_model: the model for background source light; currently + implemented are 'SINGLE_GAUSSIAN' and 'DOUBLE_GAUSSIAN'. + :param dx: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source + light profile from the first [arcsec] + :param dy: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source + light profile from the first [arcsec] + :param size_scale: used with source model 'DOUBLE_GAUSSIAN', the size of the second + source light profile relative to the first + :param amp_scale: used with source model 'DOUBLE_GAUSSIAN', the peak brightness of + the second source light profile relative to the first :return: Four images of the background source in the image plane """ @@ -44,9 +56,28 @@ def plot_quasar_images(lens_model, x_image, y_image, source_x, source_y, kwargs_ magnifications = [] images = [] - grid_x_0, grid_y_0, source_model, kwargs_source, grid_resolution, grid_radius_arcsec = \ - setup_mag_finite(cosmo, lens_model, grid_radius_arcsec, grid_resolution, source_fwhm_parsec, - source_light_model, z_source, source_x, source_y, dx, dy, amp_scale, size_scale) + ( + grid_x_0, + grid_y_0, + source_model, + kwargs_source, + grid_resolution, + grid_radius_arcsec, + ) = setup_mag_finite( + cosmo, + lens_model, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, + ) shape0 = grid_x_0.shape grid_x_0, grid_y_0 = grid_x_0.ravel(), grid_y_0.ravel() @@ -55,25 +86,55 @@ def plot_quasar_images(lens_model, x_image, y_image, source_x, source_y, kwargs_ r_min = 0 r_max = grid_radius_arcsec grid_r = np.hypot(grid_x_0, grid_y_0) - flux_array = lens_model_extension._magnification_adaptive_iteration(flux_array, xi, yi, grid_x_0, grid_y_0, grid_r, - r_min, r_max, lens_model, kwargs_lens, - source_model, kwargs_source) - m = np.sum(flux_array) * grid_resolution ** 2 + flux_array = lens_model_extension._magnification_adaptive_iteration( + flux_array, + xi, + yi, + grid_x_0, + grid_y_0, + grid_r, + r_min, + r_max, + lens_model, + kwargs_lens, + source_model, + kwargs_source, + ) + m = np.sum(flux_array) * grid_resolution**2 magnifications.append(m) images.append(flux_array.reshape(shape0)) magnifications = np.array(magnifications) flux_ratios = magnifications / max(magnifications) import matplotlib.pyplot as plt + fig = plt.figure(1) fig.set_size_inches(16, 6) N = len(images) for i, (image, mag, fr) in enumerate(zip(images, magnifications, flux_ratios)): ax = plt.subplot(1, N, i + 1) - ax.imshow(image, 
origin='lower', - extent=[-grid_radius_arcsec, grid_radius_arcsec, -grid_radius_arcsec, grid_radius_arcsec]) - ax.annotate('magnification: ' + str(np.round(mag, 3)), xy=(0.05, 0.9), xycoords='axes fraction', color='w', - fontsize=12) - ax.annotate('flux ratio: ' + str(np.round(fr, 3)), xy=(0.05, 0.8), xycoords='axes fraction', color='w', - fontsize=12) + ax.imshow( + image, + origin="lower", + extent=[ + -grid_radius_arcsec, + grid_radius_arcsec, + -grid_radius_arcsec, + grid_radius_arcsec, + ], + ) + ax.annotate( + "magnification: " + str(np.round(mag, 3)), + xy=(0.05, 0.9), + xycoords="axes fraction", + color="w", + fontsize=12, + ) + ax.annotate( + "flux ratio: " + str(np.round(fr, 3)), + xy=(0.05, 0.8), + xycoords="axes fraction", + color="w", + fontsize=12, + ) plt.show() diff --git a/lenstronomy/Plots/plot_util.py b/lenstronomy/Plots/plot_util.py index 7c05236a5..e62dcce07 100644 --- a/lenstronomy/Plots/plot_util.py +++ b/lenstronomy/Plots/plot_util.py @@ -4,6 +4,7 @@ import copy from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -19,7 +20,6 @@ def sqrt(inputArray, scale_min=None, scale_max=None): :param scale_max: maximum data value :rtype: numpy array :return: image data array - """ imageData = np.array(inputArray, copy=True) @@ -39,21 +39,33 @@ def sqrt(inputArray, scale_min=None, scale_max=None): @export -def text_description(ax, d, text, color='w', backgroundcolor='k', - flipped=False, font_size=15): - c_vertical = 1/15. #+ font_size / d / 10.**2 - c_horizontal = 1./30 +def text_description( + ax, d, text, color="w", backgroundcolor="k", flipped=False, font_size=15 +): + c_vertical = 1 / 15.0 # + font_size / d / 10.**2 + c_horizontal = 1.0 / 30 if flipped: - ax.text(d - d * c_horizontal, d - d * c_vertical, text, color=color, - fontsize=font_size, - backgroundcolor=backgroundcolor) + ax.text( + d - d * c_horizontal, + d - d * c_vertical, + text, + color=color, + fontsize=font_size, + backgroundcolor=backgroundcolor, + ) else: - ax.text(d * c_horizontal, d - d * c_vertical, text, color=color, fontsize=font_size, - backgroundcolor=backgroundcolor) + ax.text( + d * c_horizontal, + d - d * c_vertical, + text, + color=color, + fontsize=font_size, + backgroundcolor=backgroundcolor, + ) @export -def scale_bar(ax, d, dist=1., text='1"', color='w', font_size=15, flipped=False): +def scale_bar(ax, d, dist=1.0, text='1"', color="w", font_size=15, flipped=False): """ :param ax: matplotlib.axes instance @@ -66,19 +78,32 @@ def scale_bar(ax, d, dist=1., text='1"', color='w', font_size=15, flipped=False) :return: None, updated ax instance """ if flipped: - p0 = d - d / 15. - dist - p1 = d / 15. + p0 = d - d / 15.0 - dist + p1 = d / 15.0 ax.plot([p0, p0 + dist], [p1, p1], linewidth=2, color=color) - ax.text(p0 + dist / 2., p1 + 0.01 * d, text, fontsize=font_size, - color=color, ha='center') + ax.text( + p0 + dist / 2.0, + p1 + 0.01 * d, + text, + fontsize=font_size, + color=color, + ha="center", + ) else: - p0 = d / 15. 
+ p0 = d / 15.0 ax.plot([p0, p0 + dist], [p0, p0], linewidth=2, color=color) - ax.text(p0 + dist / 2., p0 + 0.01 * d, text, fontsize=font_size, color=color, ha='center') + ax.text( + p0 + dist / 2.0, + p0 + 0.01 * d, + text, + fontsize=font_size, + color=color, + ha="center", + ) @export -def coordinate_arrows(ax, d, coords, color='w', font_size=15, arrow_size=0.05): +def coordinate_arrows(ax, d, coords, color="w", font_size=15, arrow_size=0.05): """ :param ax: matplotlib axes instance @@ -89,9 +114,9 @@ def coordinate_arrows(ax, d, coords, color='w', font_size=15, arrow_size=0.05): :param arrow_size: size of arrow :return: updated ax instance """ - d0 = d / 8. - p0 = d / 15. - pt = d / 9. + d0 = d / 8.0 + p0 = d / 15.0 + pt = d / 9.0 deltaPix = coords.pixel_width ra0, dec0 = coords.map_pix2coord((d - d0) / deltaPix, d0 / deltaPix) xx_, yy_ = coords.map_coord2pix(ra0, dec0) @@ -100,35 +125,78 @@ def coordinate_arrows(ax, d, coords, color='w', font_size=15, arrow_size=0.05): xx_ra_t, yy_ra_t = coords.map_coord2pix(ra0 + pt, dec0) xx_dec_t, yy_dec_t = coords.map_coord2pix(ra0, dec0 + pt) - ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_ra - xx_) * deltaPix, (yy_ra - yy_) * deltaPix, - head_width=arrow_size * d, head_length=arrow_size * d, fc=color, ec=color, linewidth=1) - ax.text(xx_ra_t * deltaPix, yy_ra_t * deltaPix, "E", color=color, fontsize=font_size, ha='center') - ax.arrow(xx_ * deltaPix, yy_ * deltaPix, (xx_dec - xx_) * deltaPix, (yy_dec - yy_) * deltaPix, - head_width=arrow_size * d, head_length=arrow_size * d, fc - =color, ec=color, linewidth=1) - ax.text(xx_dec_t * deltaPix, yy_dec_t * deltaPix, "N", color=color, fontsize=font_size, ha='center') + ax.arrow( + xx_ * deltaPix, + yy_ * deltaPix, + (xx_ra - xx_) * deltaPix, + (yy_ra - yy_) * deltaPix, + head_width=arrow_size * d, + head_length=arrow_size * d, + fc=color, + ec=color, + linewidth=1, + ) + ax.text( + xx_ra_t * deltaPix, + yy_ra_t * deltaPix, + "E", + color=color, + fontsize=font_size, + ha="center", + ) + ax.arrow( + xx_ * deltaPix, + yy_ * deltaPix, + (xx_dec - xx_) * deltaPix, + (yy_dec - yy_) * deltaPix, + head_width=arrow_size * d, + head_length=arrow_size * d, + fc=color, + ec=color, + linewidth=1, + ) + ax.text( + xx_dec_t * deltaPix, + yy_dec_t * deltaPix, + "N", + color=color, + fontsize=font_size, + ha="center", + ) @export -def plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=None, flipped_x=False, points_only=False, - pixel_offset=True, *args, **kwargs): - """ - plotting a line set on a matplotlib instance where the coordinates are defined in pixel units with the lower left - corner (defined as origin) is by default (0, 0). The coordinates are moved by 0.5 pixels to be placed in the center - of the pixel in accordance with the matplotlib.matshow() routine. +def plot_line_set( + ax, + coords, + line_set_list_x, + line_set_list_y, + origin=None, + flipped_x=False, + points_only=False, + pixel_offset=True, + *args, + **kwargs +): + """Plotting a line set on a matplotlib instance where the coordinates are defined in + pixel units with the lower left corner (defined as origin) is by default (0, 0). The + coordinates are moved by 0.5 pixels to be placed in the center of the pixel in + accordance with the matplotlib.matshow() routine. :param ax: matplotlib.axis instance :param coords: Coordinates() class instance :param origin: [x0, y0], lower left pixel coordinate in the frame of the pixels - :param line_set_list_x: numpy arrays corresponding of different disconnected regions of the line - (e.g. 
caustic or critical curve) - :param line_set_list_y: numpy arrays corresponding of different disconnected regions of the line - (e.g. caustic or critical curve) + :param line_set_list_x: numpy arrays corresponding of different disconnected regions + of the line (e.g. caustic or critical curve) + :param line_set_list_y: numpy arrays corresponding of different disconnected regions + of the line (e.g. caustic or critical curve) :param color: string with matplotlib color :param flipped_x: bool, if True, flips x-axis - :param points_only: bool, if True, sets plotting keywords to plot single points without connecting lines - :param pixel_offset: boolean; if True (default plotting), the coordinates are shifted a half a pixel to match with - the matshow() command to center the coordinates in the pixel center + :param points_only: bool, if True, sets plotting keywords to plot single points + without connecting lines + :param pixel_offset: boolean; if True (default plotting), the coordinates are + shifted a half a pixel to match with the matshow() command to center the + coordinates in the pixel center :return: plot with line sets on matplotlib axis in pixel coordinates """ if origin is None: @@ -136,12 +204,12 @@ def plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=None, fli pixel_width = coords.pixel_width pixel_width_x = pixel_width if points_only: - if 'linestyle' not in kwargs: - kwargs['linestyle'] = "" - if 'marker' not in kwargs: - kwargs['marker'] = "o" - if 'markersize' not in kwargs: - kwargs['markersize'] = 0.01 + if "linestyle" not in kwargs: + kwargs["linestyle"] = "" + if "marker" not in kwargs: + kwargs["marker"] = "o" + if "markersize" not in kwargs: + kwargs["markersize"] = 0.01 if flipped_x: pixel_width_x = -pixel_width if pixel_offset is True: @@ -151,16 +219,36 @@ def plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=None, fli if isinstance(line_set_list_x, list): for i in range(len(line_set_list_x)): x_c, y_c = coords.map_coord2pix(line_set_list_x[i], line_set_list_y[i]) - ax.plot((x_c + shift) * pixel_width_x + origin[0], (y_c + shift) * pixel_width + origin[1], *args, **kwargs) + ax.plot( + (x_c + shift) * pixel_width_x + origin[0], + (y_c + shift) * pixel_width + origin[1], + *args, + **kwargs + ) else: x_c, y_c = coords.map_coord2pix(line_set_list_x, line_set_list_y) - ax.plot((x_c + shift) * pixel_width_x + origin[0], (y_c + shift) * pixel_width + origin[1], *args, **kwargs) + ax.plot( + (x_c + shift) * pixel_width_x + origin[0], + (y_c + shift) * pixel_width + origin[1], + *args, + **kwargs + ) return ax @export -def image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_list=None, origin=None, flipped_x=False, - pixel_offset=True, plot_out_of_image=True): +def image_position_plot( + ax, + coords, + ra_image, + dec_image, + color="w", + image_name_list=None, + origin=None, + flipped_x=False, + pixel_offset=True, + plot_out_of_image=True, +): """ :param ax: matplotlib axis instance @@ -184,7 +272,7 @@ def image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_l if flipped_x: pixel_width_x = -pixel_width if image_name_list is None: - image_name_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'] + image_name_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"] if not isinstance(ra_image, list): ra_image_, dec_image_ = [ra_image], [dec_image] @@ -206,13 +294,15 @@ def image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_l if 0 < x_image[i] < nx and 
0 < y_image[i] < ny: x_ = (x_image[i] + shift) * pixel_width_x + origin[0] y_ = (y_image[i] + shift) * pixel_width + origin[1] - ax.plot(x_, y_, 'o', color=color) + ax.plot(x_, y_, "o", color=color) ax.text(x_, y_, image_name_list[i], fontsize=20, color=color) return ax @export -def source_position_plot(ax, coords, ra_source, dec_source, marker='*', markersize=10, **kwargs): +def source_position_plot( + ax, coords, ra_source, dec_source, marker="*", markersize=10, **kwargs +): """ :param ax: matplotlib axis instance @@ -227,8 +317,13 @@ def source_position_plot(ax, coords, ra_source, dec_source, marker='*', markersi if len(ra_source) > 0: for ra, dec in zip(ra_source, dec_source): x_source, y_source = coords.map_coord2pix(ra, dec) - ax.plot((x_source + 0.5) * delta_pix, (y_source + 0.5) * delta_pix, marker=marker, markersize=markersize, - **kwargs) + ax.plot( + (x_source + 0.5) * delta_pix, + (y_source + 0.5) * delta_pix, + marker=marker, + markersize=markersize, + **kwargs + ) return ax @@ -258,8 +353,7 @@ def result_string(x, weights=None, title_fmt=".2f", label=None): @export def cmap_conf(cmap_string): - """ - configures matplotlib color map + """Configures matplotlib color map. :param cmap_string: string of cmap name, or cmap instance :return: cmap instance with setting for bad pixels and values below the threshold @@ -268,8 +362,8 @@ def cmap_conf(cmap_string): cmap = plt.get_cmap(cmap_string) else: cmap = cmap_string - #cmap_new = cmap.copy() + # cmap_new = cmap.copy() cmap_new = copy.deepcopy(cmap) - cmap_new.set_bad(color='k', alpha=1.) - cmap_new.set_under('k') + cmap_new.set_bad(color="k", alpha=1.0) + cmap_new.set_under("k") return cmap_new diff --git a/lenstronomy/PointSource/Types/base_ps.py b/lenstronomy/PointSource/Types/base_ps.py index 732beb666..526c72752 100644 --- a/lenstronomy/PointSource/Types/base_ps.py +++ b/lenstronomy/PointSource/Types/base_ps.py @@ -1,15 +1,20 @@ from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver import numpy as np -__all__ = ['PSBase', '_expand_to_array', '_shrink_array'] +__all__ = ["PSBase", "_expand_to_array", "_shrink_array"] class PSBase(object): - """ - base point source type class - """ - def __init__(self, lens_model=None, fixed_magnification=False, additional_images=False, index_lens_model_list=None, - point_source_frame_list=None): + """Base point source type class.""" + + def __init__( + self, + lens_model=None, + fixed_magnification=False, + additional_images=False, + index_lens_model_list=None, + point_source_frame_list=None, + ): """ :param lens_model: instance of the LensModel() class @@ -38,50 +43,57 @@ def __init__(self, lens_model=None, fixed_magnification=False, additional_images self._fixed_magnification = fixed_magnification self.additional_images = additional_images if fixed_magnification is True and additional_images is True: - Warning('The combination of fixed_magnification=True and additional_image=True is not optimal for the ' - 'current computation. If you see this warning, please approach the developers.') + Warning( + "The combination of fixed_magnification=True and additional_image=True is not optimal for the " + "current computation. If you see this warning, please approach the developers." + ) def image_position(self, kwargs_ps, **kwargs): - """ - on-sky position + """On-sky position. 
        :param kwargs_ps: keyword argument of point source model
        :return: numpy array of x, y image positions
        """
-        raise ValueError('image_position definition is not defined in the profile you want to execute.')
+        raise ValueError(
+            "image_position definition is not defined in the profile you want to execute."
+        )

     def source_position(self, kwargs_ps, **kwargs):
-        """
-        original unlensed position
+        """Original unlensed position.

         :param kwargs_ps: keyword argument of point source model
         :return: numpy array of x, y source positions
         """
-        raise ValueError('source_position definition is not defined in the profile you want to execute.')
+        raise ValueError(
+            "source_position definition is not defined in the profile you want to execute."
+        )

     def image_amplitude(self, kwargs_ps, *args, **kwargs):
-        """
-        amplitudes as observed on the sky
+        """Amplitudes as observed on the sky.

         :param kwargs_ps: keyword argument of point source model
         :param kwargs: keyword arguments of function call
         :return: numpy array of amplitudes
         """
-        raise ValueError('source_position definition is not defined in the profile you want to execute.')
+        raise ValueError(
+            "source_position definition is not defined in the profile you want to execute."
+        )

     def source_amplitude(self, kwargs_ps, **kwargs):
-        """
-        intrinsic source amplitudes (without lensing magnification, but still apparent)
+        """Intrinsic source amplitudes (without lensing magnification, but still
+        apparent)

         :param kwargs_ps: keyword argument of point source model
-        :param kwargs: keyword arguments of function call (which are not used for this object
+        :param kwargs: keyword arguments of function call (which are not used for this
+            object
         :return: numpy array of amplitudes
         """
-        raise ValueError('source_position definition is not defined in the profile you want to execute.')
+        raise ValueError(
+            "source_position definition is not defined in the profile you want to execute."
+        )

     def update_lens_model(self, lens_model_class):
-        """
-        update LensModel() and LensEquationSolver() instance
+        """Update LensModel() and LensEquationSolver() instance.

         :param lens_model_class: LensModel() class instance
         :return: internal lensModel class updated
@@ -104,7 +116,7 @@ def _expand_to_array(array, num):
         return np.ones(num) * array
     elif len(array) < num:
         out = np.zeros(num)
-        out[0:len(array)] = array
+        out[0 : len(array)] = array
         return out
     else:
         return array
@@ -122,7 +134,9 @@ def _shrink_array(array, num):
         array_return = array[:num]
         return array_return
     elif len(array) < num:
-        raise ValueError("the length of the array (%s) needs to be larger or equal than the designated length %s "
-                         % (len(array), num))
-    else:
+        raise ValueError(
+            "the length of the array (%s) needs to be larger than or equal to the designated length %s "
+            % (len(array), num)
+        )
+    else:
         return array
diff --git a/lenstronomy/PointSource/Types/lensed_position.py b/lenstronomy/PointSource/Types/lensed_position.py
index 9eca01ab0..8df02b80d 100644
--- a/lenstronomy/PointSource/Types/lensed_position.py
+++ b/lenstronomy/PointSource/Types/lensed_position.py
@@ -1,7 +1,7 @@
 import numpy as np
 from lenstronomy.PointSource.Types.base_ps import PSBase, _expand_to_array

-__all__ = ['LensedPositions']
+__all__ = ["LensedPositions"]


 class LensedPositions(PSBase):
@@ -12,23 +12,32 @@ class of a lensed point source parameterized as the (multiple) observed image po
     If fixed_magnification=True, than 'source_amp' is a parameter instead of 'point_amp'
     """
+
     # def __init__(self, lens_model=None, fixed_magnification=False, additional_image=False):
     #     super(LensedPositions, self).__init__(lens_model=lens_model, fixed_magnification=fixed_magnification,
     #                                           additional_image=additional_image)

-    def image_position(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver=None,
-                       additional_images=False):
-        """
-        on-sky image positions
+    def image_position(
+        self,
+        kwargs_ps,
+        kwargs_lens=None,
+        magnification_limit=None,
+        kwargs_lens_eqn_solver=None,
+        additional_images=False,
+    ):
+        """On-sky image positions.
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation - solver - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details - :param additional_images: if True, solves the lens equation for additional images + :param kwargs_lens: keyword argument list of the lens model(s), only used when + requiring the lens equation solver + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details + :param additional_images: if True, solves the lens equation for additional + images :type additional_images: bool :return: image positions in x, y as arrays """ @@ -37,95 +46,121 @@ def image_position(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver = {} ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens) # TODO: this solver does not distinguish between different frames/bands with partial lens models - ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens, - magnification_limit=magnification_limit, - **kwargs_lens_eqn_solver) + ra_image, dec_image = self._solver.image_position_from_source( + ra_source, + dec_source, + kwargs_lens, + magnification_limit=magnification_limit, + **kwargs_lens_eqn_solver + ) else: - ra_image = kwargs_ps['ra_image'] - dec_image = kwargs_ps['dec_image'] + ra_image = kwargs_ps["ra_image"] + dec_image = kwargs_ps["dec_image"] return np.array(ra_image), np.array(dec_image) def source_position(self, kwargs_ps, kwargs_lens=None): - """ - original source position (prior to lensing) + """Original source position (prior to lensing) :param kwargs_ps: point source keyword arguments - :param kwargs_lens: lens model keyword argument list (required to ray-trace back in the source plane) + :param kwargs_lens: lens model keyword argument list (required to ray-trace back + in the source plane) :return: x, y position (as numpy arrays) """ - ra_image = kwargs_ps['ra_image'] - dec_image = kwargs_ps['dec_image'] + ra_image = kwargs_ps["ra_image"] + dec_image = kwargs_ps["dec_image"] if self.k_list is None: - x_source, y_source = self._lens_model.ray_shooting(ra_image, dec_image, kwargs_lens) + x_source, y_source = self._lens_model.ray_shooting( + ra_image, dec_image, kwargs_lens + ) else: x_source, y_source = [], [] for i in range(len(ra_image)): - x, y = self._lens_model.ray_shooting(ra_image[i], dec_image[i], kwargs_lens, k=self.k_list[i]) + x, y = self._lens_model.ray_shooting( + ra_image[i], dec_image[i], kwargs_lens, k=self.k_list[i] + ) x_source.append(x) y_source.append(y) x_source = np.mean(x_source) y_source = np.mean(y_source) return np.array(x_source), np.array(y_source) - def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, magnification_limit=None, - kwargs_lens_eqn_solver=None): - """ - image brightness amplitudes + def image_amplitude( + self, + kwargs_ps, + 
kwargs_lens=None, + x_pos=None, + y_pos=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ): + """Image brightness amplitudes. :param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation - solver + :param kwargs_lens: keyword argument list of the lens model(s), only used when + requiring the lens equation solver :param x_pos: pre-computed image position (no lens equation solver applied) :param y_pos: pre-computed image position (no lens equation solver applied) - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details :return: array of image amplitudes """ if self._fixed_magnification: if x_pos is not None and y_pos is not None: ra_image, dec_image = x_pos, y_pos else: - ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens, - magnification_limit=magnification_limit, - kwargs_lens_eqn_solver=kwargs_lens_eqn_solver) + ra_image, dec_image = self.image_position( + kwargs_ps, + kwargs_lens, + magnification_limit=magnification_limit, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + ) if self.k_list is None: mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens) else: mag = [] for i in range(len(ra_image)): - mag.append(self._lens_model.magnification(ra_image, dec_image, kwargs_lens, k=self.k_list[i])) - point_amp = kwargs_ps['source_amp'] * np.abs(mag) + mag.append( + self._lens_model.magnification( + ra_image, dec_image, kwargs_lens, k=self.k_list[i] + ) + ) + point_amp = kwargs_ps["source_amp"] * np.abs(mag) else: - point_amp = kwargs_ps['point_amp'] + point_amp = kwargs_ps["point_amp"] if x_pos is not None: point_amp = _expand_to_array(point_amp, len(x_pos)) return np.array(point_amp) def source_amplitude(self, kwargs_ps, kwargs_lens=None): - """ - intrinsic brightness amplitude of point source - When brightnesses are defined in magnified on-sky positions, the intrinsic brightness is computed as the mean + """Intrinsic brightness amplitude of point source When brightnesses are defined + in magnified on-sky positions, the intrinsic brightness is computed as the mean in the magnification corrected image position brightnesses. 
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), used when brightness are defined in - magnified on-sky positions + :param kwargs_lens: keyword argument list of the lens model(s), used when + brightness are defined in magnified on-sky positions :return: brightness amplitude (as numpy array) """ if self._fixed_magnification: - source_amp = kwargs_ps['source_amp'] + source_amp = kwargs_ps["source_amp"] else: - ra_image, dec_image = kwargs_ps['ra_image'], kwargs_ps['dec_image'] + ra_image, dec_image = kwargs_ps["ra_image"], kwargs_ps["dec_image"] if self.k_list is None: mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens) else: mag = [] for i in range(len(ra_image)): - mag.append(self._lens_model.magnification(ra_image, dec_image, kwargs_lens, k=self.k_list[i])) - point_amp = kwargs_ps['point_amp'] + mag.append( + self._lens_model.magnification( + ra_image, dec_image, kwargs_lens, k=self.k_list[i] + ) + ) + point_amp = kwargs_ps["point_amp"] source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag))) return np.array(source_amp) diff --git a/lenstronomy/PointSource/Types/source_position.py b/lenstronomy/PointSource/Types/source_position.py index b064a6ca2..f00ff914c 100644 --- a/lenstronomy/PointSource/Types/source_position.py +++ b/lenstronomy/PointSource/Types/source_position.py @@ -1,68 +1,89 @@ import numpy as np -from lenstronomy.PointSource.Types.base_ps import PSBase, _expand_to_array, _shrink_array +from lenstronomy.PointSource.Types.base_ps import ( + PSBase, + _expand_to_array, + _shrink_array, +) -__all__ = ['SourcePositions'] +__all__ = ["SourcePositions"] class SourcePositions(PSBase): - """ - class of a single point source defined in the original source coordinate position that is lensed. - The lens equation is solved to compute the image positions for the specified source position. + """Class of a single point source defined in the original source coordinate position + that is lensed. The lens equation is solved to compute the image positions for the + specified source position. Name within the PointSource module: 'SOURCE_POSITION' parameters: ra_source, dec_source, source_amp, mag_pert (optional) If fixed_magnification=True, than 'source_amp' is a parameter instead of 'point_amp' mag_pert is a list of fractional magnification pertubations applied to point source images - """ - def image_position(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver=None, - **kwargs): - """ - on-sky image positions + def image_position( + self, + kwargs_ps, + kwargs_lens=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + **kwargs + ): + """On-sky image positions. 
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation - solver - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details + :param kwargs_lens: keyword argument list of the lens model(s), only used when + requiring the lens equation solver + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details :return: image positions in x, y as arrays """ if kwargs_lens_eqn_solver is None: kwargs_lens_eqn_solver = {} ra_source, dec_source = self.source_position(kwargs_ps) - ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens, - magnification_limit=magnification_limit, - **kwargs_lens_eqn_solver) + ra_image, dec_image = self._solver.image_position_from_source( + ra_source, + dec_source, + kwargs_lens, + magnification_limit=magnification_limit, + **kwargs_lens_eqn_solver + ) return ra_image, dec_image def source_position(self, kwargs_ps, **kwargs): - """ - original source position (prior to lensing) + """Original source position (prior to lensing) :param kwargs_ps: point source keyword arguments :return: x, y position (as numpy arrays) """ - ra_source = kwargs_ps['ra_source'] - dec_source = kwargs_ps['dec_source'] + ra_source = kwargs_ps["ra_source"] + dec_source = kwargs_ps["dec_source"] return np.array(ra_source), np.array(dec_source) - def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, magnification_limit=None, - kwargs_lens_eqn_solver=None): - """ - image brightness amplitudes + def image_amplitude( + self, + kwargs_ps, + kwargs_lens=None, + x_pos=None, + y_pos=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ): + """Image brightness amplitudes. 
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only ignored when providing image positions - directly + :param kwargs_lens: keyword argument list of the lens model(s), only ignored + when providing image positions directly :param x_pos: pre-computed image position (no lens equation solver applied) :param y_pos: pre-computed image position (no lens equation solver applied) - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details :return: array of image amplitudes """ if self._fixed_magnification: @@ -71,36 +92,38 @@ def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, m else: if kwargs_lens_eqn_solver is None: kwargs_lens_eqn_solver = {} - ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens=kwargs_lens, - magnification_limit=magnification_limit, - **kwargs_lens_eqn_solver) + ra_image, dec_image = self.image_position( + kwargs_ps, + kwargs_lens=kwargs_lens, + magnification_limit=magnification_limit, + **kwargs_lens_eqn_solver + ) mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens) - point_amp = kwargs_ps['source_amp'] * np.abs(mag) + point_amp = kwargs_ps["source_amp"] * np.abs(mag) else: - point_amp = kwargs_ps['point_amp'] + point_amp = kwargs_ps["point_amp"] if x_pos is not None: point_amp = _expand_to_array(point_amp, len(x_pos)) - mag_pert = kwargs_ps.get('mag_pert', 1) + mag_pert = kwargs_ps.get("mag_pert", 1) mag_pert = _shrink_array(mag_pert, len(point_amp)) point_amp *= np.array(mag_pert) return np.array(point_amp) def source_amplitude(self, kwargs_ps, kwargs_lens=None): - """ - intrinsic brightness amplitude of point source - When brightnesses are defined in magnified on-sky positions, the intrinsic brightness is computed as the mean + """Intrinsic brightness amplitude of point source When brightnesses are defined + in magnified on-sky positions, the intrinsic brightness is computed as the mean in the magnification corrected image position brightnesses. 
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), used when brightness are defined in - magnified on-sky positions + :param kwargs_lens: keyword argument list of the lens model(s), used when + brightness are defined in magnified on-sky positions :return: brightness amplitude (as numpy array) """ if self._fixed_magnification: - source_amp = kwargs_ps['source_amp'] + source_amp = kwargs_ps["source_amp"] else: ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens) mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens) - point_amp = kwargs_ps['point_amp'] + point_amp = kwargs_ps["point_amp"] source_amp = np.mean(np.array(point_amp) / np.array(mag)) return np.array(source_amp) diff --git a/lenstronomy/PointSource/Types/unlensed.py b/lenstronomy/PointSource/Types/unlensed.py index ea3dbaf5d..49e429f78 100644 --- a/lenstronomy/PointSource/Types/unlensed.py +++ b/lenstronomy/PointSource/Types/unlensed.py @@ -1,7 +1,7 @@ from lenstronomy.PointSource.Types.base_ps import PSBase import numpy as np -__all__ = ['Unlensed'] +__all__ = ["Unlensed"] class Unlensed(PSBase): @@ -14,45 +14,43 @@ class of a single point source in the image plane, aka star """ def image_position(self, kwargs_ps, **kwargs): - """ - on-sky position + """On-sky position. :param kwargs_ps: keyword argument of point source model :return: numpy array of x, y image positions """ - ra_image = kwargs_ps['ra_image'] - dec_image = kwargs_ps['dec_image'] + ra_image = kwargs_ps["ra_image"] + dec_image = kwargs_ps["dec_image"] return np.array(ra_image), np.array(dec_image) def source_position(self, kwargs_ps, **kwargs): - """ - original physical position (identical for this object) + """Original physical position (identical for this object) :param kwargs_ps: keyword argument of point source model :return: numpy array of x, y source positions """ - ra_image = kwargs_ps['ra_image'] - dec_image = kwargs_ps['dec_image'] + ra_image = kwargs_ps["ra_image"] + dec_image = kwargs_ps["dec_image"] return np.array(ra_image), np.array(dec_image) def image_amplitude(self, kwargs_ps, **kwargs): - """ - amplitudes as observed on the sky + """Amplitudes as observed on the sky. :param kwargs_ps: keyword argument of point source model - :param kwargs: keyword arguments of function call (which are not used for this object + :param kwargs: keyword arguments of function call (which are not used for this + object :return: numpy array of amplitudes """ - point_amp = kwargs_ps['point_amp'] + point_amp = kwargs_ps["point_amp"] return np.array(point_amp) def source_amplitude(self, kwargs_ps, **kwargs): - """ - intrinsic source amplitudes + """Intrinsic source amplitudes. 
:param kwargs_ps: keyword argument of point source model - :param kwargs: keyword arguments of function call (which are not used for this object + :param kwargs: keyword arguments of function call (which are not used for this + object :return: numpy array of amplitudes """ - point_amp = kwargs_ps['point_amp'] + point_amp = kwargs_ps["point_amp"] return np.array(point_amp) diff --git a/lenstronomy/PointSource/point_source.py b/lenstronomy/PointSource/point_source.py index 18e8808c2..d9667df2e 100644 --- a/lenstronomy/PointSource/point_source.py +++ b/lenstronomy/PointSource/point_source.py @@ -2,17 +2,25 @@ import copy from lenstronomy.PointSource.point_source_cached import PointSourceCached -__all__ = ['PointSource'] +__all__ = ["PointSource"] -_SUPPORTED_MODELS = ['UNLENSED', 'LENSED_POSITION', 'SOURCE_POSITION'] +_SUPPORTED_MODELS = ["UNLENSED", "LENSED_POSITION", "SOURCE_POSITION"] class PointSource(object): - - def __init__(self, point_source_type_list, lensModel=None, fixed_magnification_list=None, - additional_images_list=None, flux_from_point_source_list=None, magnification_limit=None, - save_cache=False, kwargs_lens_eqn_solver=None, index_lens_model_list=None, - point_source_frame_list=None): + def __init__( + self, + point_source_type_list, + lensModel=None, + fixed_magnification_list=None, + additional_images_list=None, + flux_from_point_source_list=None, + magnification_limit=None, + save_cache=False, + kwargs_lens_eqn_solver=None, + index_lens_model_list=None, + point_source_frame_list=None, + ): """ :param point_source_type_list: list of point source types @@ -45,8 +53,10 @@ def __init__(self, point_source_type_list, lensModel=None, fixed_magnification_l """ if len(point_source_type_list) > 0: if index_lens_model_list is not None and point_source_frame_list is None: - raise ValueError('with specified index_lens_model_list a specified point_source_frame_list argument is ' - 'required') + raise ValueError( + "with specified index_lens_model_list a specified point_source_frame_list argument is " + "required" + ) if index_lens_model_list is None: point_source_frame_list = [None] * len(point_source_type_list) self._index_lens_model_list = index_lens_model_list @@ -63,53 +73,92 @@ def __init__(self, point_source_type_list, lensModel=None, fixed_magnification_l flux_from_point_source_list = [True] * len(point_source_type_list) self._flux_from_point_source_list = flux_from_point_source_list for i, model in enumerate(point_source_type_list): - if model == 'UNLENSED': + if model == "UNLENSED": from lenstronomy.PointSource.Types.unlensed import Unlensed - self._point_source_list.append(PointSourceCached(Unlensed(), save_cache=save_cache)) - elif model == 'LENSED_POSITION': - from lenstronomy.PointSource.Types.lensed_position import LensedPositions - self._point_source_list.append(PointSourceCached(LensedPositions(lensModel, - fixed_magnification=fixed_magnification_list[i], - additional_images=additional_images_list[i], - index_lens_model_list=index_lens_model_list, - point_source_frame_list=point_source_frame_list[i]), - save_cache=save_cache)) - elif model == 'SOURCE_POSITION': - from lenstronomy.PointSource.Types.source_position import SourcePositions - self._point_source_list.append(PointSourceCached(SourcePositions(lensModel, - fixed_magnification=fixed_magnification_list[i]), - save_cache=save_cache)) + + self._point_source_list.append( + PointSourceCached(Unlensed(), save_cache=save_cache) + ) + elif model == "LENSED_POSITION": + from lenstronomy.PointSource.Types.lensed_position 
import ( + LensedPositions, + ) + + self._point_source_list.append( + PointSourceCached( + LensedPositions( + lensModel, + fixed_magnification=fixed_magnification_list[i], + additional_images=additional_images_list[i], + index_lens_model_list=index_lens_model_list, + point_source_frame_list=point_source_frame_list[i], + ), + save_cache=save_cache, + ) + ) + elif model == "SOURCE_POSITION": + from lenstronomy.PointSource.Types.source_position import ( + SourcePositions, + ) + + self._point_source_list.append( + PointSourceCached( + SourcePositions( + lensModel, fixed_magnification=fixed_magnification_list[i] + ), + save_cache=save_cache, + ) + ) else: - raise ValueError("Point-source model %s not available. Supported models are %s ." - % (model, _SUPPORTED_MODELS)) + raise ValueError( + "Point-source model %s not available. Supported models are %s ." + % (model, _SUPPORTED_MODELS) + ) if kwargs_lens_eqn_solver is None: kwargs_lens_eqn_solver = {} self._kwargs_lens_eqn_solver = kwargs_lens_eqn_solver self._magnification_limit = magnification_limit self._save_cache = save_cache - def update_search_window(self, search_window, x_center, y_center, min_distance=None, only_from_unspecified=False): - """ - update the search area for the lens equation solver - - :param search_window: search_window: window size of the image position search with the lens equation solver. + def update_search_window( + self, + search_window, + x_center, + y_center, + min_distance=None, + only_from_unspecified=False, + ): + """Update the search area for the lens equation solver. + + :param search_window: search_window: window size of the image position search + with the lens equation solver. :param x_center: center of search window :param y_center: center of search window :param min_distance: minimum search distance - :param only_from_unspecified: bool, if True, only sets keywords that previously have not been set + :param only_from_unspecified: bool, if True, only sets keywords that previously + have not been set :return: updated self instances """ - if min_distance is not None and 'min_distance' not in self._kwargs_lens_eqn_solver and only_from_unspecified: - self._kwargs_lens_eqn_solver['min_distance'] = min_distance + if ( + min_distance is not None + and "min_distance" not in self._kwargs_lens_eqn_solver + and only_from_unspecified + ): + self._kwargs_lens_eqn_solver["min_distance"] = min_distance if only_from_unspecified: - self._kwargs_lens_eqn_solver['search_window'] = self._kwargs_lens_eqn_solver.get('search_window', - search_window) - self._kwargs_lens_eqn_solver['x_center'] = self._kwargs_lens_eqn_solver.get('x_center', x_center) - self._kwargs_lens_eqn_solver['y_center'] = self._kwargs_lens_eqn_solver.get('y_center', y_center) + self._kwargs_lens_eqn_solver[ + "search_window" + ] = self._kwargs_lens_eqn_solver.get("search_window", search_window) + self._kwargs_lens_eqn_solver["x_center"] = self._kwargs_lens_eqn_solver.get( + "x_center", x_center + ) + self._kwargs_lens_eqn_solver["y_center"] = self._kwargs_lens_eqn_solver.get( + "y_center", y_center + ) else: - self._kwargs_lens_eqn_solver['search_window'] = search_window - self._kwargs_lens_eqn_solver['x_center'] = x_center - self._kwargs_lens_eqn_solver['y_center'] = y_center + self._kwargs_lens_eqn_solver["search_window"] = search_window + self._kwargs_lens_eqn_solver["x_center"] = x_center + self._kwargs_lens_eqn_solver["y_center"] = y_center def update_lens_model(self, lens_model_class): """ @@ -123,8 +172,7 @@ def update_lens_model(self, 
lens_model_class): model.update_lens_model(lens_model_class=lens_model_class) def delete_lens_model_cache(self): - """ - deletes the variables saved for a specific lens model + """Deletes the variables saved for a specific lens model. :return: None """ @@ -132,19 +180,18 @@ def delete_lens_model_cache(self): model.delete_lens_model_cache() def set_save_cache(self, save_cache): - """ - set the save cache boolean to new value + """Set the save cache boolean to new value. :param save_cache: bool, if True, saves (or uses a previously saved) values - :return: updated class and sub-class instances to either save or not save the point source information in cache + :return: updated class and sub-class instances to either save or not save the + point source information in cache """ self._set_save_cache(save_cache) self._save_cache = save_cache def _set_save_cache(self, save_cache): - """ - set the save cache boolean to new value. This function is for use within this class for temporarily set the - cache within a single routine. + """Set the save cache boolean to new value. This function is for use within this + class for temporarily set the cache within a single routine. :param save_cache: bool, if True, saves (or uses a previously saved) values :return: None @@ -167,8 +214,7 @@ def k_list(self, k): return k_list def source_position(self, kwargs_ps, kwargs_lens): - """ - intrinsic source positions of the point sources + """Intrinsic source positions of the point sources. :param kwargs_ps: keyword argument list of point source models :param kwargs_lens: keyword argument list of lens models @@ -183,17 +229,26 @@ def source_position(self, kwargs_ps, kwargs_lens): y_source_list.append(y_source) return x_source_list, y_source_list - def image_position(self, kwargs_ps, kwargs_lens, k=None, original_position=False, additional_images=False): - """ - image positions as observed on the sky of the point sources + def image_position( + self, + kwargs_ps, + kwargs_lens, + k=None, + original_position=False, + additional_images=False, + ): + """Image positions as observed on the sky of the point sources. 
:param kwargs_ps: point source parameter keyword argument list :param kwargs_lens: lens model keyword argument list - :param k: None, int or boolean list; only returns a subset of the model predictions - :param original_position: boolean (only applies to 'LENSED_POSITION' models), returns the image positions in - the model parameters and does not re-compute images (which might be differently ordered) in case of the lens - equation solver - :param additional_images: if True, solves the lens equation for additional images + :param k: None, int or boolean list; only returns a subset of the model + predictions + :param original_position: boolean (only applies to 'LENSED_POSITION' models), + returns the image positions in the model parameters and does not re-compute + images (which might be differently ordered) in case of the lens equation + solver + :param additional_images: if True, solves the lens equation for additional + images :type additional_images: bool :return: list of: list of image positions per point source model component """ @@ -202,29 +257,38 @@ def image_position(self, kwargs_ps, kwargs_lens, k=None, original_position=False for i, model in enumerate(self._point_source_list): if k is None or k == i: kwargs = kwargs_ps[i] - x_image, y_image = model.image_position(kwargs, kwargs_lens, - magnification_limit=self._magnification_limit, - kwargs_lens_eqn_solver=self._kwargs_lens_eqn_solver, - additional_images=additional_images) + x_image, y_image = model.image_position( + kwargs, + kwargs_lens, + magnification_limit=self._magnification_limit, + kwargs_lens_eqn_solver=self._kwargs_lens_eqn_solver, + additional_images=additional_images, + ) # this takes action when new images are computed not necessary in order - if original_position is True and additional_images is True and\ - self.point_source_type_list[i] == 'LENSED_POSITION': - x_o, y_o = kwargs['ra_image'], kwargs['dec_image'] - x_image, y_image = _sort_position_by_original(x_o, y_o, x_image, y_image) + if ( + original_position is True + and additional_images is True + and self.point_source_type_list[i] == "LENSED_POSITION" + ): + x_o, y_o = kwargs["ra_image"], kwargs["dec_image"] + x_image, y_image = _sort_position_by_original( + x_o, y_o, x_image, y_image + ) x_image_list.append(x_image) y_image_list.append(y_image) return x_image_list, y_image_list def point_source_list(self, kwargs_ps, kwargs_lens, k=None, with_amp=True): - """ - returns the coordinates and amplitudes of all point sources in a single array + """Returns the coordinates and amplitudes of all point sources in a single + array. 
:param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list - :param k: None, int or list of int's to select a subset of the point source models in the return - :param with_amp: bool, if False, ignores the amplitude parameters in the return and instead provides ones for - each point source image + :param k: None, int or list of int's to select a subset of the point source + models in the return + :param with_amp: bool, if False, ignores the amplitude parameters in the return + and instead provides ones for each point source image :return: ra_array, dec_array, amp_array """ # here we save the cache of the individual models but do not overwrite the class boolean variable to do so @@ -250,8 +314,7 @@ def point_source_list(self, kwargs_ps, kwargs_lens, k=None, with_amp=True): return ra_array, dec_array, amp_array def num_basis(self, kwargs_ps, kwargs_lens): - """ - number of basis functions for linear inversion + """Number of basis functions for linear inversion. :param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list @@ -268,24 +331,28 @@ def num_basis(self, kwargs_ps, kwargs_lens): return n def image_amplitude(self, kwargs_ps, kwargs_lens, k=None): - """ - returns the image amplitudes + """Returns the image amplitudes. :param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list - :param k: None, int or list of int's to select a subset of the point source models in the return + :param k: None, int or list of int's to select a subset of the point source + models in the return :return: list of image amplitudes per model component """ amp_list = [] for i, model in enumerate(self._point_source_list): if (k is None or k == i) and self._flux_from_point_source_list[i]: - amp_list.append(model.image_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens, - kwargs_lens_eqn_solver=self._kwargs_lens_eqn_solver)) + amp_list.append( + model.image_amplitude( + kwargs_ps=kwargs_ps[i], + kwargs_lens=kwargs_lens, + kwargs_lens_eqn_solver=self._kwargs_lens_eqn_solver, + ) + ) return amp_list def source_amplitude(self, kwargs_ps, kwargs_lens): - """ - intrinsic (unlensed) point source amplitudes + """Intrinsic (unlensed) point source amplitudes. 
:param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list @@ -294,7 +361,11 @@ def source_amplitude(self, kwargs_ps, kwargs_lens): amp_list = [] for i, model in enumerate(self._point_source_list): if self._flux_from_point_source_list[i]: - amp_list.append(model.source_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens)) + amp_list.append( + model.source_amplitude( + kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens + ) + ) return amp_list def linear_response_set(self, kwargs_ps, kwargs_lens=None, with_amp=False): @@ -353,17 +424,17 @@ def update_linear(self, param, i, kwargs_ps, kwargs_lens): if self._flux_from_point_source_list[k]: kwargs = kwargs_ps[k] if self._fixed_magnification_list[k]: - kwargs['source_amp'] = param[i] + kwargs["source_amp"] = param[i] i += 1 else: n_points = len(ra_pos_list[k]) - kwargs['point_amp'] = np.array(param[i:i + n_points]) + kwargs["point_amp"] = np.array(param[i : i + n_points]) i += n_points return kwargs_ps, i def linear_param_from_kwargs(self, kwargs_list): - """ - inverse function of update_linear() returning the linear amplitude list for the keyword argument list + """Inverse function of update_linear() returning the linear amplitude list for + the keyword argument list. :param kwargs_list: model parameters including the linear amplitude parameters :type kwargs_list: list of keyword arguments @@ -375,37 +446,41 @@ def linear_param_from_kwargs(self, kwargs_list): if self._flux_from_point_source_list[k]: kwargs = kwargs_list[k] if self._fixed_magnification_list[k]: - param.append(kwargs['source_amp']) + param.append(kwargs["source_amp"]) else: - for a in kwargs['point_amp']: + for a in kwargs["point_amp"]: param.append(a) return param def check_image_positions(self, kwargs_ps, kwargs_lens, tolerance=0.001): - """ - checks whether the point sources in kwargs_ps satisfy the lens equation with a tolerance - (computed by ray-tracing in the source plane) + """Checks whether the point sources in kwargs_ps satisfy the lens equation with + a tolerance (computed by ray-tracing in the source plane) :param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list - :param tolerance: Euclidian distance between the source positions ray-traced backwards to be tolerated + :param tolerance: Euclidian distance between the source positions ray-traced + backwards to be tolerated :return: bool: True, if requirement on tolerance is fulfilled, False if not. 
""" x_image_list, y_image_list = self.image_position(kwargs_ps, kwargs_lens) for i, model in enumerate(self.point_source_type_list): - if model in ['LENSED_POSITION', 'SOURCE_POSITION']: + if model in ["LENSED_POSITION", "SOURCE_POSITION"]: x_pos = x_image_list[i] y_pos = y_image_list[i] - x_source, y_source = self._lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) - dist = np.sqrt((x_source - x_source[0]) ** 2 + (y_source - y_source[0]) ** 2) + x_source, y_source = self._lensModel.ray_shooting( + x_pos, y_pos, kwargs_lens + ) + dist = np.sqrt( + (x_source - x_source[0]) ** 2 + (y_source - y_source[0]) ** 2 + ) if np.max(dist) > tolerance: return False return True def set_amplitudes(self, amp_list, kwargs_ps): - """ - translates the amplitude parameters into the convention of the keyword argument list - currently only used in SimAPI to transform magnitudes to amplitudes in the lenstronomy conventions + """Translates the amplitude parameters into the convention of the keyword + argument list currently only used in SimAPI to transform magnitudes to + amplitudes in the lenstronomy conventions. :param amp_list: list of model amplitudes for each point source model :param kwargs_ps: list of point source keywords @@ -415,32 +490,31 @@ def set_amplitudes(self, amp_list, kwargs_ps): for i, model in enumerate(self.point_source_type_list): if self._flux_from_point_source_list[i]: amp = amp_list[i] - if model == 'UNLENSED': - kwargs_list[i]['point_amp'] = amp - elif model in ['LENSED_POSITION', 'SOURCE_POSITION']: + if model == "UNLENSED": + kwargs_list[i]["point_amp"] = amp + elif model in ["LENSED_POSITION", "SOURCE_POSITION"]: if self._fixed_magnification_list[i] is True: - kwargs_list[i]['source_amp'] = amp + kwargs_list[i]["source_amp"] = amp else: - kwargs_list[i]['point_amp'] = amp + kwargs_list[i]["point_amp"] = amp return kwargs_list @classmethod def check_positive_flux(cls, kwargs_ps): - """ - check whether inferred linear parameters are positive + """Check whether inferred linear parameters are positive. :param kwargs_ps: point source keyword argument list :return: bool, True, if all 'point_amp' parameters are positive semi-definite """ pos_bool = True for kwargs in kwargs_ps: - if 'point_amp' in kwargs: - point_amp = kwargs['point_amp'] + if "point_amp" in kwargs: + point_amp = kwargs["point_amp"] if not np.all(point_amp >= 0): pos_bool = False break - if 'source_amp' in kwargs: - point_amp = kwargs['source_amp'] + if "source_amp" in kwargs: + point_amp = kwargs["source_amp"] if not np.all(point_amp >= 0): pos_bool = False break @@ -448,15 +522,16 @@ def check_positive_flux(cls, kwargs_ps): def _sort_position_by_original(x_o, y_o, x_solved, y_solved): - """ - sorting new image positions such that the old order is best preserved + """Sorting new image positions such that the old order is best preserved. 
:param x_o: numpy array; original image positions :param y_o: numpy array; original image positions - :param x_solved: numpy array; solved image positions with potentially more or fewer images - :param y_solved: numpy array; solved image positions with potentially more or fewer images - :return: sorted new image positions with the order best matching the original positions first, - and then all other images in the same order as solved for + :param x_solved: numpy array; solved image positions with potentially more or fewer + images + :param y_solved: numpy array; solved image positions with potentially more or fewer + images + :return: sorted new image positions with the order best matching the original + positions first, and then all other images in the same order as solved for """ if len(x_o) > len(x_solved): # if new images are less , then return the original images (no sorting required) diff --git a/lenstronomy/PointSource/point_source_cached.py b/lenstronomy/PointSource/point_source_cached.py index b5f6e0407..52a0e429b 100644 --- a/lenstronomy/PointSource/point_source_cached.py +++ b/lenstronomy/PointSource/point_source_cached.py @@ -1,29 +1,30 @@ -__all__ = ['PointSourceCached'] +__all__ = ["PointSourceCached"] class PointSourceCached(object): - """ - This class is the same as PointSource() except that it saves image and source positions in cache. + """This class is the same as PointSource() except that it saves image and source + positions in cache. + This speeds-up repeated calls for the same source and lens model and avoids duplicating the lens equation solving. Attention: cache needs to be deleted before calling functions with different lens and point source parameters. - """ + def __init__(self, point_source_model, save_cache=False): self._model = point_source_model self._save_cache = save_cache def delete_lens_model_cache(self): - if hasattr(self, '_x_image'): + if hasattr(self, "_x_image"): del self._x_image - if hasattr(self, '_y_image'): + if hasattr(self, "_y_image"): del self._y_image - if hasattr(self, '_x_image_add'): + if hasattr(self, "_x_image_add"): del self._x_image_add - if hasattr(self, '_y_image_add'): + if hasattr(self, "_y_image_add"): del self._y_image_add - if hasattr(self, '_x_source'): + if hasattr(self, "_x_source"): del self._x_source - if hasattr(self, '_y_source'): + if hasattr(self, "_y_source"): del self._y_source def set_save_cache(self, save_bool): @@ -32,73 +33,112 @@ def set_save_cache(self, save_bool): def update_lens_model(self, lens_model_class): self._model.update_lens_model(lens_model_class) - def image_position(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver=None, - additional_images=False): - """ - on-sky image positions + def image_position( + self, + kwargs_ps, + kwargs_lens=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + additional_images=False, + ): + """On-sky image positions. 
:param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation - solver - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details - :param additional_images: if True, solves the lens equation for additional images + :param kwargs_lens: keyword argument list of the lens model(s), only used when + requiring the lens equation solver + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details + :param additional_images: if True, solves the lens equation for additional + images :type additional_images: bool :return: image positions in x, y as arrays """ if additional_images and not self._model.additional_images: # ignore cached parts if additional images - if not self._save_cache or not hasattr(self, '_x_image_add') or not hasattr(self, '_y_image_add'): - self._x_image_add, self._y_image_add = self._model.image_position(kwargs_ps, kwargs_lens=kwargs_lens, - magnification_limit=magnification_limit, - kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, - additional_images=additional_images) + if ( + not self._save_cache + or not hasattr(self, "_x_image_add") + or not hasattr(self, "_y_image_add") + ): + self._x_image_add, self._y_image_add = self._model.image_position( + kwargs_ps, + kwargs_lens=kwargs_lens, + magnification_limit=magnification_limit, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + additional_images=additional_images, + ) return self._x_image_add, self._y_image_add - if not self._save_cache or not hasattr(self, '_x_image') or not hasattr(self, '_y_image'): - self._x_image, self._y_image = self._model.image_position(kwargs_ps, kwargs_lens=kwargs_lens, - magnification_limit=magnification_limit, - kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, - additional_images=additional_images) + if ( + not self._save_cache + or not hasattr(self, "_x_image") + or not hasattr(self, "_y_image") + ): + self._x_image, self._y_image = self._model.image_position( + kwargs_ps, + kwargs_lens=kwargs_lens, + magnification_limit=magnification_limit, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + additional_images=additional_images, + ) return self._x_image, self._y_image def source_position(self, kwargs_ps, kwargs_lens=None): - """ - original source position (prior to lensing) + """Original source position (prior to lensing) :param kwargs_ps: point source keyword arguments :param kwargs_lens: lens model keyword argument list (only used when required) :return: x, y position """ - if not self._save_cache or not hasattr(self, '_x_source') or not hasattr(self, '_y_source'): - self._x_source, self._y_source = self._model.source_position(kwargs_ps, kwargs_lens=kwargs_lens) + if ( + not self._save_cache + or not hasattr(self, "_x_source") + or not hasattr(self, "_y_source") + ): + self._x_source, self._y_source = self._model.source_position( + kwargs_ps, kwargs_lens=kwargs_lens + ) return self._x_source, 
self._y_source - def image_amplitude(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver=None): - """ - image brightness amplitudes + def image_amplitude( + self, + kwargs_ps, + kwargs_lens=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ): + """Image brightness amplitudes. :param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation - solver - :param magnification_limit: float >0 or None, if float is set and additional images are computed, only those - images will be computed that exceed the lensing magnification (absolute value) limit - :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver - see LensEquationSolver() class for details + :param kwargs_lens: keyword argument list of the lens model(s), only used when + requiring the lens equation solver + :param magnification_limit: float >0 or None, if float is set and additional + images are computed, only those images will be computed that exceed the + lensing magnification (absolute value) limit + :param kwargs_lens_eqn_solver: keyword arguments specifying the numerical + settings for the lens equation solver see LensEquationSolver() class for + details :return: array of image amplitudes """ - x_pos, y_pos = self.image_position(kwargs_ps, kwargs_lens, magnification_limit=magnification_limit, - kwargs_lens_eqn_solver=kwargs_lens_eqn_solver) - return self._model.image_amplitude(kwargs_ps, kwargs_lens=kwargs_lens, x_pos=x_pos, y_pos=y_pos) + x_pos, y_pos = self.image_position( + kwargs_ps, + kwargs_lens, + magnification_limit=magnification_limit, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + ) + return self._model.image_amplitude( + kwargs_ps, kwargs_lens=kwargs_lens, x_pos=x_pos, y_pos=y_pos + ) def source_amplitude(self, kwargs_ps, kwargs_lens=None): - """ - intrinsic brightness amplitude of point source + """Intrinsic brightness amplitude of point source. :param kwargs_ps: keyword arguments of the point source model - :param kwargs_lens: keyword argument list of the lens model(s), only used when positions are defined in image - plane and have to be ray-traced back + :param kwargs_lens: keyword argument list of the lens model(s), only used when + positions are defined in image plane and have to be ray-traced back :return: brightness amplitude (as numpy array) """ return self._model.source_amplitude(kwargs_ps, kwargs_lens=kwargs_lens) diff --git a/lenstronomy/PointSource/point_source_param.py b/lenstronomy/PointSource/point_source_param.py index 84bf989e5..ae5c75ad1 100644 --- a/lenstronomy/PointSource/point_source_param.py +++ b/lenstronomy/PointSource/point_source_param.py @@ -1,64 +1,75 @@ import numpy as np -__all__ = ['PointSourceParam'] +__all__ = ["PointSourceParam"] from lenstronomy.Sampling.param_group import ModelParamGroup, SingleParam, ArrayParam class SourcePositionParam(SingleParam): - """ - Source position parameter, ra_source and dec_source - """ - param_names = ['ra_source', 'dec_source'] - _kwargs_lower = {'ra_source': -100, 'dec_source': -100} - _kwargs_upper = {'ra_source': 100, 'dec_source': 100} + """Source position parameter, ra_source and dec_source.""" + + param_names = ["ra_source", "dec_source"] + _kwargs_lower = {"ra_source": -100, "dec_source": -100} + _kwargs_upper = {"ra_source": 100, "dec_source": 100} class LensedPosition(ArrayParam): - """ - Represents lensed positions, possibly many. 
ra_image and dec_image + """Represents lensed positions, possibly many. ra_image and dec_image. :param num_images: integer. The number of lensed positions to model. """ - _kwargs_lower = {'ra_image': -100, 'dec_image': -100, } - _kwargs_upper = {'ra_image': 100, 'dec_image': 100, } + + _kwargs_lower = { + "ra_image": -100, + "dec_image": -100, + } + _kwargs_upper = { + "ra_image": 100, + "dec_image": 100, + } def __init__(self, num_images): ArrayParam.__init__(self, int(num_images) > 0) - self.param_names = {'ra_image': int(num_images), 'dec_image': int(num_images)} + self.param_names = {"ra_image": int(num_images), "dec_image": int(num_images)} class SourceAmp(SingleParam): - """ - Source amplification - """ - param_names = ['source_amp'] - _kwargs_lower = {'source_amp': 0} - _kwargs_upper = {'source_amp': 100} + """Source amplification.""" + + param_names = ["source_amp"] + _kwargs_lower = {"source_amp": 0} + _kwargs_upper = {"source_amp": 100} class ImageAmp(ArrayParam): - """ - Observed amplification of lensed images of a point source. Can model - arbitrarily many magnified images + """Observed amplification of lensed images of a point source. Can model arbitrarily + many magnified images. - :param num_point_sources: integer. The number of lensed images without fixed magnification. + :param num_point_sources: integer. The number of lensed images without fixed + magnification. """ - _kwargs_lower = {'point_amp': 0} - _kwargs_upper = {'point_amp': 100} + + _kwargs_lower = {"point_amp": 0} + _kwargs_upper = {"point_amp": 100} def __init__(self, num_point_sources): ArrayParam.__init__(self, int(num_point_sources) > 0) - self.param_names = {'point_amp': int(num_point_sources)} + self.param_names = {"point_amp": int(num_point_sources)} class PointSourceParam(object): - """ - Point source parameters - """ - - def __init__(self, model_list, kwargs_fixed, num_point_source_list=None, linear_solver=True, - fixed_magnification_list=None, kwargs_lower=None, kwargs_upper=None): + """Point source parameters.""" + + def __init__( + self, + model_list, + kwargs_fixed, + num_point_source_list=None, + linear_solver=True, + fixed_magnification_list=None, + kwargs_lower=None, + kwargs_upper=None, + ): """ :param model_list: list of point source model names @@ -85,14 +96,17 @@ def __init__(self, model_list, kwargs_fixed, num_point_source_list=None, linear_ for i, model in enumerate(self.model_list): params = [] num = num_point_source_list[i] - if model in ['LENSED_POSITION', 'UNLENSED']: + if model in ["LENSED_POSITION", "UNLENSED"]: params.append(LensedPosition(num)) - elif model == 'SOURCE_POSITION': + elif model == "SOURCE_POSITION": params.append(SourcePositionParam(True)) else: raise ValueError("%s not a valid point source model" % model) - if fixed_magnification_list[i] and model in ['LENSED_POSITION', 'SOURCE_POSITION']: + if fixed_magnification_list[i] and model in [ + "LENSED_POSITION", + "SOURCE_POSITION", + ]: params.append(SourceAmp(True)) else: params.append(ImageAmp(num)) @@ -143,14 +157,15 @@ def set_params(self, kwargs_list): for k, param_group in enumerate(self.param_groups): kwargs = kwargs_list[k] kwargs_fixed = self.kwargs_fixed[k] - args.extend(ModelParamGroup.compose_set_params( - param_group, kwargs, kwargs_fixed=kwargs_fixed - )) + args.extend( + ModelParamGroup.compose_set_params( + param_group, kwargs, kwargs_fixed=kwargs_fixed + ) + ) return args def num_param(self): - """ - number of parameters and their names + """Number of parameters and their names. 
:return: int, list of parameter names """ @@ -164,17 +179,19 @@ def num_param(self): return num, name_list def add_fix_linear(self, kwargs_fixed): - """ - updates fixed keyword argument list with linear parameters + """Updates fixed keyword argument list with linear parameters. :param kwargs_fixed: list of keyword arguments held fixed during sampling :return: updated keyword argument list """ for k, model in enumerate(self.model_list): - if self._fixed_magnification_list[k] is True and model in ['LENSED_POSITION', 'SOURCE_POSITION']: - kwargs_fixed[k]['source_amp'] = 1 + if self._fixed_magnification_list[k] is True and model in [ + "LENSED_POSITION", + "SOURCE_POSITION", + ]: + kwargs_fixed[k]["source_amp"] = 1 else: - kwargs_fixed[k]['point_amp'] = np.ones(self._num_point_sources_list[k]) + kwargs_fixed[k]["point_amp"] = np.ones(self._num_point_sources_list[k]) return kwargs_fixed def num_param_linear(self): @@ -185,7 +202,10 @@ def num_param_linear(self): num = 0 if self._linear_solver is True: for k, model in enumerate(self.model_list): - if self._fixed_magnification_list[k] is True and model in ['LENSED_POSITION', 'SOURCE_POSITION']: + if self._fixed_magnification_list[k] is True and model in [ + "LENSED_POSITION", + "SOURCE_POSITION", + ]: num += 1 else: num += self._num_point_sources_list[k] diff --git a/lenstronomy/Sampling/Likelihoods/flux_ratio_likelihood.py b/lenstronomy/Sampling/Likelihoods/flux_ratio_likelihood.py index 8ca36cf05..14e80b3cd 100644 --- a/lenstronomy/Sampling/Likelihoods/flux_ratio_likelihood.py +++ b/lenstronomy/Sampling/Likelihoods/flux_ratio_likelihood.py @@ -1,16 +1,23 @@ from lenstronomy.LensModel.lens_model_extensions import LensModelExtensions import numpy as np -__all__ = ['FluxRatioLikelihood'] +__all__ = ["FluxRatioLikelihood"] class FluxRatioLikelihood(object): - """ - likelihood class for magnification of multiply lensed images - """ + """Likelihood class for magnification of multiply lensed images.""" - def __init__(self, lens_model_class, flux_ratios, flux_ratio_errors, - source_type='INF', window_size=0.1, grid_number=100, polar_grid=False, aspect_ratio=0.5): + def __init__( + self, + lens_model_class, + flux_ratios, + flux_ratio_errors, + source_type="INF", + window_size=0.1, + grid_number=100, + polar_grid=False, + aspect_ratio=0.5, + ): """ :param lens_model_class: LensModel class instance @@ -42,17 +49,24 @@ def logL(self, x_pos, y_pos, kwargs_lens, kwargs_special): :param kwargs_special: dictionary of 'special' keyword parameters :return: log likelihood of the measured flux ratios given a model """ - if self._source_type == 'INF': - mag = np.abs(self._lens_model_class.magnification(x_pos, y_pos, kwargs_lens)) + if self._source_type == "INF": + mag = np.abs( + self._lens_model_class.magnification(x_pos, y_pos, kwargs_lens) + ) else: - source_sigma = kwargs_special['source_size'] - mag = self._lens_model_extensions.magnification_finite(x_pos, y_pos, kwargs_lens, source_sigma=source_sigma, - window_size=self._window_size, - grid_number=self._gird_number, - polar_grid=self._polar_grid, - aspect_ratio=self._aspect_ratio) - if len(mag)-1 != len(self._flux_ratios): - return -10**15 + source_sigma = kwargs_special["source_size"] + mag = self._lens_model_extensions.magnification_finite( + x_pos, + y_pos, + kwargs_lens, + source_sigma=source_sigma, + window_size=self._window_size, + grid_number=self._gird_number, + polar_grid=self._polar_grid, + aspect_ratio=self._aspect_ratio, + ) + if len(mag) - 1 != len(self._flux_ratios): + return -(10**15) 
mag_ratio = mag[1:] / mag[0] return self._logL(mag_ratio) @@ -64,19 +78,27 @@ def _logL(self, flux_ratios): :return: log likelihood fo the measured flux ratios given a model """ if not np.isfinite(flux_ratios).any(): - return -10 ** 15 + return -(10**15) if self._flux_ratio_errors.ndim <= 1: - dist = (flux_ratios - self._flux_ratios) ** 2 / self._flux_ratio_errors ** 2 / 2 + dist = ( + (flux_ratios - self._flux_ratios) ** 2 + / self._flux_ratio_errors**2 + / 2 + ) logL = -np.sum(dist) elif self._flux_ratio_errors.ndim == 2: # Assume covariance matrix is in ln units! D = np.log(flux_ratios) - np.log(self._flux_ratios) - logL = -1/2 * D @ np.linalg.inv(self._flux_ratio_errors) @ D # TODO: only calculate the inverse once + logL = ( + -1 / 2 * D @ np.linalg.inv(self._flux_ratio_errors) @ D + ) # TODO: only calculate the inverse once else: - raise ValueError('flux_ratio_errors need dimension of 1 or 2. Current dimensions are %s' - % self._flux_ratio_errors.ndim) + raise ValueError( + "flux_ratio_errors need dimension of 1 or 2. Current dimensions are %s" + % self._flux_ratio_errors.ndim + ) if not np.isfinite(logL): - return -10 ** 15 + return -(10**15) return logL @property diff --git a/lenstronomy/Sampling/Likelihoods/image_likelihood.py b/lenstronomy/Sampling/Likelihoods/image_likelihood.py index fb7734a77..f8e7687f3 100644 --- a/lenstronomy/Sampling/Likelihoods/image_likelihood.py +++ b/lenstronomy/Sampling/Likelihoods/image_likelihood.py @@ -1,17 +1,25 @@ import numpy as np from lenstronomy.Util import class_creator -__all__ = ['ImageLikelihood'] +__all__ = ["ImageLikelihood"] class ImageLikelihood(object): - """ - manages imaging data likelihoods - """ + """Manages imaging data likelihoods.""" - def __init__(self, multi_band_list, multi_band_type, kwargs_model, bands_compute=None, - image_likelihood_mask_list=None, source_marg=False, linear_prior=None, check_positive_flux=False, - kwargs_pixelbased=None, linear_solver=True): + def __init__( + self, + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=None, + image_likelihood_mask_list=None, + source_marg=False, + linear_prior=None, + check_positive_flux=False, + kwargs_pixelbased=None, + linear_solver=True, + ): """ :param bands_compute: list of bools with same length as data objects, indicates which "band" to include in the @@ -29,17 +37,29 @@ def __init__(self, multi_band_list, multi_band_type, kwargs_model, bands_compute :param linear_solver: bool, if True (default) fixes the linear amplitude parameters 'amp' (avoid sampling) such that they get overwritten by the linear solver solution. 
""" - self.imSim = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, - bands_compute=bands_compute, - image_likelihood_mask_list=image_likelihood_mask_list, - kwargs_pixelbased=kwargs_pixelbased, linear_solver=linear_solver) + self.imSim = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=bands_compute, + image_likelihood_mask_list=image_likelihood_mask_list, + kwargs_pixelbased=kwargs_pixelbased, + linear_solver=linear_solver, + ) self._model_type = self.imSim.type self._source_marg = source_marg self._linear_prior = linear_prior self._check_positive_flux = check_positive_flux - def logL(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, kwargs_special=None, - kwargs_extinction=None): + def logL( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): """ :param kwargs_lens: lens model keyword argument list according to LensModel module @@ -50,13 +70,19 @@ def logL(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwa :param kwargs_extinction: extinction parameter keyword argument list according to LightModel module :return: log likelihood of the data given the model """ - logL = self.imSim.likelihood_data_given_model(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, - kwargs_extinction=kwargs_extinction, - kwargs_special=kwargs_special, - source_marg=self._source_marg, linear_prior=self._linear_prior, - check_positive_flux=self._check_positive_flux) + logL = self.imSim.likelihood_data_given_model( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_extinction=kwargs_extinction, + kwargs_special=kwargs_special, + source_marg=self._source_marg, + linear_prior=self._linear_prior, + check_positive_flux=self._check_positive_flux, + ) if np.isnan(logL) is True: - return -10 ** 15 + return -(10**15) return logL @property @@ -67,13 +93,22 @@ def num_data(self): """ return self.imSim.num_data_evaluate - def num_param_linear(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_special=None, kwargs_extinction=None): + def num_param_linear( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): """ :return: number of linear parameters solved for during the image reconstruction process """ - return self.imSim.num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + return self.imSim.num_param_linear( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) def reset_point_source_cache(self, cache=True): """ diff --git a/lenstronomy/Sampling/Likelihoods/position_likelihood.py b/lenstronomy/Sampling/Likelihoods/position_likelihood.py index aa3a45051..c0385c134 100644 --- a/lenstronomy/Sampling/Likelihoods/position_likelihood.py +++ b/lenstronomy/Sampling/Likelihoods/position_likelihood.py @@ -1,18 +1,28 @@ import numpy as np from numpy.linalg import inv -__all__ = ['PositionLikelihood'] +__all__ = ["PositionLikelihood"] class PositionLikelihood(object): - """ - likelihood of positions of multiply imaged point sources - """ - def __init__(self, point_source_class, image_position_uncertainty=0.005, astrometric_likelihood=False, - image_position_likelihood=False, ra_image_list=None, dec_image_list=None, - source_position_likelihood=False, check_matched_source_position=False, 
source_position_tolerance=0.001, - source_position_sigma=0.001, force_no_add_image=False, restrict_image_number=False, - max_num_images=None): + """Likelihood of positions of multiply imaged point sources.""" + + def __init__( + self, + point_source_class, + image_position_uncertainty=0.005, + astrometric_likelihood=False, + image_position_likelihood=False, + ra_image_list=None, + dec_image_list=None, + source_position_likelihood=False, + check_matched_source_position=False, + source_position_tolerance=0.001, + source_position_sigma=0.001, + force_no_add_image=False, + restrict_image_number=False, + max_num_images=None, + ): """ :param point_source_class: Instance of PointSource() class @@ -51,7 +61,9 @@ def __init__(self, point_source_class, image_position_uncertainty=0.005, astrome self._source_position_likelihood = source_position_likelihood self._max_num_images = max_num_images if max_num_images is None and restrict_image_number is True: - raise ValueError('max_num_images needs to be provided when restrict_number_images is True!') + raise ValueError( + "max_num_images needs to be provided when restrict_number_images is True!" + ) self._image_position_likelihood = image_position_likelihood if ra_image_list is None: ra_image_list = [] @@ -71,130 +83,165 @@ def logL(self, kwargs_lens, kwargs_ps, kwargs_special, verbose=False): logL = 0 if self._astrometric_likelihood is True: - logL_astrometry = self.astrometric_likelihood(kwargs_ps, kwargs_special, self._image_position_sigma) + logL_astrometry = self.astrometric_likelihood( + kwargs_ps, kwargs_special, self._image_position_sigma + ) logL += logL_astrometry if verbose is True: - print('Astrometric likelihood = %s' % logL_astrometry) + print("Astrometric likelihood = %s" % logL_astrometry) if self._check_matched_source_position is True: - logL_source_scatter = self.source_position_likelihood(kwargs_lens, kwargs_ps, self._source_position_sigma, - hard_bound_rms=self._bound_source_position_scatter, - verbose=verbose) + logL_source_scatter = self.source_position_likelihood( + kwargs_lens, + kwargs_ps, + self._source_position_sigma, + hard_bound_rms=self._bound_source_position_scatter, + verbose=verbose, + ) logL += logL_source_scatter if verbose is True: - print('Source scatter punishing likelihood = %s' % logL_source_scatter) + print("Source scatter punishing likelihood = %s" % logL_source_scatter) if self._force_no_add_image: additional_image_bool = self.check_additional_images(kwargs_ps, kwargs_lens) if additional_image_bool is True: - logL -= 10.**5 + logL -= 10.0**5 if verbose is True: - print('force no additional image penalty as additional images are found!') + print( + "force no additional image penalty as additional images are found!" 
+ ) if self._restrict_number_images is True: - ra_image_list, dec_image_list = self._pointSource.image_position(kwargs_ps=kwargs_ps, - kwargs_lens=kwargs_lens) + ra_image_list, dec_image_list = self._pointSource.image_position( + kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens + ) if len(ra_image_list[0]) > self._max_num_images: - logL -= 10.**5 + logL -= 10.0**5 if verbose is True: - print('Number of images found %s exceeded the limited number allowed %s' % (len(ra_image_list[0]), - self._max_num_images)) + print( + "Number of images found %s exceeded the limited number allowed %s" + % (len(ra_image_list[0]), self._max_num_images) + ) if self._source_position_likelihood is True: - logL_source_pos = self.source_position_likelihood(kwargs_lens, kwargs_ps, sigma=self._image_position_sigma) + logL_source_pos = self.source_position_likelihood( + kwargs_lens, kwargs_ps, sigma=self._image_position_sigma + ) logL += logL_source_pos if verbose is True: - print('source position likelihood %s' % logL_source_pos) + print("source position likelihood %s" % logL_source_pos) if self._image_position_likelihood is True: - logL_image_pos = self.image_position_likelihood(kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, - sigma=self._image_position_sigma) + logL_image_pos = self.image_position_likelihood( + kwargs_ps=kwargs_ps, + kwargs_lens=kwargs_lens, + sigma=self._image_position_sigma, + ) logL += logL_image_pos if verbose is True: - print('image position likelihood %s' % logL_image_pos) + print("image position likelihood %s" % logL_image_pos) return logL def check_additional_images(self, kwargs_ps, kwargs_lens): - """ - checks whether additional images have been found and placed in kwargs_ps of the first point source model + """Checks whether additional images have been found and placed in kwargs_ps of + the first point source model. :param kwargs_ps: point source kwargs :param kwargs_lens: lens model keyword arguments - :return: bool, True if more image positions are found than originally been assigned + :return: bool, True if more image positions are found than originally been + assigned """ - ra_image_list, dec_image_list = self._pointSource.image_position(kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, - additional_images=True) + ra_image_list, dec_image_list = self._pointSource.image_position( + kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, additional_images=True + ) for i in range(len(ra_image_list)): - if 'ra_image' in kwargs_ps[i]: - if len(ra_image_list[i]) > len(kwargs_ps[i]['ra_image']): + if "ra_image" in kwargs_ps[i]: + if len(ra_image_list[i]) > len(kwargs_ps[i]["ra_image"]): return True return False @staticmethod def astrometric_likelihood(kwargs_ps, kwargs_special, sigma): - """ - evaluates the astrometric uncertainty of the model plotted point sources (only available for 'LENSED_POSITION' - point source model) and predicted image position by the lens model including an astrometric correction term. + """Evaluates the astrometric uncertainty of the model plotted point sources + (only available for 'LENSED_POSITION' point source model) and predicted image + position by the lens model including an astrometric correction term. 
:param kwargs_ps: point source model kwargs list - :param kwargs_special: kwargs list, should include the astrometric corrections 'delta_x', 'delta_y' + :param kwargs_special: kwargs list, should include the astrometric corrections + 'delta_x', 'delta_y' :param sigma: 1-sigma Gaussian uncertainty in the astrometry - :return: log likelihood of the astrometirc correction between predicted image positions and model placement of - the point sources + :return: log likelihood of the astrometirc correction between predicted image + positions and model placement of the point sources """ if not len(kwargs_ps) > 0: return 0 - if 'ra_image' not in kwargs_ps[0]: + if "ra_image" not in kwargs_ps[0]: return 0 - if 'delta_x_image' in kwargs_special: - delta_x, delta_y = np.array(kwargs_special['delta_x_image']), np.array(kwargs_special['delta_y_image']) - dist = (delta_x ** 2 + delta_y ** 2) / sigma ** 2 / 2 + if "delta_x_image" in kwargs_special: + delta_x, delta_y = np.array(kwargs_special["delta_x_image"]), np.array( + kwargs_special["delta_y_image"] + ) + dist = (delta_x**2 + delta_y**2) / sigma**2 / 2 logL = -np.sum(dist) if np.isnan(logL) is True: - return -10**15 + return -(10**15) return logL else: return 0 def image_position_likelihood(self, kwargs_ps, kwargs_lens, sigma): - """ - computes the likelihood of the model predicted image position relative to measured image positions with an - astrometric error. - This routine requires the 'ra_image_list' and 'dec_image_list' being declared in the initiation of the class + """Computes the likelihood of the model predicted image position relative to + measured image positions with an astrometric error. This routine requires the + 'ra_image_list' and 'dec_image_list' being declared in the initiation of the + class. :param kwargs_ps: point source keyword argument list :param kwargs_lens: lens model keyword argument list :param sigma: 1-sigma uncertainty in the measured position of the images - :return: log likelihood of the model predicted image positions given the data/measured image positions. + :return: log likelihood of the model predicted image positions given the + data/measured image positions. """ - ra_image_list, dec_image_list = self._pointSource.image_position(kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, - original_position=True) + ra_image_list, dec_image_list = self._pointSource.image_position( + kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, original_position=True + ) logL = 0 - for i in range(len(ra_image_list)): # sum over the images of the different model components + for i in range( + len(ra_image_list) + ): # sum over the images of the different model components len_i = min(len(self._ra_image_list[i]), len(ra_image_list[i])) - logL += -np.sum(((ra_image_list[i][:len_i] - self._ra_image_list[i][:len_i])**2 + - (dec_image_list[i][:len_i] - self._dec_image_list[i][:len_i])**2) / sigma**2 / 2) + logL += -np.sum( + ( + (ra_image_list[i][:len_i] - self._ra_image_list[i][:len_i]) ** 2 + + (dec_image_list[i][:len_i] - self._dec_image_list[i][:len_i]) ** 2 + ) + / sigma**2 + / 2 + ) return logL - def source_position_likelihood(self, kwargs_lens, kwargs_ps, sigma, hard_bound_rms=None, verbose=False): - """ - computes a likelihood/punishing factor of how well the source positions of multiple images match given the image - position and a lens model. - The likelihood level is computed in respect of a displacement in the image plane and transposed through the - Hessian into the source plane. 
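# Illustrative sketch (not part of the diff): the astrometric term reformatted above
# is an isotropic Gaussian penalty on the image-plane correction offsets
# 'delta_x_image'/'delta_y_image'; with hypothetical numbers:
import numpy as np
delta_x = np.array([0.002, -0.001, 0.003])   # hypothetical offsets (arcsec)
delta_y = np.array([0.001, 0.002, -0.002])
sigma = 0.005                                # 1-sigma astrometric uncertainty
logL_astrometry = -np.sum((delta_x**2 + delta_y**2) / sigma**2 / 2)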
+ def source_position_likelihood( + self, kwargs_lens, kwargs_ps, sigma, hard_bound_rms=None, verbose=False + ): + """Computes a likelihood/punishing factor of how well the source positions of + multiple images match given the image position and a lens model. The likelihood + level is computed in respect of a displacement in the image plane and transposed + through the Hessian into the source plane. :param kwargs_lens: lens model keyword argument list :param kwargs_ps: point source keyword argument list :param sigma: 1-sigma Gaussian uncertainty in the image plane - :param hard_bound_rms: hard bound deviation between the mapping of the images back to the source plane - (in source frame) + :param hard_bound_rms: hard bound deviation between the mapping of the images + back to the source plane (in source frame) :param verbose: bool, if True provides print statements with useful information. - :return: log likelihood of the model reproducing the correct image positions given an image position uncertainty + :return: log likelihood of the model reproducing the correct image positions + given an image position uncertainty """ if len(kwargs_ps) < 1: return 0 logL = 0 source_x, source_y = self._pointSource.source_position(kwargs_ps, kwargs_lens) for k in range(len(kwargs_ps)): - if 'ra_image' in kwargs_ps[k] and self._pointSource.point_source_type_list[k] == 'LENSED_POSITION': - - x_image = kwargs_ps[k]['ra_image'] - y_image = kwargs_ps[k]['dec_image'] + if ( + "ra_image" in kwargs_ps[k] + and self._pointSource.point_source_type_list[k] == "LENSED_POSITION" + ): + x_image = kwargs_ps[k]["ra_image"] + y_image = kwargs_ps[k]["dec_image"] # calculating the individual source positions from the image positions # TODO: have option for ray-shooting back to specific redshift in multi-plane lensing k_list = self._pointSource.k_list(k) @@ -204,22 +251,31 @@ def source_position_likelihood(self, kwargs_lens, kwargs_ps, sigma, hard_bound_r k_lens = k_list[i] else: k_lens = None - x_source_i, y_source_i = self._lensModel.ray_shooting(x_image[i], y_image[i], kwargs_lens, k=k_lens) - f_xx, f_xy, f_yx, f_yy = self._lensModel.hessian(x_image[i], y_image[i], kwargs_lens, k=k_lens) + x_source_i, y_source_i = self._lensModel.ray_shooting( + x_image[i], y_image[i], kwargs_lens, k=k_lens + ) + f_xx, f_xy, f_yx, f_yy = self._lensModel.hessian( + x_image[i], y_image[i], kwargs_lens, k=k_lens + ) A = np.array([[1 - f_xx, -f_xy], [-f_yx, 1 - f_yy]]) - Sigma_theta = np.array([[1, 0], [0, 1]]) * sigma ** 2 + Sigma_theta = np.array([[1, 0], [0, 1]]) * sigma**2 Sigma_beta = image2source_covariance(A, Sigma_theta) - delta = np.array([source_x[k] - x_source_i, source_y[k] - y_source_i]) + delta = np.array( + [source_x[k] - x_source_i, source_y[k] - y_source_i] + ) if hard_bound_rms is not None: - if delta[0]**2 + delta[1]**2 > hard_bound_rms**2: + if delta[0] ** 2 + delta[1] ** 2 > hard_bound_rms**2: if verbose is True: - print('Image positions do not match to the same source position to the required ' - 'precision. Achieved: %s, Required: %s.' % (delta, hard_bound_rms)) - logL -= 10 ** 3 + print( + "Image positions do not match to the same source position to the required " + "precision. Achieved: %s, Required: %s." 
+ % (delta, hard_bound_rms) + ) + logL -= 10**3 try: Sigma_inv = inv(Sigma_beta) except: - return -10**15 + return -(10**15) chi2 = delta.T.dot(Sigma_inv.dot(delta)) logL -= chi2 / 2 return logL @@ -232,7 +288,9 @@ def num_data(self): """ num = 0 if self._image_position_likelihood is True: - for i in range(len(self._ra_image_list)): # sum over the images of the different model components + for i in range( + len(self._ra_image_list) + ): # sum over the images of the different model components num += len(self._ra_image_list[i]) * 2 return num diff --git a/lenstronomy/Sampling/Likelihoods/prior_likelihood.py b/lenstronomy/Sampling/Likelihoods/prior_likelihood.py index b1383f90f..59f410e75 100644 --- a/lenstronomy/Sampling/Likelihoods/prior_likelihood.py +++ b/lenstronomy/Sampling/Likelihoods/prior_likelihood.py @@ -1,23 +1,33 @@ import numpy as np from lenstronomy.Util.prob_density import KDE1D -__all__ = ['PriorLikelihood'] +__all__ = ["PriorLikelihood"] class PriorLikelihood(object): - """ - class containing additional Gaussian priors to be folded into the likelihood - - """ - def __init__(self, prior_lens=None, prior_source=None, prior_lens_light=None, prior_ps=None, prior_special=None, - prior_extinction=None, prior_lens_kde=None, prior_source_kde=None, prior_lens_light_kde=None, - prior_ps_kde=None, - prior_special_kde=None, prior_extinction_kde=None, - prior_lens_lognormal=None, prior_source_lognormal=None, - prior_lens_light_lognormal=None, - prior_ps_lognormal=None, prior_special_lognormal=None, - prior_extinction_lognormal=None, - ): + """Class containing additional Gaussian priors to be folded into the likelihood.""" + + def __init__( + self, + prior_lens=None, + prior_source=None, + prior_lens_light=None, + prior_ps=None, + prior_special=None, + prior_extinction=None, + prior_lens_kde=None, + prior_source_kde=None, + prior_lens_light_kde=None, + prior_ps_kde=None, + prior_special_kde=None, + prior_extinction_kde=None, + prior_lens_lognormal=None, + prior_source_lognormal=None, + prior_lens_light_lognormal=None, + prior_ps_lognormal=None, + prior_special_lognormal=None, + prior_extinction_lognormal=None, + ): """ :param prior_lens: list of [index_model, param_name, mean, 1-sigma priors] @@ -43,19 +53,42 @@ def __init__(self, prior_lens=None, prior_source=None, prior_lens_light=None, pr """ - self._prior_lens, self._prior_source, self._prior_lens_light, self._prior_ps, self._prior_special, self._prior_extinction = \ - prior_lens, prior_source, prior_lens_light, prior_ps, prior_special, prior_extinction - self._prior_lens_kde, self._prior_source_kde, self._prior_lens_light_kde, self._prior_ps_kde = prior_lens_kde, \ - prior_source_kde, \ - prior_lens_light_kde, \ - prior_ps_kde - self._prior_lens_lognormal, self._prior_source_lognormal, \ - self._prior_lens_light_lognormal, \ - self._prior_ps_lognormal, self._prior_special_lognormal, \ - self._prior_extinction_lognormal = \ - prior_lens_lognormal, prior_source_lognormal, \ - prior_lens_light_lognormal, prior_ps_lognormal, \ - prior_special_lognormal, prior_extinction_lognormal + ( + self._prior_lens, + self._prior_source, + self._prior_lens_light, + self._prior_ps, + self._prior_special, + self._prior_extinction, + ) = ( + prior_lens, + prior_source, + prior_lens_light, + prior_ps, + prior_special, + prior_extinction, + ) + ( + self._prior_lens_kde, + self._prior_source_kde, + self._prior_lens_light_kde, + self._prior_ps_kde, + ) = (prior_lens_kde, prior_source_kde, prior_lens_light_kde, prior_ps_kde) + ( + self._prior_lens_lognormal, 
+ self._prior_source_lognormal, + self._prior_lens_light_lognormal, + self._prior_ps_lognormal, + self._prior_special_lognormal, + self._prior_extinction_lognormal, + ) = ( + prior_lens_lognormal, + prior_source_lognormal, + prior_lens_light_lognormal, + prior_ps_lognormal, + prior_special_lognormal, + prior_extinction_lognormal, + ) self._kde_lens_list = self._init_kde(prior_lens_kde) self._kde_source_list = self._init_kde(prior_source_kde) @@ -78,8 +111,15 @@ def _init_kde(prior_list_kde): kde_list.append(KDE1D(values=samples)) return kde_list - def logL(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, kwargs_special=None, - kwargs_extinction=None): + def logL( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): """ :param kwargs_lens: lens model parameter list @@ -93,19 +133,32 @@ def logL(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwa logL += self._prior_kwargs(kwargs_special, self._prior_special) logL += self._prior_kwargs_list(kwargs_extinction, self._prior_extinction) - logL += self._prior_lognormal_kwargs_list(kwargs_lens, - self._prior_lens_lognormal) - logL += self._prior_lognormal_kwargs_list(kwargs_source, self._prior_source_lognormal) - logL += self._prior_lognormal_kwargs_list(kwargs_lens_light, - self._prior_lens_light_lognormal) + logL += self._prior_lognormal_kwargs_list( + kwargs_lens, self._prior_lens_lognormal + ) + logL += self._prior_lognormal_kwargs_list( + kwargs_source, self._prior_source_lognormal + ) + logL += self._prior_lognormal_kwargs_list( + kwargs_lens_light, self._prior_lens_light_lognormal + ) logL += self._prior_lognormal_kwargs_list(kwargs_ps, self._prior_ps_lognormal) - logL += self._prior_lognormal_kwargs(kwargs_special, self._prior_special_lognormal) - logL += self._prior_lognormal_kwargs_list(kwargs_extinction, - self._prior_extinction_lognormal) - - logL += self._prior_kde_list(kwargs_lens, self._prior_lens_kde, self._kde_lens_list) - logL += self._prior_kde_list(kwargs_source, self._prior_source_kde, self._kde_source_list) - logL += self._prior_kde_list(kwargs_lens_light, self._prior_lens_light_kde, self._kde_lens_light_list) + logL += self._prior_lognormal_kwargs( + kwargs_special, self._prior_special_lognormal + ) + logL += self._prior_lognormal_kwargs_list( + kwargs_extinction, self._prior_extinction_lognormal + ) + + logL += self._prior_kde_list( + kwargs_lens, self._prior_lens_kde, self._kde_lens_list + ) + logL += self._prior_kde_list( + kwargs_source, self._prior_source_kde, self._kde_source_list + ) + logL += self._prior_kde_list( + kwargs_lens_light, self._prior_lens_light_kde, self._kde_lens_light_list + ) logL += self._prior_kde_list(kwargs_ps, self._prior_ps_kde, self._kde_ps_list) return logL @@ -141,14 +194,13 @@ def _prior_kwargs_list(kwargs_list, prior_list): for i in range(len(prior_list)): index, param_name, value, sigma = prior_list[i] model_value = kwargs_list[index][param_name] - dist = (model_value - value) ** 2 / sigma ** 2 / 2 + dist = (model_value - value) ** 2 / sigma**2 / 2 logL -= np.sum(dist) return logL @staticmethod def _prior_kwargs(kwargs, prior_list): - """ - prior computation for a keyword argument (not list thereof) + """Prior computation for a keyword argument (not list thereof) :param kwargs: keyword argument :return: logL @@ -159,7 +211,7 @@ def _prior_kwargs(kwargs, prior_list): for i in range(len(prior_list)): param_name, value, sigma = prior_list[i] 
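# Illustrative sketch (not part of the diff): each Gaussian prior entry
# [index, param_name, mean, sigma] in the lists handled above contributes
# -(x - mean)**2 / (2 * sigma**2) to the log-likelihood, e.g. a hypothetical
# prior on the Einstein radius of the first lens model:
prior_lens_example = [[0, "theta_E", 1.0, 0.1]]   # mean 1.0, 1-sigma 0.1 (hypothetical)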
model_value = kwargs[param_name] - dist = (model_value - value) ** 2 / sigma ** 2 / 2 + dist = (model_value - value) ** 2 / sigma**2 / 2 logL -= np.sum(dist) return logL @@ -177,14 +229,13 @@ def _prior_lognormal_kwargs_list(kwargs_list, prior_list): for i in range(len(prior_list)): index, param_name, value, sigma = prior_list[i] model_value = kwargs_list[index][param_name] - dist = (np.log(model_value) - value) ** 2 / sigma ** 2 / 2 + model_value + dist = (np.log(model_value) - value) ** 2 / sigma**2 / 2 + model_value logL -= np.sum(dist) return logL @staticmethod def _prior_lognormal_kwargs(kwargs, prior_list): - """ - prior computation for a keyword argument (not list thereof) + """Prior computation for a keyword argument (not list thereof) :param kwargs: keyword argument :return: logL @@ -195,6 +246,6 @@ def _prior_lognormal_kwargs(kwargs, prior_list): for i in range(len(prior_list)): param_name, value, sigma = prior_list[i] model_value = kwargs[param_name] - dist = (np.log(model_value) - value) ** 2 / sigma ** 2 / 2 + model_value + dist = (np.log(model_value) - value) ** 2 / sigma**2 / 2 + model_value logL -= np.sum(dist) return logL diff --git a/lenstronomy/Sampling/Likelihoods/time_delay_likelihood.py b/lenstronomy/Sampling/Likelihoods/time_delay_likelihood.py index 598e7475a..7cc8bc7cc 100644 --- a/lenstronomy/Sampling/Likelihoods/time_delay_likelihood.py +++ b/lenstronomy/Sampling/Likelihoods/time_delay_likelihood.py @@ -1,14 +1,19 @@ import numpy as np import lenstronomy.Util.constants as const -__all__ = ['TimeDelayLikelihood'] +__all__ = ["TimeDelayLikelihood"] class TimeDelayLikelihood(object): - """ - class to compute the likelihood of a model given a measurement of time delays - """ - def __init__(self, time_delays_measured, time_delays_uncertainties, lens_model_class, point_source_class): + """Class to compute the likelihood of a model given a measurement of time delays.""" + + def __init__( + self, + time_delays_measured, + time_delays_uncertainties, + lens_model_class, + point_source_class, + ): """ :param time_delays_measured: relative time delays (in days) in respect to the first image of the point source @@ -20,51 +25,61 @@ def __init__(self, time_delays_measured, time_delays_uncertainties, lens_model_c """ if time_delays_measured is None: - raise ValueError("time_delay_measured need to be specified to evaluate the time-delay likelihood.") + raise ValueError( + "time_delay_measured need to be specified to evaluate the time-delay likelihood." + ) if time_delays_uncertainties is None: - raise ValueError("time_delay_uncertainties need to be specified to evaluate the time-delay likelihood.") + raise ValueError( + "time_delay_uncertainties need to be specified to evaluate the time-delay likelihood." 
+ ) self._delays_measured = np.array(time_delays_measured) self._delays_errors = np.array(time_delays_uncertainties) self._lensModel = lens_model_class self._pointSource = point_source_class def logL(self, kwargs_lens, kwargs_ps, kwargs_cosmo): - """ - routine to compute the log likelihood of the time delay distance - :param kwargs_lens: lens model kwargs list - :param kwargs_ps: point source kwargs list - :param kwargs_cosmo: cosmology and other kwargs - :return: log likelihood of the model given the time delay data - """ - x_pos, y_pos = self._pointSource.image_position(kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, original_position=True) + """Routine to compute the log likelihood of the time delay distance :param + kwargs_lens: lens model kwargs list :param kwargs_ps: point source kwargs list + :param kwargs_cosmo: cosmology and other kwargs :return: log likelihood of the + model given the time delay data.""" + x_pos, y_pos = self._pointSource.image_position( + kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens, original_position=True + ) x_pos, y_pos = x_pos[0], y_pos[0] delay_arcsec = self._lensModel.fermat_potential(x_pos, y_pos, kwargs_lens) - D_dt_model = kwargs_cosmo['D_dt'] + D_dt_model = kwargs_cosmo["D_dt"] delay_days = const.delay_arcsec2days(delay_arcsec, D_dt_model) logL = self._logL_delays(delay_days, self._delays_measured, self._delays_errors) return logL @staticmethod def _logL_delays(delays_model, delays_measured, delays_errors): - """ - log likelihood of modeled delays vs measured time delays under considerations of errors + """Log likelihood of modeled delays vs measured time delays under considerations + of errors. :param delays_model: n delays of the model (not relative delays) - :param delays_measured: relative delays (1-2,1-3,1-4) relative to the first in the list + :param delays_measured: relative delays (1-2,1-3,1-4) relative to the first in + the list :param delays_errors: gaussian errors on the measured delays :return: log likelihood of data given model """ - if len(delays_model)-1 != len(delays_measured): - return -10**15 + if len(delays_model) - 1 != len(delays_measured): + return -(10**15) delta_t_model = np.array(delays_model[1:]) - delays_model[0] if delays_errors.ndim <= 1: - logL = np.sum(-(delta_t_model - delays_measured) ** 2 / (2 * delays_errors ** 2)) + logL = np.sum( + -((delta_t_model - delays_measured) ** 2) / (2 * delays_errors**2) + ) elif delays_errors.ndim == 2: D = delta_t_model - delays_measured - logL = -1/2 * D @ np.linalg.inv(delays_errors) @ D # TODO: only calculate the inverse once + logL = ( + -1 / 2 * D @ np.linalg.inv(delays_errors) @ D + ) # TODO: only calculate the inverse once else: - raise ValueError('Dimension of time delay error needs to be either one- or two-dimensional, not %s' - % delays_errors.ndim) + raise ValueError( + "Dimension of time delay error needs to be either one- or two-dimensional, not %s" + % delays_errors.ndim + ) return logL @property diff --git a/lenstronomy/Sampling/Pool/multiprocessing.py b/lenstronomy/Sampling/Pool/multiprocessing.py index e28d0f8d4..e1a9844d9 100644 --- a/lenstronomy/Sampling/Pool/multiprocessing.py +++ b/lenstronomy/Sampling/Pool/multiprocessing.py @@ -15,12 +15,13 @@ import multiprocess from multiprocess.pool import Pool -__all__ = ['MultiPool'] +__all__ = ["MultiPool"] def _initializer_wrapper(actual_initializer, *rest): - """ - We ignore SIGINT. It's up to our parent to kill us in the typical condition of this arising from ``^C`` on a + """We ignore SIGINT. 
+ + It's up to our parent to kill us in the typical condition of this arising from ``^C`` on a terminal. If someone is manually killing us with that signal, well... nothing will happen. """ signal.signal(signal.SIGINT, signal.SIG_IGN) @@ -29,7 +30,6 @@ def _initializer_wrapper(actual_initializer, *rest): class CallbackWrapper(object): - def __init__(self, callback): self.callback = callback @@ -39,11 +39,12 @@ def __call__(self, tasks): class MultiPool(Pool): - """ - A modified version of :class:`multiprocessing.pool.Pool` that has better - behavior with regard to ``KeyboardInterrupts`` in the :func:`map` method. + """A modified version of :class:`multiprocessing.pool.Pool` that has better behavior + with regard to ``KeyboardInterrupts`` in the :func:`map` method. + (Original author: `Peter K. G. Williams `_) """ + wait_timeout = 3600 def __init__(self, processes=None, initializer=None, initargs=(), **kwargs): @@ -58,8 +59,7 @@ def __init__(self, processes=None, initializer=None, initargs=(), **kwargs): :param kwargs: Extra arguments passed to the :class:`multiprocessing.pool.Pool` superclass. """ new_initializer = functools.partial(_initializer_wrapper, initializer) - super(MultiPool, self).__init__(processes, new_initializer, - initargs, **kwargs) + super(MultiPool, self).__init__(processes, new_initializer, initargs, **kwargs) self.size = self._processes self.rank = 0 @@ -74,10 +74,8 @@ def enabled(): return True def map(self, func, iterable, chunksize=None, callback=None): - """ - Equivalent to the built-in ``map()`` function and - :meth:`multiprocessing.pool.Pool.map()`, without catching - ``KeyboardInterrupt``. + """Equivalent to the built-in ``map()`` function and + :meth:`multiprocessing.pool.Pool.map()`, without catching ``KeyboardInterrupt``. :param func: A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable @@ -104,7 +102,9 @@ def map(self, func, iterable, chunksize=None, callback=None): # The key magic is that we must call r.get() with a timeout, because # a Condition.wait() without a timeout swallows KeyboardInterrupts. - r = self.map_async(func, iterable, chunksize=chunksize, callback=callbackwrapper) + r = self.map_async( + func, iterable, chunksize=chunksize, callback=callbackwrapper + ) while True: try: diff --git a/lenstronomy/Sampling/Pool/pool.py b/lenstronomy/Sampling/Pool/pool.py index 59256ed7b..4a059727d 100644 --- a/lenstronomy/Sampling/Pool/pool.py +++ b/lenstronomy/Sampling/Pool/pool.py @@ -29,18 +29,18 @@ # Standard library import sys import logging + log = logging.getLogger(__name__) _VERBOSE = 5 # from schwimmbad.multiprocessing import MultiPool # from schwimmbad.jl import JoblibPool -__all__ = ['choose_pool'] +__all__ = ["choose_pool"] def choose_pool(mpi=False, processes=1, **kwargs): - """ - Extends the capabilities of the schwimmbad.choose_pool method. + """Extends the capabilities of the schwimmbad.choose_pool method. It handles the `use_dill` parameters in kwargs, that would otherwise raise an error when processes > 1. Any thread in the returned multiprocessing pool (e.g. processes > 1) also default @@ -73,10 +73,12 @@ def choose_pool(mpi=False, processes=1, **kwargs): try: pool = MPIPool(**kwargs) except: - raise ImportError('MPIPool of schwimmbad can not be generated. lenstronomy uses a specific branch of ' - 'schwimmbad specified in the requirements.txt. Make sure you are using the correct ' - 'version of schwimmbad. 
In particular the "use_dill" argument is not supported in the ' - 'pypi version 0.3.0.') + raise ImportError( + "MPIPool of schwimmbad can not be generated. lenstronomy uses a specific branch of " + "schwimmbad specified in the requirements.txt. Make sure you are using the correct " + 'version of schwimmbad. In particular the "use_dill" argument is not supported in the ' + "pypi version 0.3.0." + ) if not pool.is_master(): pool.wait() sys.exit(0) @@ -85,9 +87,9 @@ def choose_pool(mpi=False, processes=1, **kwargs): return pool elif processes != 1 and MultiPool.enabled(): - if 'use_dill' in kwargs: + if "use_dill" in kwargs: # schwimmbad MultiPool does not support dill so we remove this option from the kwargs - _ = kwargs.pop('use_dill') + _ = kwargs.pop("use_dill") log.info("Running with MultiPool on {0} cores".format(processes)) return MultiPool(processes=processes, **kwargs) diff --git a/lenstronomy/Sampling/Samplers/base_nested_sampler.py b/lenstronomy/Sampling/Samplers/base_nested_sampler.py index 2e7f57a0f..7d18df2e7 100644 --- a/lenstronomy/Sampling/Samplers/base_nested_sampler.py +++ b/lenstronomy/Sampling/Samplers/base_nested_sampler.py @@ -1,18 +1,23 @@ -__author__ = 'aymgal, johannesulf' +__author__ = "aymgal, johannesulf" import lenstronomy.Util.sampling_util as utils import numpy as np -__all__ = ['NestedSampler'] +__all__ = ["NestedSampler"] class NestedSampler(object): - """ - Base class for nested samplers - """ - - def __init__(self, likelihood_module, prior_type, - prior_means, prior_sigmas, width_scale, sigma_scale): + """Base class for nested samplers.""" + + def __init__( + self, + likelihood_module, + prior_type, + prior_means, + prior_sigmas, + width_scale, + sigma_scale, + ): """ :param likelihood_module: likelihood_module like in likelihood.py (should be callable) :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube @@ -26,26 +31,24 @@ def __init__(self, likelihood_module, prior_type, lowers, uppers = self._ll.param_limits if width_scale < 1: - self.lowers, self.uppers = utils.scale_limits( - lowers, uppers, width_scale) + self.lowers, self.uppers = utils.scale_limits(lowers, uppers, width_scale) else: self.lowers, self.uppers = lowers, uppers - if prior_type == 'gaussian': + if prior_type == "gaussian": if prior_means is None or prior_sigmas is None: raise ValueError( - "For gaussian prior type, means and sigmas are required") + "For gaussian prior type, means and sigmas are required" + ) self.means, self.sigmas = prior_means, prior_sigmas * sigma_scale self.lowers, self.uppers = lowers, uppers - elif prior_type != 'uniform': - raise ValueError( - "Sampling type {} not supported".format(prior_type)) + elif prior_type != "uniform": + raise ValueError("Sampling type {} not supported".format(prior_type)) self.prior_type = prior_type self._has_warned = False def prior(self, u, *args): - """ - compute the mapping between the unit cube and parameter cube + """Compute the mapping between the unit cube and parameter cube. 
:param u: unit hypercube, sampled by the algorithm :return: hypercube in parameter space @@ -56,16 +59,24 @@ def prior(self, u, *args): u_orig = u u = np.array([u[i] for i in range(self.n_dims)]) - if self.prior_type == 'gaussian': - p = utils.cube2args_gaussian(u, self.lowers, self.uppers, - self.means, self.sigmas, self.n_dims, - copy=True) - elif self.prior_type == 'uniform': - p = utils.cube2args_uniform(u, self.lowers, self.uppers, - self.n_dims, copy=True) + if self.prior_type == "gaussian": + p = utils.cube2args_gaussian( + u, + self.lowers, + self.uppers, + self.means, + self.sigmas, + self.n_dims, + copy=True, + ) + elif self.prior_type == "uniform": + p = utils.cube2args_uniform( + u, self.lowers, self.uppers, self.n_dims, copy=True + ) else: raise ValueError( - 'prior type %s not supported! Chose "gaussian" or "uniform".') + 'prior type %s not supported! Chose "gaussian" or "uniform".' + ) # MultiNest expects that we modify the origal array instead of # returning the transformed parameters. @@ -79,8 +90,7 @@ def prior(self, u, *args): return p def log_likelihood(self, p, *args): - """ - compute the log-likelihood given list of parameters + """Compute the log-likelihood given list of parameters. :param x: parameter values :return: log-likelihood (from the likelihood module) @@ -98,5 +108,5 @@ def log_likelihood(self, p, *args): return float(log_l) def run(self, kwargs_run): - """run the nested sampling algorithm""" + """Run the nested sampling algorithm.""" raise NotImplementedError("Method not be implemented in base class") diff --git a/lenstronomy/Sampling/Samplers/cobaya_sampler.py b/lenstronomy/Sampling/Samplers/cobaya_sampler.py index c089cd8df..90933aeb9 100644 --- a/lenstronomy/Sampling/Samplers/cobaya_sampler.py +++ b/lenstronomy/Sampling/Samplers/cobaya_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'nataliehogg' +__author__ = "nataliehogg" # man with one sampling method always knows his posterior distribution; man with two never certain. @@ -7,31 +7,37 @@ class CobayaSampler(object): - def __init__(self, likelihood_module, mean_start, sigma_start): - """ - Wrapper for pure Metropolis--Hastings MCMC sampling with Cobaya. + """Wrapper for pure Metropolis--Hastings MCMC sampling with Cobaya. 
If you use this sampler, you must cite the following works: - Lewis & Bridle, https://arxiv.org/abs/astro-ph/0205436 + Lewis & Bridle, + https://arxiv.org/abs/astro-ph/0205436 Lewis, https://arxiv.org/abs/1304.4473 - Torrado & Lewis, https://arxiv.org/abs/2005.05290 and https://ascl.net/1910.019 + Torrado & Lewis, + https://arxiv.org/abs/2005.05290 + and https://ascl.net/1910.019 - For more information about Cobaya, see https://cobaya.readthedocs.io/en/latest/index.html + For more information about Cobaya, see + https://cobaya.readthedocs.io/en/latest/index.html :param likelihood_module: LikelihoodModule() instance - :param mean_start: initial point for parameters are drawn from Gaussians with these means - :param sigma_start: initial point for parameters are drawn from Gaussians with these standard deviations - + :param mean_start: initial point for parameters are drawn from Gaussians with + these means + :param sigma_start: initial point for parameters are drawn from Gaussians with + these standard deviations """ # get the logL and parameter info from LikelihoodModule self._likelihood_module = likelihood_module self._num_params, self._param_names = self._likelihood_module.param.num_param() - self._lower_limit, self._upper_limit = self._likelihood_module.param.param_limits() + ( + self._lower_limit, + self._upper_limit, + ) = self._likelihood_module.param.param_limits() self._mean_start = mean_start self._sigma_start = sigma_start @@ -54,119 +60,166 @@ def run(self, **kwargs): # add the priors to the sampled_params # currently a uniform prior is hardcoded for all params # cobaya allows any 1D continuous dist in scipy.stats; thinking how to implement this here - sampled_params = {k: {'prior': {'dist': 'uniform', 'min': self._lower_limit[i], 'max': self._upper_limit[i]}} for k, i in zip(sampled_params, range(len(sampled_params)))} + sampled_params = { + k: { + "prior": { + "dist": "uniform", + "min": self._lower_limit[i], + "max": self._upper_limit[i], + } + } + for k, i in zip(sampled_params, range(len(sampled_params))) + } # add reference values to start chain close to expected best fit # this hardcodes a Gaussian and uses the sigma_kwargs passed by the user # again cobaya allows any 1D continous distribution; thinking how to implement this # tricky with current info internal in lenstronomy - [sampled_params[k].update({'ref': {'dist': 'norm', 'loc': self._mean_start[i], 'scale': self._sigma_start[i]}}) for k, i in zip(sampled_params.keys(), range(len(sampled_params)))] + [ + sampled_params[k].update( + { + "ref": { + "dist": "norm", + "loc": self._mean_start[i], + "scale": self._sigma_start[i], + } + } + ) + for k, i in zip(sampled_params.keys(), range(len(sampled_params))) + ] # add proposal widths # first check if proposal_widths has been passed - if 'proposal_widths' not in kwargs: + if "proposal_widths" not in kwargs: pass else: # check if what's been passed is dict - if isinstance(kwargs['proposal_widths'], dict): + if isinstance(kwargs["proposal_widths"], dict): # if yes, convert to list - props = list(kwargs['proposal_widths'].values()) - elif isinstance(kwargs['proposal_widths'], list): + props = list(kwargs["proposal_widths"].values()) + elif isinstance(kwargs["proposal_widths"], list): # if no and it's a list, do nothing - props = kwargs['proposal_widths'] + props = kwargs["proposal_widths"] else: # if no and not a list, raise TypeError - raise TypeError('Proposal widths must be a list of floats or a dictionary of parameters and floats.') + raise TypeError( + "Proposal widths 
must be a list of floats or a dictionary of parameters and floats." + ) # check the right number of values are present if len(props) != len(sampled_params.keys()): # if not, raise ValueError - raise ValueError('You must provide the same number of proposal widths as sampled parameters.') + raise ValueError( + "You must provide the same number of proposal widths as sampled parameters." + ) # update sampled_params dict with proposal widths - [sampled_params[k].update({'proposal': props[i]}) for k, i in zip(sampled_params.keys(), range(len(props)))] + [ + sampled_params[k].update({"proposal": props[i]}) + for k, i in zip(sampled_params.keys(), range(len(props))) + ] # add LaTeX labels so lenstronomy kwarg names don't break getdist plotting # first check if the labels have been passed - if 'latex' not in kwargs: + if "latex" not in kwargs: # if not, print a warning - print('No LaTeX labels provided: manually edit the updated.yaml file to avoid lenstronomy labels breaking GetDist.') + print( + "No LaTeX labels provided: manually edit the updated.yaml file to avoid lenstronomy labels breaking GetDist." + ) pass else: - latex = kwargs['latex'] + latex = kwargs["latex"] # check the right number of labels are present if len(latex) != len(sampled_params.keys()): # if not, raise ValueError - raise ValueError('You must provide the same number of labels as sampled parameters.') + raise ValueError( + "You must provide the same number of labels as sampled parameters." + ) # update sampled_params dict with labels - [sampled_params[k].update({'latex': latex[i]}) for k, i in zip(sampled_params.keys(), range(len(latex)))] + [ + sampled_params[k].update({"latex": latex[i]}) + for k, i in zip(sampled_params.keys(), range(len(latex))) + ] def likelihood_for_cobaya(**kwargs): - ''' - We define a function to return the log-likelihood; this function is passed to Cobaya. - The function must be nested within the run() function for it to work properly. + """We define a function to return the log-likelihood; this function is + passed to Cobaya. The function must be nested within the run() function for + it to work properly. :param kwargs: dictionary of keyword arguments - ''' + """ current_input_values = [kwargs[p] for p in sampled_params] logp = self._likelihood_module.likelihood(current_input_values) return logp # gather all the information to pass to cobaya, starting with the likelihood - info = {'likelihood': {'lenstronomy_likelihood': {'external': likelihood_for_cobaya, 'input_params': sampled_params}}} + info = { + "likelihood": { + "lenstronomy_likelihood": { + "external": likelihood_for_cobaya, + "input_params": sampled_params, + } + } + } # for the above, can we do an args2kwargs for the 'output_params' key?? 
might bypass plotting issue # parameter info - info['params'] = sampled_params + info["params"] = sampled_params # get all the kwargs for the mcmc sampler in cobaya # if not present, passes a default value (most taken from cobaya docs) # note: parameter blocking and drag kwargs not provided because speed hierarchy not possible in strong lensing likelihoods - mcmc_kwargs = {'burn_in': kwargs.get('burn_in', 0), - 'max_tries': kwargs.get('max_tries', 100*self._num_params), - 'covmat': kwargs.get('covmat', None), - 'proposal_scale': kwargs.get('proposal_scale', 1), - 'output_every': kwargs.get('output_every', 500), - 'learn_every': kwargs.get('learn_every', 40*self._num_params), - 'learn_proposal': kwargs.get('learn_proposal', True), - 'learn_proposal_Rminus1_max': kwargs.get('learn_proposal_Rminus1_max', 2), - 'learn_proposal_Rminus1_max_early': kwargs.get('learn_proposal_Rminus1_max_early', 30), - 'learn_proposal_Rminus1_min': kwargs.get('learn_proposal_Rminus1_min', 0), - 'max_samples': kwargs.get('max_samples', np.inf), - 'Rminus1_stop': kwargs.get('Rminus1_stop', 0.01), - 'Rminus1_cl_stop': kwargs.get('Rminus1_cl_stop', 0.2), - 'Rminus1_cl_level': kwargs.get('Rminus1_cl_level', 0.95), - 'Rminus1_single_split': kwargs.get('Rminus1_single_split', 4), - 'measure_speeds': kwargs.get('measure_speeds', True), - 'oversample_power': kwargs.get('oversample_power', 0.4), - 'oversample_thin': kwargs.get('oversample_thin', True)} - - if 'drag' in kwargs: - raise ValueError('Parameter dragging not possible in a strong lensing likelihood.') + mcmc_kwargs = { + "burn_in": kwargs.get("burn_in", 0), + "max_tries": kwargs.get("max_tries", 100 * self._num_params), + "covmat": kwargs.get("covmat", None), + "proposal_scale": kwargs.get("proposal_scale", 1), + "output_every": kwargs.get("output_every", 500), + "learn_every": kwargs.get("learn_every", 40 * self._num_params), + "learn_proposal": kwargs.get("learn_proposal", True), + "learn_proposal_Rminus1_max": kwargs.get("learn_proposal_Rminus1_max", 2), + "learn_proposal_Rminus1_max_early": kwargs.get( + "learn_proposal_Rminus1_max_early", 30 + ), + "learn_proposal_Rminus1_min": kwargs.get("learn_proposal_Rminus1_min", 0), + "max_samples": kwargs.get("max_samples", np.inf), + "Rminus1_stop": kwargs.get("Rminus1_stop", 0.01), + "Rminus1_cl_stop": kwargs.get("Rminus1_cl_stop", 0.2), + "Rminus1_cl_level": kwargs.get("Rminus1_cl_level", 0.95), + "Rminus1_single_split": kwargs.get("Rminus1_single_split", 4), + "measure_speeds": kwargs.get("measure_speeds", True), + "oversample_power": kwargs.get("oversample_power", 0.4), + "oversample_thin": kwargs.get("oversample_thin", True), + } + + if "drag" in kwargs: + raise ValueError( + "Parameter dragging not possible in a strong lensing likelihood." 
+ ) # select mcmc as the sampler and pass the relevant kwargs - info['sampler'] = {'mcmc': mcmc_kwargs} + info["sampler"] = {"mcmc": mcmc_kwargs} # where the chains and other files will be saved - if 'path' not in kwargs: - info['output'] = None + if "path" not in kwargs: + info["output"] = None else: - info['output'] = kwargs['path'] + info["output"] = kwargs["path"] # whether or not to overwrite previous chains with the same name (bool) - if 'force_overwrite' not in kwargs: - info['force'] = True + if "force_overwrite" not in kwargs: + info["force"] = True else: - info['force'] = kwargs['force_overwrite'] + info["force"] = kwargs["force_overwrite"] # check for mpi - if 'mpi' not in kwargs: - kwargs['mpi'] = False + if "mpi" not in kwargs: + kwargs["mpi"] = False # run the sampler # we wrap the call to crun to make sure any MPI exceptions are caught properly # this ensures the entire run will be terminated if any individual process breaks - if kwargs['mpi'] == True: + if kwargs["mpi"] == True: from mpi4py import MPI from cobaya.log import LoggedError @@ -181,9 +234,9 @@ def likelihood_for_cobaya(**kwargs): pass success = all(comm.allgather(success)) if not success and rank == 0: - print('Sampling failed!') + print("Sampling failed!") else: - comm = None # is this necessary? + comm = None # is this necessary? updated_info, sampler = crun(info) # get the best fit (max likelihood); returns a pandas series @@ -192,7 +245,7 @@ def likelihood_for_cobaya(**kwargs): best_fit_series = sampler.collection.bestfit() # turn that pandas series into a list (of floats) - keys = list(sampled_params) # avoiding some new pandas error... + keys = list(sampled_params) # avoiding some new pandas error... best_fit_values = best_fit_series[keys].values.tolist() return updated_info, sampler, best_fit_values diff --git a/lenstronomy/Sampling/Samplers/dynesty_sampler.py b/lenstronomy/Sampling/Samplers/dynesty_sampler.py index 0b9150d45..6c32819c6 100644 --- a/lenstronomy/Sampling/Samplers/dynesty_sampler.py +++ b/lenstronomy/Sampling/Samplers/dynesty_sampler.py @@ -1,24 +1,33 @@ -__author__ = 'aymgal, johannesulf' +__author__ = "aymgal, johannesulf" import numpy as np from lenstronomy.Sampling.Samplers.base_nested_sampler import NestedSampler import lenstronomy.Util.sampling_util as utils -__all__ = ['DynestySampler'] +__all__ = ["DynestySampler"] class DynestySampler(NestedSampler): - """ - Wrapper for dynamical nested sampling algorithm Dynesty by J. Speagle + """Wrapper for dynamical nested sampling algorithm Dynesty by J. Speagle. 
paper : https://arxiv.org/abs/1904.02180 doc : https://dynesty.readthedocs.io/ """ - def __init__(self, likelihood_module, prior_type='uniform', - prior_means=None, prior_sigmas=None, width_scale=1, sigma_scale=1, - bound='multi', sample='auto', use_mpi=False, use_pool=None): + def __init__( + self, + likelihood_module, + prior_type="uniform", + prior_means=None, + prior_sigmas=None, + width_scale=1, + sigma_scale=1, + bound="multi", + sample="auto", + use_mpi=False, + use_pool=None, + ): """ :param likelihood_module: likelihood_module like in likelihood.py (should be callable) :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube @@ -32,9 +41,14 @@ def __init__(self, likelihood_module, prior_type='uniform', :param use_pool: specific to Dynesty, see https://dynesty.readthedocs.io """ self._check_install() - super(DynestySampler, self).__init__(likelihood_module, prior_type, - prior_means, prior_sigmas, - width_scale, sigma_scale) + super(DynestySampler, self).__init__( + likelihood_module, + prior_type, + prior_means, + prior_sigmas, + width_scale, + sigma_scale, + ) # create the Dynesty sampler if use_mpi: @@ -48,17 +62,25 @@ def __init__(self, likelihood_module, prior_type='uniform', sys.exit(0) self._sampler = self._dynesty.DynamicNestedSampler( - loglikelihood=self.log_likelihood, prior_transform=self.prior, - ndim=self.n_dims, bound=bound, sample=sample, pool=pool, - use_pool=use_pool) + loglikelihood=self.log_likelihood, + prior_transform=self.prior, + ndim=self.n_dims, + bound=bound, + sample=sample, + pool=pool, + use_pool=use_pool, + ) else: self._sampler = self._dynesty.DynamicNestedSampler( - loglikelihood=self.log_likelihood, prior_transform=self.prior, - ndim=self.n_dims, bound=bound, sample=sample) + loglikelihood=self.log_likelihood, + prior_transform=self.prior, + ndim=self.n_dims, + bound=bound, + sample=sample, + ) def run(self, kwargs_run): - """ - run the Dynesty nested sampler + """Run the Dynesty nested sampler. see https://dynesty.readthedocs.io for content of kwargs_run @@ -78,7 +100,7 @@ def run(self, kwargs_run): # Compute weighted mean and covariance. weights = np.exp(results.logwt - log_z[-1]) # normalized weights - if np.sum(weights) != 1.: + if np.sum(weights) != 1.0: # TODO : clearly this is not optimal... # weights should by definition be normalized, but it appears that for very small # number of live points (typically in test routines), @@ -97,8 +119,10 @@ def _check_install(self): import dynesty import dynesty.utils as dyfunc except ImportError: - print("Warning : dynesty not properly installed (results might be unexpected). \ - You can get it with $pip install dynesty.") + print( + "Warning : dynesty not properly installed (results might be unexpected). \ + You can get it with $pip install dynesty." + ) self._dynesty_installed = False else: self._dynesty_installed = True diff --git a/lenstronomy/Sampling/Samplers/multinest_sampler.py b/lenstronomy/Sampling/Samplers/multinest_sampler.py index 126f6a617..e095542a2 100644 --- a/lenstronomy/Sampling/Samplers/multinest_sampler.py +++ b/lenstronomy/Sampling/Samplers/multinest_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import os import json @@ -8,20 +8,30 @@ from lenstronomy.Sampling.Samplers.base_nested_sampler import NestedSampler import lenstronomy.Util.sampling_util as utils -__all__ = ['MultiNestSampler'] +__all__ = ["MultiNestSampler"] class MultiNestSampler(NestedSampler): - """ - Wrapper for nested sampling algorithm MultInest by F. 
Feroz & M. Hobson + """Wrapper for nested sampling algorithm MultInest by F. + + Feroz & M. Hobson papers : arXiv:0704.3704, arXiv:0809.3437, arXiv:1306.2144 pymultinest doc : https://johannesbuchner.github.io/PyMultiNest/pymultinest.html """ - def __init__(self, likelihood_module, prior_type='uniform', - prior_means=None, prior_sigmas=None, width_scale=1, sigma_scale=1, - output_dir=None, output_basename='-', - remove_output_dir=False, use_mpi=False): + def __init__( + self, + likelihood_module, + prior_type="uniform", + prior_means=None, + prior_sigmas=None, + width_scale=1, + sigma_scale=1, + output_dir=None, + output_basename="-", + remove_output_dir=False, + use_mpi=False, + ): """ :param likelihood_module: likelihood_module like in likelihood.py (should be callable) :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube @@ -35,15 +45,20 @@ def __init__(self, likelihood_module, prior_type='uniform', :param use_mpi: flag directly passed to MultInest sampler (NOT TESTED) """ self._check_install() - super(MultiNestSampler, self).__init__(likelihood_module, prior_type, - prior_means, prior_sigmas, - width_scale, sigma_scale) + super(MultiNestSampler, self).__init__( + likelihood_module, + prior_type, + prior_means, + prior_sigmas, + width_scale, + sigma_scale, + ) # here we assume number of dimensons = number of parameters self.n_params = self.n_dims if output_dir is None: - self._output_dir = 'multinest_out_default' + self._output_dir = "multinest_out_default" else: self._output_dir = output_dir @@ -52,6 +67,7 @@ def __init__(self, likelihood_module, prior_type='uniform', if self._use_mpi: from mpi4py import MPI + self._comm = MPI.COMM_WORLD if self._comm.Get_rank() != 0: @@ -68,16 +84,16 @@ def __init__(self, likelihood_module, prior_type='uniform', # required for analysis : save parameter names in json file if self._is_master: - with open(self.files_basename + 'params.json', 'w') as file: + with open(self.files_basename + "params.json", "w") as file: json.dump(self.param_names, file, indent=2) self._rm_output = remove_output_dir def run(self, kwargs_run): - """ - run the MultiNest nested sampler + """Run the MultiNest nested sampler. 
- see https://johannesbuchner.github.io/PyMultiNest/pymultinest.html for content of kwargs_run + see https://johannesbuchner.github.io/PyMultiNest/pymultinest.html for content + of kwargs_run :param kwargs_run: kwargs directly passed to pymultinest.run :return: samples, means, logZ, logZ_err, logL, stats @@ -86,13 +102,20 @@ def run(self, kwargs_run): print("parameter names :", self.param_names) if self._pymultinest_installed: - self._pymultinest.run(self.log_likelihood, self.prior, self.n_dims, - outputfiles_basename=self.files_basename, - resume=False, verbose=True, - init_MPI=self._use_mpi, **kwargs_run) + self._pymultinest.run( + self.log_likelihood, + self.prior, + self.n_dims, + outputfiles_basename=self.files_basename, + resume=False, + verbose=True, + init_MPI=self._use_mpi, + **kwargs_run + ) analyzer = self._Analyzer( - self.n_dims, outputfiles_basename=self.files_basename) + self.n_dims, outputfiles_basename=self.files_basename + ) samples = analyzer.get_equal_weighted_posterior()[:, :-1] data = analyzer.get_data() # gets data from the *.txt output file stats = analyzer.get_stats() @@ -102,19 +125,20 @@ def run(self, kwargs_run): samples = np.zeros((1, self.n_dims)) data = np.zeros((self.n_dims, 3)) stats = { - 'global evidence': np.zeros(self.n_dims), - 'global evidence error': np.zeros(self.n_dims), - 'modes': [{'mean': np.zeros(self.n_dims)}] + "global evidence": np.zeros(self.n_dims), + "global evidence error": np.zeros(self.n_dims), + "modes": [{"mean": np.zeros(self.n_dims)}], } logL = -0.5 * data[:, 1] # since the second data column is -2*logL - logZ = stats['global evidence'] - logZ_err = stats['global evidence error'] + logZ = stats["global evidence"] + logZ_err = stats["global evidence error"] # or better to use stats['marginals'][:]['median'] ??? - means = stats['modes'][0]['mean'] + means = stats["modes"][0]["mean"] - print("MultiNest output files have been saved to {}*" - .format(self.files_basename)) + print( + "MultiNest output files have been saved to {}*".format(self.files_basename) + ) if self._rm_output and self._is_master: shutil.rmtree(self._output_dir, ignore_errors=True) @@ -127,8 +151,10 @@ def _check_install(self): import pymultinest from pymultinest.analyse import Analyzer except: - print("Warning : MultiNest/pymultinest not properly installed (results might be unexpected). \ - You can get it from : https://johannesbuchner.github.io/PyMultiNest/pymultinest.html") + print( + "Warning : MultiNest/pymultinest not properly installed (results might be unexpected). \ + You can get it from : https://johannesbuchner.github.io/PyMultiNest/pymultinest.html" + ) self._pymultinest_installed = False else: self._pymultinest_installed = True diff --git a/lenstronomy/Sampling/Samplers/nautilus_sampler.py b/lenstronomy/Sampling/Samplers/nautilus_sampler.py index badc81743..62f6311f5 100644 --- a/lenstronomy/Sampling/Samplers/nautilus_sampler.py +++ b/lenstronomy/Sampling/Samplers/nautilus_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal, johannesulf' +__author__ = "aymgal, johannesulf" import numpy as np import lenstronomy.Util.sampling_util as utils @@ -8,20 +8,27 @@ from lenstronomy.Sampling.Samplers.base_nested_sampler import NestedSampler -__all__ = ['NautilusSampler'] +__all__ = ["NautilusSampler"] class NautilusSampler(NestedSampler): - """ - Wrapper for the nautilus sampler by Johannes U. Lange. + """Wrapper for the nautilus sampler by Johannes U. Lange. 
paper : https://arxiv.org/abs/2306.16923 doc : https://nautilus-sampler.readthedocs.io """ - def __init__(self, likelihood_module, prior_type='uniform', - prior_means=None, prior_sigmas=None, width_scale=1, - sigma_scale=1, mpi=False, **kwargs): + def __init__( + self, + likelihood_module, + prior_type="uniform", + prior_means=None, + prior_sigmas=None, + width_scale=1, + sigma_scale=1, + mpi=False, + **kwargs + ): """ :param likelihood_module: likelihood_module like in likelihood.py (should be callable) :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube @@ -33,9 +40,14 @@ def __init__(self, likelihood_module, prior_type='uniform', :param kwargs: kwargs directly passed to Sampler """ self._check_install() - super(NautilusSampler, self).__init__(likelihood_module, prior_type, - prior_means, prior_sigmas, - width_scale, sigma_scale) + super(NautilusSampler, self).__init__( + likelihood_module, + prior_type, + prior_means, + prior_sigmas, + width_scale, + sigma_scale, + ) if mpi: from schwimmbad import MPIPool @@ -46,18 +58,17 @@ def __init__(self, likelihood_module, prior_type='uniform', if not pool.is_master(): pool.wait() sys.exit(0) - kwargs['pool'] = pool + kwargs["pool"] = pool - keys = [p.name for p in signature( - self._nautilus.Sampler).parameters.values()] + keys = [p.name for p in signature(self._nautilus.Sampler).parameters.values()] kwargs = {key: kwargs[key] for key in kwargs.keys() & keys} self._sampler = self._nautilus.Sampler( - self.prior, self.log_likelihood, self.n_dims, **kwargs) + self.prior, self.log_likelihood, self.n_dims, **kwargs + ) def run(self, **kwargs): - """ - run the nautilus nested sampler + """Run the nautilus nested sampler. see https://nautilus-sampler.readthedocs.io for content of kwargs @@ -67,8 +78,7 @@ def run(self, **kwargs): print("prior type :", self.prior_type) print("parameter names :", self.param_names) - keys = [p.name for p in signature( - self._sampler.run).parameters.values()] + keys = [p.name for p in signature(self._sampler.run).parameters.values()] kwargs = {key: kwargs[key] for key in kwargs.keys() & keys} self._sampler.run(**kwargs) points, log_w, log_l = self._sampler.posterior() @@ -80,7 +90,9 @@ def _check_install(self): try: import nautilus except ImportError: - print("Warning : nautilus not properly installed. \ - You can get it with $pip install nautilus-sampler.") + print( + "Warning : nautilus not properly installed. \ + You can get it with $pip install nautilus-sampler." + ) else: self._nautilus = nautilus diff --git a/lenstronomy/Sampling/Samplers/polychord_sampler.py b/lenstronomy/Sampling/Samplers/polychord_sampler.py index ffd3a693a..1f03f5773 100644 --- a/lenstronomy/Sampling/Samplers/polychord_sampler.py +++ b/lenstronomy/Sampling/Samplers/polychord_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal, johannesulf' +__author__ = "aymgal, johannesulf" import os import shutil @@ -9,24 +9,32 @@ from lenstronomy.Sampling.Samplers.base_nested_sampler import NestedSampler import lenstronomy.Util.sampling_util as utils -__all__ = ['DyPolyChordSampler'] +__all__ = ["DyPolyChordSampler"] class DyPolyChordSampler(NestedSampler): - """ - Wrapper for dynamical nested sampling algorithm DyPolyChord - by E. Higson, M. Hobson, W. Handley, A. Lasenby + """Wrapper for dynamical nested sampling algorithm DyPolyChord by E. Higson, M. + Hobson, W. Handley, A. Lasenby. 
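# Illustrative sketch of the keyword-filtering idiom used in the Nautilus wrapper
# above: only the kwargs actually accepted by the callable are forwarded to it.
from inspect import signature

def filter_kwargs(func, kwargs):
    accepted = {p.name for p in signature(func).parameters.values()}
    return {key: kwargs[key] for key in kwargs.keys() & accepted}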
papers : arXiv:1704.03459, arXiv:1804.06406 doc : https://dypolychord.readthedocs.io """ - def __init__(self, likelihood_module, prior_type='uniform', - prior_means=None, prior_sigmas=None, width_scale=1, sigma_scale=1, - output_dir=None, output_basename='-', - resume_dyn_run=False, - polychord_settings=None, - remove_output_dir=False, use_mpi=False): # , num_mpi_procs=1): + def __init__( + self, + likelihood_module, + prior_type="uniform", + prior_means=None, + prior_sigmas=None, + width_scale=1, + sigma_scale=1, + output_dir=None, + output_basename="-", + resume_dyn_run=False, + polychord_settings=None, + remove_output_dir=False, + use_mpi=False, + ): # , num_mpi_procs=1): """ :param likelihood_module: likelihood_module like in likelihood.py (should be callable) :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube @@ -42,9 +50,14 @@ def __init__(self, likelihood_module, prior_type='uniform', :param use_mpi: Use MPI computing if `True` """ self._check_install() - super(DyPolyChordSampler, self).__init__(likelihood_module, prior_type, - prior_means, prior_sigmas, - width_scale, sigma_scale) + super(DyPolyChordSampler, self).__init__( + likelihood_module, + prior_type, + prior_means, + prior_sigmas, + width_scale, + sigma_scale, + ) # if use_mpi: # mpi_str = 'mpirun -np {}'.format(num_mpi_procs) @@ -59,6 +72,7 @@ def __init__(self, likelihood_module, prior_type='uniform', if self._use_mpi: from mpi4py import MPI + self._comm = MPI.COMM_WORLD if self._comm.Get_rank() != 0: @@ -74,13 +88,14 @@ def __init__(self, likelihood_module, prior_type='uniform', self._output_basename = output_basename self._settings = copy.deepcopy(polychord_settings) - self._settings['file_root'] = self._output_basename - self._settings['base_dir'] = self._output_dir + self._settings["file_root"] = self._output_basename + self._settings["base_dir"] = self._output_dir if self._all_installed: # create the dyPolyChord callable object - self._sampler = self._RunPyPolyChord(self.log_likelihood, - self.prior, self.n_dims) + self._sampler = self._RunPyPolyChord( + self.log_likelihood, self.prior, self.n_dims + ) else: self._sampler = None @@ -88,8 +103,7 @@ def __init__(self, likelihood_module, prior_type='uniform', self._has_warned = False def run(self, dynamic_goal, kwargs_run): - """ - run the DyPolyChord dynamical nested sampler + """Run the DyPolyChord dynamical nested sampler. see https://dypolychord.readthedocs.io for content of kwargs_run @@ -104,39 +118,42 @@ def run(self, dynamic_goal, kwargs_run): # TODO : put a default dynamic_goal ? 
# dynamic_goal = 0 for evidence-only, 1 for posterior-only - self._dyPolyChord.run_dypolychord(self._sampler, dynamic_goal, - settings_dict_in=self._settings, - comm=self._comm, **kwargs_run) + self._dyPolyChord.run_dypolychord( + self._sampler, + dynamic_goal, + settings_dict_in=self._settings, + comm=self._comm, + **kwargs_run + ) if self._is_master: - ns_run = self._ns_process_run(self._settings['file_root'], - self._settings['base_dir']) + ns_run = self._ns_process_run( + self._settings["file_root"], self._settings["base_dir"] + ) else: # in case DyPolyChord or NestCheck was not compiled properly, for unit tests ns_run = { - 'theta': np.zeros((1, self.n_dims)), - 'logl': np.zeros(1), - 'output': { - 'logZ': np.zeros(1), - 'logZerr': np.zeros(1), - 'param_means': np.zeros(self.n_dims) - } + "theta": np.zeros((1, self.n_dims)), + "logl": np.zeros(1), + "output": { + "logZ": np.zeros(1), + "logZerr": np.zeros(1), + "param_means": np.zeros(self.n_dims), + }, } - self._write_equal_weights(ns_run['theta'], ns_run['logl']) + self._write_equal_weights(ns_run["theta"], ns_run["logl"]) if self._is_master: samples, logL = self._get_equal_weight_samples() # logL = ns_run['logl'] # samples_w = ns_run['theta'] - logZ = ns_run['output']['logZ'] - logZ_err = ns_run['output']['logZerr'] - means = ns_run['output']['param_means'] + logZ = ns_run["output"]["logZ"] + logZ_err = ns_run["output"]["logZerr"] + means = ns_run["output"]["param_means"] - print('The log evidence estimate using the first run is {}' - .format(logZ)) - print('The estimated mean of the first parameter is {}' - .format(means[0])) + print("The log evidence estimate using the first run is {}".format(logZ)) + print("The estimated mean of the first parameter is {}".format(means[0])) if self._rm_output: shutil.rmtree(self._output_dir, ignore_errors=True) @@ -146,8 +163,7 @@ def run(self, dynamic_goal, kwargs_run): sys.exit(0) def log_likelihood(self, args): - """ - compute the log-likelihood given list of parameters + """Compute the log-likelihood given list of parameters. :param args: parameter values :return: log-likelihood (from the likelihood module) @@ -155,11 +171,9 @@ def log_likelihood(self, args): return super().log_likelihood(args), [] def _get_equal_weight_samples(self): - """ - Inspired by pymultinest's Analyzer, - because DyPolyChord has more or less the same output conventions as MultiNest - """ - file_name = '{}_equal_weights.txt'.format(self._output_basename) + """Inspired by pymultinest's Analyzer, because DyPolyChord has more or less the + same output conventions as MultiNest.""" + file_name = "{}_equal_weights.txt".format(self._output_basename) file_path = os.path.join(self._output_dir, file_name) data = np.loadtxt(file_path, ndmin=2) logL = -0.5 * data[:, 0] @@ -168,12 +182,12 @@ def _get_equal_weight_samples(self): def _write_equal_weights(self, samples, logL): # write fake output file for unit tests - file_name = '{}_equal_weights.txt'.format(self._output_basename) + file_name = "{}_equal_weights.txt".format(self._output_basename) file_path = os.path.join(self._output_dir, file_name) - data = np.zeros((samples.shape[0], 1+samples.shape[1]), dtype=float) - data[:, 0] = -2. 
* logL + data = np.zeros((samples.shape[0], 1 + samples.shape[1]), dtype=float) + data[:, 0] = -2.0 * logL data[:, 1:] = samples - np.savetxt(file_path, data, fmt='% .14E') + np.savetxt(file_path, data, fmt="% .14E") def _check_install(self): try: @@ -182,8 +196,10 @@ def _check_install(self): except ImportError: dyPolyChord = None pypolychord_utils = None - print("Warning : dyPolyChord not properly installed. \ - You can get it from : https://github.com/ejhigson/dyPolyChord") + print( + "Warning : dyPolyChord not properly installed. \ + You can get it from : https://github.com/ejhigson/dyPolyChord" + ) dypolychord_installed = False else: dypolychord_installed = True @@ -193,8 +209,10 @@ def _check_install(self): try: from nestcheck import data_processing except ImportError: - print("Warning : nestcheck not properly installed (results might be unexpected). \ - You can get it from : https://github.com/ejhigson/nestcheck") + print( + "Warning : nestcheck not properly installed (results might be unexpected). \ + You can get it from : https://github.com/ejhigson/nestcheck" + ) nestcheck_installed = False else: nestcheck_installed = True diff --git a/lenstronomy/Sampling/Samplers/pso.py b/lenstronomy/Sampling/Samplers/pso.py index a840ce2aa..a49a7d9f6 100644 --- a/lenstronomy/Sampling/Samplers/pso.py +++ b/lenstronomy/Sampling/Samplers/pso.py @@ -1,6 +1,4 @@ -""" -Created on Sep 30, 2013 -modified on March 3-7, 2020 +"""Created on Sep 30, 2013 modified on March 3-7, 2020. @authors: J. Akeret, S. Birrer, A. Shajib """ @@ -10,12 +8,11 @@ import math import numpy as np -__all__ = ['ParticleSwarmOptimizer'] +__all__ = ["ParticleSwarmOptimizer"] class ParticleSwarmOptimizer(object): - """ - Optimizer using a swarm of particles + """Optimizer using a swarm of particles. :param func: A function that takes a vector in the parameter space as input and @@ -31,11 +28,11 @@ class ParticleSwarmOptimizer(object): object provided by ``pool`` is used for all parallelization. It can be any object with a ``map`` method that follows the same calling sequence as the built-in ``map`` function. - """ - def __init__(self, func, low, high, particle_count=25, - pool=None, args=None, kwargs=None): + def __init__( + self, func, low, high, particle_count=25, pool=None, args=None, kwargs=None + ): """ :param func: function to call to return log likelihood @@ -68,10 +65,8 @@ def __init__(self, func, low, high, particle_count=25, self.func = _FunctionWrapper(func, args, kwargs) def __getstate__(self): - """ - In order to be generally pickleable, we need to discard the pool - object before trying. - """ + """In order to be generally pickleable, we need to discard the pool object + before trying.""" d = self.__dict__ d["pool"] = None return d @@ -80,8 +75,7 @@ def __setstate__(self, state): self.__dict__ = state def set_global_best(self, position, velocity, fitness): - """ - Set the global best particle. + """Set the global best particle. :param position: position of the new global best :type position: `list` or `ndarray` @@ -97,8 +91,7 @@ def set_global_best(self, position, velocity, fitness): self.global_best.fitness = fitness def _init_swarm(self): - """ - Initiate the swarm. + """Initiate the swarm. 
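# Illustrative sketch of the "<basename>_equal_weights.txt" convention read and
# written by the DyPolyChord wrapper above: column 0 stores -2*logL, the remaining
# columns store the parameter samples.
import numpy as np

def read_equal_weights(file_path):
    data = np.loadtxt(file_path, ndmin=2)
    log_l = -0.5 * data[:, 0]
    samples = data[:, 1:]
    return samples, log_l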
:return: :rtype: @@ -106,25 +99,36 @@ def _init_swarm(self): swarm = [] for _ in range(self.particleCount): swarm.append( - Particle(np.random.uniform(self.low, self.high, - size=self.param_count), - np.zeros(self.param_count))) + Particle( + np.random.uniform(self.low, self.high, size=self.param_count), + np.zeros(self.param_count), + ) + ) return swarm - def sample(self, max_iter=1000, c1=1.193, c2=1.193, p=0.7, m=1e-3, n=1e-2, early_stop_tolerance=None, - verbose=True): - """ - Launches the PSO. Yields the complete swarm per iteration + def sample( + self, + max_iter=1000, + c1=1.193, + c2=1.193, + p=0.7, + m=1e-3, + n=1e-2, + early_stop_tolerance=None, + verbose=True, + ): + """Launches the PSO. Yields the complete swarm per iteration. :param max_iter: maximum iterations :param c1: cognitive weight :param c2: social weight :param p: stop criterion, percentage of particles to use :param m: stop criterion, difference between mean fitness and global best - :param n: stop criterion, difference between norm of the particle - vector and norm of the global best - :param early_stop_tolerance: will terminate at the given value (should be specified as a chi^2) + :param n: stop criterion, difference between norm of the particle vector and + norm of the global best + :param early_stop_tolerance: will terminate at the given value (should be + specified as a chi^2) :param verbose: prints when it stopped :type verbose: boolean """ @@ -152,8 +156,11 @@ def sample(self, max_iter=1000, c1=1.193, c2=1.193, p=0.7, m=1e-3, n=1e-2, early if self.is_master(): if verbose: print("Converged after {} iterations!".format(i)) - print("Best fit found: ", self.global_best.fitness, - self.global_best.position) + print( + "Best fit found: ", + self.global_best.fitness, + self.global_best.position, + ) return if early_stop_tolerance is not None: @@ -164,15 +171,26 @@ def sample(self, max_iter=1000, c1=1.193, c2=1.193, p=0.7, m=1e-3, n=1e-2, early w = 0.5 + np.random.uniform(0, 1, size=self.param_count) / 2 # w=0.72 part_vel = w * np.array(particle.velocity) - cog_vel = c1 * np.random.uniform(0, 1, size=self.param_count) \ - * (np.array(particle.personal_best.position) - - np.array(particle.position)) - soc_vel = c2 * np.random.uniform(0, 1, size=self.param_count) \ - * (np.array(self.global_best.position) - - np.array(particle.position)) + cog_vel = ( + c1 + * np.random.uniform(0, 1, size=self.param_count) + * ( + np.array(particle.personal_best.position) + - np.array(particle.position) + ) + ) + soc_vel = ( + c2 + * np.random.uniform(0, 1, size=self.param_count) + * ( + np.array(self.global_best.position) + - np.array(particle.position) + ) + ) particle.velocity = (part_vel + cog_vel + soc_vel).tolist() - particle.position = (np.array(particle.position) + - np.array(particle.velocity)).tolist() + particle.position = ( + np.array(particle.position) + np.array(particle.velocity) + ).tolist() self._get_fitness(self.swarm) @@ -183,10 +201,18 @@ def sample(self, max_iter=1000, c1=1.193, c2=1.193, p=0.7, m=1e-3, n=1e-2, early i += 1 - def optimize(self, max_iter=1000, verbose=True, c1=1.193, c2=1.193, - p=0.7, m=1e-3, n=1e-2, early_stop_tolerance=None): - """ - Run the optimization and return a full list of optimization outputs. + def optimize( + self, + max_iter=1000, + verbose=True, + c1=1.193, + c2=1.193, + p=0.7, + m=1e-3, + n=1e-2, + early_stop_tolerance=None, + ): + """Run the optimization and return a full list of optimization outputs. 
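# Illustrative sketch (assumed names, numpy arrays) of the particle update that
# sample() implements above: the standard PSO rule with per-parameter inertia w,
# cognitive weight c1 and social weight c2.
import numpy as np

def pso_step(position, velocity, personal_best, global_best, c1, c2, rng=np.random):
    w = 0.5 + rng.uniform(0, 1, size=position.shape) / 2
    cognitive = c1 * rng.uniform(0, 1, size=position.shape) * (personal_best - position)
    social = c2 * rng.uniform(0, 1, size=position.shape) * (global_best - position)
    new_velocity = w * velocity + cognitive + social
    return position + new_velocity, new_velocity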
:param max_iter: maximum iterations :param verbose: if `True`, print a message every 10 iterations @@ -216,8 +242,7 @@ def optimize(self, max_iter=1000, verbose=True, c1=1.193, c2=1.193, return self.global_best.position, [log_likelihood_list, pos_list, vel_list] def _get_fitness(self, swarm): - """ - Set fitness (probability) of the particles in swarm. + """Set fitness (probability) of the particles in swarm. :param swarm: PSO state :type swarm: list of Particle() instances of the swarm @@ -236,8 +261,7 @@ def _get_fitness(self, swarm): particle.position = position[i] def _converged(self, it, p, m, n): - """ - Check for convergence. + """Check for convergence. :param it: :type it: @@ -271,9 +295,10 @@ def _converged_fit(self, it, p, m): :return: :rtype: """ - best_sort = np.sort([particle.personal_best.fitness for particle in - self.swarm])[::-1] - mean_fit = np.mean(best_sort[1:int(math.floor(self.particleCount * p))]) + best_sort = np.sort( + [particle.personal_best.fitness for particle in self.swarm] + )[::-1] + mean_fit = np.mean(best_sort[1 : int(math.floor(self.particleCount * p))]) # print( "best %f, mean_fit %f, ration %f"%( self.global_best[0], # mean_fit, abs((self.global_best[0]-mean_fit)))) return abs(self.global_best.fitness - mean_fit) < m @@ -292,12 +317,13 @@ def _converged_space(self, it, p, m): """ sorted_swarm = [particle for particle in self.swarm] sorted_swarm.sort() - best_of_best = sorted_swarm[0:int(floor(self.particleCount * p))] + best_of_best = sorted_swarm[0 : int(floor(self.particleCount * p))] diffs = [] for particle in best_of_best: - diffs.append(np.array(self.global_best.position) - - np.array(particle.position)) + diffs.append( + np.array(self.global_best.position) - np.array(particle.position) + ) max_norm = max(list(map(np.linalg.norm, diffs))) return abs(max_norm) < m @@ -313,17 +339,18 @@ def _converged_space2(self, p): # Andres N. Ruiz et al. sorted_swarm = [particle for particle in self.swarm] sorted_swarm.sort() - best_of_best = sorted_swarm[0:int(floor(self.particleCount * p))] + best_of_best = sorted_swarm[0 : int(floor(self.particleCount * p))] positions = [particle.position for particle in best_of_best] means = np.mean(positions, axis=0) - delta = np.mean((means - np.array(self.global_best.position)) / - np.array(self.global_best.position)) + delta = np.mean( + (means - np.array(self.global_best.position)) + / np.array(self.global_best.position) + ) return np.log10(delta) < -3.0 def is_master(self): - """ - Check if the current processor is the master. + """Check if the current processor is the master. :return: :rtype: @@ -334,7 +361,6 @@ def is_master(self): return self.pool.is_master() def _acceptable_convergence(self, chi_square_tolerance): - chi_square = -2 * self.global_best.fitness if np.min(chi_square) < chi_square_tolerance: @@ -344,14 +370,13 @@ def _acceptable_convergence(self, chi_square_tolerance): class Particle(object): - """ - Implementation of a single particle + """Implementation of a single particle. 
:param position: the position of the particle in the parameter space :param velocity: the velocity of the particle :param fitness: the current fitness of the particle - """ + def __init__(self, position, velocity, fitness=0): """ @@ -380,35 +405,29 @@ def personal_best(self): @classmethod def create(cls, param_count): - """ - Creates a new particle without position, velocity and -inf as fitness - """ + """Creates a new particle without position, velocity and -inf as fitness.""" - return Particle(np.array([[]] * param_count), - np.array([[]] * param_count), - -np.Inf) + return Particle( + np.array([[]] * param_count), np.array([[]] * param_count), -np.Inf + ) def update_personal_best(self): - """ - Sets the current particle representation as personal best - """ + """Sets the current particle representation as personal best.""" self._personal_best = self.copy() def copy(self): - """ - Creates a copy of itself - """ + """Creates a copy of itself.""" return Particle(copy(self.position), copy(self.velocity), self.fitness) def __str__(self): - """ - Get a `str` object for the particle state. + """Get a `str` object for the particle state. + :return: :rtype: """ - return "{:f}, pos: {:s} velocity: {:s}".format(self.fitness, - self.position, - self.velocity) + return "{:f}, pos: {:s} velocity: {:s}".format( + self.fitness, self.position, self.velocity + ) def __lt__(self, other): return self.fitness > other.fitness @@ -424,9 +443,10 @@ def __unicode__(self): class _FunctionWrapper(object): - """ - This is a hack to make the likelihood function pickleable when ``args`` - or ``kwargs`` are also included. This hack is copied from + """This is a hack to make the likelihood function pickleable when ``args`` or + ``kwargs`` are also included. + + This hack is copied from emcee: https://github.com/dfm/emcee/. """ diff --git a/lenstronomy/Sampling/__init__.py b/lenstronomy/Sampling/__init__.py index c3412e3c2..d34951c17 100644 --- a/lenstronomy/Sampling/__init__.py +++ b/lenstronomy/Sampling/__init__.py @@ -1,4 +1,4 @@ -__author__ = 'Simon Birrer' -__email__ = 'sibirrer@gmail.com' -__version__ = '0.1.0' -__credits__ = 'ETH Zurich, UCLA' \ No newline at end of file +__author__ = "Simon Birrer" +__email__ = "sibirrer@gmail.com" +__version__ = "0.1.0" +__credits__ = "ETH Zurich, UCLA" diff --git a/lenstronomy/Sampling/likelihood.py b/lenstronomy/Sampling/likelihood.py index 575c12e70..95d3defcf 100644 --- a/lenstronomy/Sampling/likelihood.py +++ b/lenstronomy/Sampling/likelihood.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.Sampling.Likelihoods.time_delay_likelihood import TimeDelayLikelihood from lenstronomy.Sampling.Likelihoods.image_likelihood import ImageLikelihood @@ -8,12 +8,12 @@ import lenstronomy.Util.class_creator as class_creator import numpy as np -__all__ = ['LikelihoodModule'] +__all__ = ["LikelihoodModule"] class LikelihoodModule(object): - """ - this class contains the routines to run a MCMC process + """This class contains the routines to run a MCMC process. 
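# Illustrative sketch of the _FunctionWrapper idea described above: bundling
# func, args and kwargs into one picklable callable so a multiprocessing pool
# can map it over particle positions.
class FunctionWrapper:
    def __init__(self, func, args=None, kwargs=None):
        self.func = func
        self.args = args or []
        self.kwargs = kwargs or {}

    def __call__(self, x):
        return self.func(x, *self.args, **self.kwargs)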
+ the key components are: - imSim_class: an instance of a class that simulates one (or more) images and returns the likelihood, such as ImageModel(), Multiband(), MultiExposure() @@ -22,73 +22,133 @@ class LikelihoodModule(object): Additional arguments are supported for adding a time-delay likelihood etc (see __init__ definition) """ - def __init__(self, kwargs_data_joint, kwargs_model, param_class, image_likelihood=True, check_bounds=True, - check_matched_source_position=False, astrometric_likelihood=False, image_position_likelihood=False, - source_position_likelihood=False, image_position_uncertainty=0.004, check_positive_flux=False, - source_position_tolerance=0.001, source_position_sigma=0.001, force_no_add_image=False, - source_marg=False, linear_prior=None, restrict_image_number=False, - max_num_images=None, bands_compute=None, time_delay_likelihood=False, - image_likelihood_mask_list=None, - flux_ratio_likelihood=False, kwargs_flux_compute=None, prior_lens=None, prior_source=None, - prior_extinction=None, prior_lens_light=None, prior_ps=None, prior_special=None, prior_lens_kde=None, - prior_source_kde=None, prior_lens_light_kde=None, prior_ps_kde=None, prior_special_kde=None, - prior_extinction_kde=None, prior_lens_lognormal=None, prior_source_lognormal=None, - prior_extinction_lognormal=None, prior_lens_light_lognormal=None, prior_ps_lognormal=None, - prior_special_lognormal=None, custom_logL_addition=None, kwargs_pixelbased=None): - """ - initializing class - - :param param_class: instance of a Param() class that can cast the sorted list of parameters that are sampled - into the conventions of the imSim_class + def __init__( + self, + kwargs_data_joint, + kwargs_model, + param_class, + image_likelihood=True, + check_bounds=True, + check_matched_source_position=False, + astrometric_likelihood=False, + image_position_likelihood=False, + source_position_likelihood=False, + image_position_uncertainty=0.004, + check_positive_flux=False, + source_position_tolerance=0.001, + source_position_sigma=0.001, + force_no_add_image=False, + source_marg=False, + linear_prior=None, + restrict_image_number=False, + max_num_images=None, + bands_compute=None, + time_delay_likelihood=False, + image_likelihood_mask_list=None, + flux_ratio_likelihood=False, + kwargs_flux_compute=None, + prior_lens=None, + prior_source=None, + prior_extinction=None, + prior_lens_light=None, + prior_ps=None, + prior_special=None, + prior_lens_kde=None, + prior_source_kde=None, + prior_lens_light_kde=None, + prior_ps_kde=None, + prior_special_kde=None, + prior_extinction_kde=None, + prior_lens_lognormal=None, + prior_source_lognormal=None, + prior_extinction_lognormal=None, + prior_lens_light_lognormal=None, + prior_ps_lognormal=None, + prior_special_lognormal=None, + custom_logL_addition=None, + kwargs_pixelbased=None, + ): + """Initializing class. 
+ + :param param_class: instance of a Param() class that can cast the sorted list of + parameters that are sampled into the conventions of the imSim_class :param image_likelihood: bool, option to compute the imaging likelihood - :param source_position_likelihood: bool, if True, ray-traces image positions back to source plane and evaluates - relative errors in respect ot the position_uncertainties in the image plane - :param check_bounds: bool, option to punish the hard bounds in parameter space - :param check_matched_source_position: bool, option to check whether point source position of solver finds a - solution to match all the image positions in the same source plane coordinate - :param astrometric_likelihood: bool, additional likelihood term of the predicted vs modelled point source - position - :param image_position_uncertainty: float, 1-sigma Gaussian uncertainty on the point source position - (only used if point_source_likelihood=True) - :param check_positive_flux: bool, option to punish models that do not have all positive linear amplitude - parameters - :param source_position_tolerance: float, punishment of check_solver occurs when image positions are predicted - further away than this number - :param image_likelihood_mask_list: list of boolean 2d arrays of size of images marking the pixels to be - evaluated in the likelihood - :param force_no_add_image: bool, if True: computes ALL image positions of the point source. If there are more - images predicted than modelled, a punishment occurs - :param source_marg: marginalization addition on the imaging likelihood based on the covariance of the inferred - linear coefficients - :param linear_prior: float or list of floats (when multi-linear setting is chosen) indicating the range of - linear amplitude priors when computing the marginalization term. - :param restrict_image_number: bool, if True: computes ALL image positions of the point source. If there are more - images predicted than indicated in max_num_images, a punishment occurs + :param source_position_likelihood: bool, if True, ray-traces image positions + back to source plane and evaluates relative errors in respect ot the + position_uncertainties in the image plane + :param check_bounds: bool, option to punish the hard bounds in parameter space + :param check_matched_source_position: bool, option to check whether point source + position of solver finds a solution to match all the image positions in the + same source plane coordinate + :param astrometric_likelihood: bool, additional likelihood term of the predicted + vs modelled point source position + :param image_position_uncertainty: float, 1-sigma Gaussian uncertainty on the + point source position (only used if point_source_likelihood=True) + :param check_positive_flux: bool, option to punish models that do not have all + positive linear amplitude parameters + :param source_position_tolerance: float, punishment of check_solver occurs when + image positions are predicted further away than this number + :param image_likelihood_mask_list: list of boolean 2d arrays of size of images + marking the pixels to be evaluated in the likelihood + :param force_no_add_image: bool, if True: computes ALL image positions of the + point source. 
If there are more images predicted than modelled, a punishment + occurs + :param source_marg: marginalization addition on the imaging likelihood based on + the covariance of the inferred linear coefficients + :param linear_prior: float or list of floats (when multi-linear setting is + chosen) indicating the range of linear amplitude priors when computing the + marginalization term. + :param restrict_image_number: bool, if True: computes ALL image positions of the + point source. If there are more images predicted than indicated in + max_num_images, a punishment occurs :param max_num_images: int, see restrict_image_number - :param bands_compute: list of bools with same length as data objects, indicates which "band" to include in the - fitting - :param time_delay_likelihood: bool, if True computes the time-delay likelihood of the FIRST point source - :param kwargs_flux_compute: keyword arguments of how to compute the image position fluxes - (see FluxRatioLikeliood) - :param custom_logL_addition: a definition taking as arguments (kwargs_lens, kwargs_source, kwargs_lens_light, - kwargs_ps, kwargs_special, kwargs_extinction) and returns a logL (punishing) value. - :param kwargs_pixelbased: keyword arguments with various settings related to the pixel-based solver - (see SLITronomy documentation) + :param bands_compute: list of bools with same length as data objects, indicates + which "band" to include in the fitting + :param time_delay_likelihood: bool, if True computes the time-delay likelihood + of the FIRST point source + :param kwargs_flux_compute: keyword arguments of how to compute the image + position fluxes (see FluxRatioLikeliood) + :param custom_logL_addition: a definition taking as arguments (kwargs_lens, + kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special, + kwargs_extinction) and returns a logL (punishing) value. 
+ :param kwargs_pixelbased: keyword arguments with various settings related to the + pixel-based solver (see SLITronomy documentation) """ - multi_band_list, multi_band_type, time_delays_measured, time_delays_uncertainties, flux_ratios, flux_ratio_errors, ra_image_list, dec_image_list = self._unpack_data(**kwargs_data_joint) + ( + multi_band_list, + multi_band_type, + time_delays_measured, + time_delays_uncertainties, + flux_ratios, + flux_ratio_errors, + ra_image_list, + dec_image_list, + ) = self._unpack_data(**kwargs_data_joint) if len(multi_band_list) == 0: image_likelihood = False self.param = param_class self._lower_limit, self._upper_limit = self.param.param_limits() - self._prior_likelihood = PriorLikelihood(prior_lens, prior_source, prior_lens_light, prior_ps, prior_special, - prior_extinction, - prior_lens_kde, prior_source_kde, prior_lens_light_kde, prior_ps_kde, - prior_special_kde, prior_extinction_kde, - prior_lens_lognormal, prior_source_lognormal, - prior_lens_light_lognormal, prior_ps_lognormal, - prior_special_lognormal, prior_extinction_lognormal, - ) + self._prior_likelihood = PriorLikelihood( + prior_lens, + prior_source, + prior_lens_light, + prior_ps, + prior_special, + prior_extinction, + prior_lens_kde, + prior_source_kde, + prior_lens_light_kde, + prior_ps_kde, + prior_special_kde, + prior_extinction_kde, + prior_lens_lognormal, + prior_source_lognormal, + prior_lens_light_lognormal, + prior_ps_lognormal, + prior_special_lognormal, + prior_extinction_lognormal, + ) self._time_delay_likelihood = time_delay_likelihood self._image_likelihood = image_likelihood self._flux_ratio_likelihood = flux_ratio_likelihood @@ -98,30 +158,56 @@ def __init__(self, kwargs_data_joint, kwargs_model, param_class, image_likelihoo self._kwargs_flux_compute = kwargs_flux_compute self._check_bounds = check_bounds self._custom_logL_addition = custom_logL_addition - self._kwargs_time_delay = {'time_delays_measured': time_delays_measured, - 'time_delays_uncertainties': time_delays_uncertainties} - self.kwargs_imaging = {'multi_band_list': multi_band_list, 'multi_band_type': multi_band_type, - 'bands_compute': bands_compute, - 'image_likelihood_mask_list': image_likelihood_mask_list, 'source_marg': source_marg, - 'linear_prior': linear_prior, 'check_positive_flux': check_positive_flux, - 'kwargs_pixelbased': kwargs_pixelbased, 'linear_solver': linear_solver} - self._kwargs_position = {'astrometric_likelihood': astrometric_likelihood, - 'image_position_likelihood': image_position_likelihood, - 'source_position_likelihood': source_position_likelihood, - 'ra_image_list': ra_image_list, 'dec_image_list': dec_image_list, - 'image_position_uncertainty': image_position_uncertainty, - 'check_matched_source_position': check_matched_source_position, - 'source_position_tolerance': source_position_tolerance, - 'source_position_sigma': source_position_sigma, - 'force_no_add_image': force_no_add_image, - 'restrict_image_number': restrict_image_number, 'max_num_images': max_num_images} - self._kwargs_flux = {'flux_ratios': flux_ratios, 'flux_ratio_errors': flux_ratio_errors} + self._kwargs_time_delay = { + "time_delays_measured": time_delays_measured, + "time_delays_uncertainties": time_delays_uncertainties, + } + self.kwargs_imaging = { + "multi_band_list": multi_band_list, + "multi_band_type": multi_band_type, + "bands_compute": bands_compute, + "image_likelihood_mask_list": image_likelihood_mask_list, + "source_marg": source_marg, + "linear_prior": linear_prior, + "check_positive_flux": 
check_positive_flux, + "kwargs_pixelbased": kwargs_pixelbased, + "linear_solver": linear_solver, + } + self._kwargs_position = { + "astrometric_likelihood": astrometric_likelihood, + "image_position_likelihood": image_position_likelihood, + "source_position_likelihood": source_position_likelihood, + "ra_image_list": ra_image_list, + "dec_image_list": dec_image_list, + "image_position_uncertainty": image_position_uncertainty, + "check_matched_source_position": check_matched_source_position, + "source_position_tolerance": source_position_tolerance, + "source_position_sigma": source_position_sigma, + "force_no_add_image": force_no_add_image, + "restrict_image_number": restrict_image_number, + "max_num_images": max_num_images, + } + self._kwargs_flux = { + "flux_ratios": flux_ratios, + "flux_ratio_errors": flux_ratio_errors, + } self._kwargs_flux.update(self._kwargs_flux_compute) - self._class_instances(kwargs_model=kwargs_model, kwargs_imaging=self.kwargs_imaging, - kwargs_position=self._kwargs_position, kwargs_flux=self._kwargs_flux, - kwargs_time_delay=self._kwargs_time_delay) - - def _class_instances(self, kwargs_model, kwargs_imaging, kwargs_position, kwargs_flux, kwargs_time_delay): + self._class_instances( + kwargs_model=kwargs_model, + kwargs_imaging=self.kwargs_imaging, + kwargs_position=self._kwargs_position, + kwargs_flux=self._kwargs_flux, + kwargs_time_delay=self._kwargs_time_delay, + ) + + def _class_instances( + self, + kwargs_model, + kwargs_imaging, + kwargs_position, + kwargs_flux, + kwargs_time_delay, + ): """ :param kwargs_model: lenstronomy model keyword arguments @@ -134,41 +220,55 @@ def _class_instances(self, kwargs_model, kwargs_imaging, kwargs_position, kwargs # TODO: in case lens model or point source models are only applied on partial images, then this current class # has ambiguities when it comes to position likelihood, time-delay likelihood and flux ratio likelihood - lens_model_class, _, _, point_source_class, _ = class_creator.create_class_instances(all_models=True, - **kwargs_model) + ( + lens_model_class, + _, + _, + point_source_class, + _, + ) = class_creator.create_class_instances(all_models=True, **kwargs_model) self.PointSource = point_source_class if self._time_delay_likelihood is True: - self.time_delay_likelihood = TimeDelayLikelihood(lens_model_class=lens_model_class, - point_source_class=point_source_class, - **kwargs_time_delay) + self.time_delay_likelihood = TimeDelayLikelihood( + lens_model_class=lens_model_class, + point_source_class=point_source_class, + **kwargs_time_delay + ) if self._image_likelihood is True: - self.image_likelihood = ImageLikelihood(kwargs_model=kwargs_model, **kwargs_imaging) - self._position_likelihood = PositionLikelihood(point_source_class, **kwargs_position) + self.image_likelihood = ImageLikelihood( + kwargs_model=kwargs_model, **kwargs_imaging + ) + self._position_likelihood = PositionLikelihood( + point_source_class, **kwargs_position + ) if self._flux_ratio_likelihood is True: - self.flux_ratio_likelihood = FluxRatioLikelihood(lens_model_class, **kwargs_flux) + self.flux_ratio_likelihood = FluxRatioLikelihood( + lens_model_class, **kwargs_flux + ) def __call__(self, a): return self.logL(a) def logL(self, args, verbose=False): - """ - routine to compute X2 given variable parameters for a MCMC/PSO chain - + """Routine to compute X2 given variable parameters for a MCMC/PSO chain. 
:param args: ordered parameter values that are being sampled :type args: tuple or list of floats - :param verbose: if True, makes print statements about individual likelihood components + :param verbose: if True, makes print statements about individual likelihood + components :type verbose: boolean :returns: log likelihood of the data given the model (natural logarithm) """ # extract parameters kwargs_return = self.param.args2kwargs(args) if self._check_bounds is True: - penalty, bound_hit = self.check_bounds(args, self._lower_limit, self._upper_limit, verbose=verbose) + penalty, bound_hit = self.check_bounds( + args, self._lower_limit, self._upper_limit, verbose=verbose + ) if bound_hit is True: - return -10**15 + return -(10**15) return self.log_likelihood(kwargs_return, verbose=verbose) def log_likelihood(self, kwargs_return, verbose=False): @@ -185,11 +285,13 @@ def log_likelihood(self, kwargs_return, verbose=False): :returns: - logL (float) log likelihood of the data given the model (natural logarithm) """ - kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special = kwargs_return['kwargs_lens'], \ - kwargs_return['kwargs_source'], \ - kwargs_return['kwargs_lens_light'], \ - kwargs_return['kwargs_ps'], \ - kwargs_return['kwargs_special'] + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special = ( + kwargs_return["kwargs_lens"], + kwargs_return["kwargs_source"], + kwargs_return["kwargs_lens_light"], + kwargs_return["kwargs_ps"], + kwargs_return["kwargs_special"], + ) # update model instance in case of changes affecting it (i.e. redshift sampling in multi-plane) self._update_model(kwargs_special) # generate image and computes likelihood @@ -200,48 +302,56 @@ def log_likelihood(self, kwargs_return, verbose=False): logL_image = self.image_likelihood.logL(**kwargs_return) logL += logL_image if verbose is True: - print('image logL = %s' % logL_image) + print("image logL = %s" % logL_image) if self._time_delay_likelihood is True: - logL_time_delay = self.time_delay_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_special) + logL_time_delay = self.time_delay_likelihood.logL( + kwargs_lens, kwargs_ps, kwargs_special + ) logL += logL_time_delay if verbose is True: - print('time-delay logL = %s' % logL_time_delay) + print("time-delay logL = %s" % logL_time_delay) if self._flux_ratio_likelihood is True: - ra_image_list, dec_image_list = self.PointSource.image_position(kwargs_ps=kwargs_ps, - kwargs_lens=kwargs_lens) + ra_image_list, dec_image_list = self.PointSource.image_position( + kwargs_ps=kwargs_ps, kwargs_lens=kwargs_lens + ) x_pos, y_pos = ra_image_list[0], dec_image_list[0] - logL_flux_ratios = self.flux_ratio_likelihood.logL(x_pos, y_pos, kwargs_lens, kwargs_special) + logL_flux_ratios = self.flux_ratio_likelihood.logL( + x_pos, y_pos, kwargs_lens, kwargs_special + ) logL += logL_flux_ratios if verbose is True: - print('flux ratio logL = %s' % logL_flux_ratios) - logL += self._position_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_special, verbose=verbose) + print("flux ratio logL = %s" % logL_flux_ratios) + logL += self._position_likelihood.logL( + kwargs_lens, kwargs_ps, kwargs_special, verbose=verbose + ) logL_prior = self._prior_likelihood.logL(**kwargs_return) logL += logL_prior if verbose is True: - print('Prior likelihood = %s' % logL_prior) + print("Prior likelihood = %s" % logL_prior) if self._custom_logL_addition is not None: logL_cond = self._custom_logL_addition(**kwargs_return) logL += logL_cond if verbose is True: - print('custom added logL = 
%s' % logL_cond) + print("custom added logL = %s" % logL_cond) self._reset_point_source_cache(bool_input=False) return logL # , None @staticmethod def check_bounds(args, lowerLimit, upperLimit, verbose=False): - """ - checks whether the parameter vector has left its bound, if so, adds a big number - """ - penalty = 0. + """Checks whether the parameter vector has left its bound, if so, adds a big + number.""" + penalty = 0.0 bound_hit = False args = np.atleast_1d(args) for i in range(0, len(args)): if args[i] < lowerLimit[i] or args[i] > upperLimit[i]: - penalty = 10.**5 + penalty = 10.0**5 bound_hit = True if verbose is True: - print('parameter %s with value %s hit the bounds [%s, %s] ' % (i, args[i], lowerLimit[i], - upperLimit[i])) + print( + "parameter %s with value %s hit the bounds [%s, %s] " + % (i, args[i], lowerLimit[i], upperLimit[i]) + ) return penalty, bound_hit return penalty, bound_hit @@ -266,9 +376,8 @@ def param_limits(self): return self._lower_limit, self._upper_limit def effective_num_data_points(self, **kwargs): - """ - returns the effective number of data points considered in the X2 estimation to compute the reduced X2 value - """ + """Returns the effective number of data points considered in the X2 estimation + to compute the reduced X2 value.""" num_linear = 0 if self._image_likelihood is True: num_linear = self.image_likelihood.num_param_linear(**kwargs) @@ -279,8 +388,7 @@ def likelihood(self, a): return self.logL(a) def negativelogL(self, a): - """ - for minimizer function, the negative value of the logl value is requested + """For minimizer function, the negative value of the logl value is requested. :param a: array of parameters :return: -logL @@ -288,9 +396,16 @@ def negativelogL(self, a): return -self.logL(a) @staticmethod - def _unpack_data(multi_band_list=None, multi_band_type='multi-linear', time_delays_measured=None, - time_delays_uncertainties=None, flux_ratios=None, flux_ratio_errors=None, ra_image_list=None, - dec_image_list=None): + def _unpack_data( + multi_band_list=None, + multi_band_type="multi-linear", + time_delays_measured=None, + time_delays_uncertainties=None, + flux_ratios=None, + flux_ratio_errors=None, + ra_image_list=None, + dec_image_list=None, + ): """ :param multi_band_list: list of [[kwargs_data, kwargs_psf, kwargs_numerics], [], ...] @@ -307,8 +422,16 @@ def _unpack_data(multi_band_list=None, multi_band_type='multi-linear', time_dela ra_image_list = [] if dec_image_list is None: dec_image_list = [] - return multi_band_list, multi_band_type, time_delays_measured, time_delays_uncertainties, flux_ratios, \ - flux_ratio_errors, ra_image_list, dec_image_list + return ( + multi_band_list, + multi_band_type, + time_delays_measured, + time_delays_uncertainties, + flux_ratios, + flux_ratio_errors, + ra_image_list, + dec_image_list, + ) def _reset_point_source_cache(self, bool_input=True): self.PointSource.delete_lens_model_cache() @@ -317,16 +440,21 @@ def _reset_point_source_cache(self, bool_input=True): self.image_likelihood.reset_point_source_cache(bool_input) def _update_model(self, kwargs_special): - """ - updates lens model instance of this class (and all class instances related to it) when an update to the - modeled redshifts of the deflector and/or source planes are made + """Updates lens model instance of this class (and all class instances related to + it) when an update to the modeled redshifts of the deflector and/or source + planes are made. 
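# Illustrative sketch of the kwargs_data_joint structure that _unpack_data()
# above expects; kwargs_data, kwargs_psf and kwargs_numerics stand in for the
# usual per-band keyword dictionaries (placeholders here).
kwargs_data, kwargs_psf, kwargs_numerics = {}, {}, {}  # placeholders
kwargs_data_joint = {
    "multi_band_list": [[kwargs_data, kwargs_psf, kwargs_numerics]],
    "multi_band_type": "multi-linear",
    "time_delays_measured": None,
    "time_delays_uncertainties": None,
    "flux_ratios": None,
    "flux_ratio_errors": None,
    "ra_image_list": [],
    "dec_image_list": [],
}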
- :param kwargs_special: keyword arguments from SpecialParam() class return of sampling arguments + :param kwargs_special: keyword arguments from SpecialParam() class return of + sampling arguments :return: None, all class instances updated to recent modek """ kwargs_model, update_bool = self.param.update_kwargs_model(kwargs_special) if update_bool is True: - self._class_instances(kwargs_model=kwargs_model, kwargs_imaging=self.kwargs_imaging, - kwargs_position=self._kwargs_position, kwargs_flux=self._kwargs_flux, - kwargs_time_delay=self._kwargs_time_delay) + self._class_instances( + kwargs_model=kwargs_model, + kwargs_imaging=self.kwargs_imaging, + kwargs_position=self._kwargs_position, + kwargs_flux=self._kwargs_flux, + kwargs_time_delay=self._kwargs_time_delay, + ) # TODO remove redundancies with Param() calls updates diff --git a/lenstronomy/Sampling/param_group.py b/lenstronomy/Sampling/param_group.py index 8062b103e..ca646cfeb 100644 --- a/lenstronomy/Sampling/param_group.py +++ b/lenstronomy/Sampling/param_group.py @@ -1,51 +1,48 @@ -''' -This module provides helper classes for managing sample parameters. This is -for internal use, if you are not modifying lenstronomy sampling to include -new parameters you can safely ignore this. -''' +"""This module provides helper classes for managing sample parameters. -__author__ = 'jhodonnell' -__all__ = ['ModelParamGroup', 'SingleParam', 'ArrayParam'] +This is for internal use, if you are not modifying lenstronomy sampling to include new +parameters you can safely ignore this. +""" + +__author__ = "jhodonnell" +__all__ = ["ModelParamGroup", "SingleParam", "ArrayParam"] import numpy as np class ModelParamGroup: - ''' - This abstract class represents any lenstronomy fitting parameters used - in the Param class. - - Subclasses should implement num_params(), set_params(), and get_params() - to convert parameters from lenstronomy's semantic dictionary format to a - flattened array format and back. - - This class also contains three static methods to easily aggregate groups - of parameter classes, called `compose_num_params()`, `compose_set_params()`, - and `compose_get_params()`. - ''' + """This abstract class represents any lenstronomy fitting parameters used in the + Param class. + + Subclasses should implement num_params(), set_params(), and get_params() to convert + parameters from lenstronomy's semantic dictionary format to a flattened array format + and back. + + This class also contains three static methods to easily aggregate groups of + parameter classes, called `compose_num_params()`, `compose_set_params()`, and + `compose_get_params()`. + """ + def num_params(self): - ''' - Tells the number of parameters that this group samples and their names. + """Tells the number of parameters that this group samples and their names. :returns: 2-tuple of (num param, list of names) - ''' + """ raise NotImplementedError def set_params(self, kwargs): - ''' - Converts lenstronomy semantic parameters in dictionary format into a + """Converts lenstronomy semantic parameters in dictionary format into a flattened array of parameters. - The flattened array is for use in optimization algorithms, e.g. MCMC, - Particle swarm, etc. + The flattened array is for use in optimization algorithms, e.g. MCMC, Particle + swarm, etc. 
:returns: flattened array of parameters as floats - ''' + """ raise NotImplementedError def get_params(self, args, i): - ''' - Converts a flattened array of parameters back into a lenstronomy dictionary, + """Converts a flattened array of parameters back into a lenstronomy dictionary, starting at index i. :param args: flattened arguments to convert to lenstronomy format @@ -53,22 +50,23 @@ def get_params(self, args, i): :param i: index to begin at in args :type i: int :returns: dictionary of parameters - ''' + """ raise NotImplementedError @staticmethod def compose_num_params(each_group, *args, **kwargs): - ''' - Aggregates the number of parameters for a group of parameter groups, - calling each instance's `num_params()` method and combining the results + """Aggregates the number of parameters for a group of parameter groups, calling + each instance's `num_params()` method and combining the results. - :param each_group: collection of parameter groups. Should each be subclasses of ModelParamGroup. + :param each_group: collection of parameter groups. Should each be subclasses of + ModelParamGroup. :type each_group: list :param args: Extra arguments to be passed to each call of `num_params()` - :param kwargs: Extra keyword arguments to be passed to each call of `num_params()` - - :returns: As in each individual `num_params()`, a 2-tuple of (num params, list of param names) - ''' + :param kwargs: Extra keyword arguments to be passed to each call of + `num_params()` + :returns: As in each individual `num_params()`, a 2-tuple of (num params, list + of param names) + """ tot_param = 0 param_names = [] for group in each_group: @@ -79,20 +77,20 @@ def compose_num_params(each_group, *args, **kwargs): @staticmethod def compose_set_params(each_group, param_kwargs, *args, **kwargs): - ''' - Converts lenstronomy semantic arguments in dictionary format to a - flattened list of floats for use in optimization/fitting algorithms. - Combines the results for a set of arbitrarily many parameter groups. + """Converts lenstronomy semantic arguments in dictionary format to a flattened + list of floats for use in optimization/fitting algorithms. Combines the results + for a set of arbitrarily many parameter groups. - :param each_group: collection of parameter groups. Should each be subclasses of ModelParamGroup. + :param each_group: collection of parameter groups. Should each be subclasses of + ModelParamGroup. :type each_group: list :param param_kwargs: the kwargs to process :type param_kwargs: dict :param args: Extra arguments to be passed to each call of `set_params()` - :param kwargs: Extra keyword arguments to be passed to each call of `set_params()` - + :param kwargs: Extra keyword arguments to be passed to each call of + `set_params()` :returns: As in each individual `set_params()`, a list of floats - ''' + """ output_args = [] for group in each_group: output_args += group.set_params(param_kwargs, *args, **kwargs) @@ -100,10 +98,9 @@ def compose_set_params(each_group, param_kwargs, *args, **kwargs): @staticmethod def compose_get_params(each_group, flat_args, i, *args, **kwargs): - ''' - Converts a flattened array of parameters to lenstronomy semantic - parameters in dictionary format. - Combines the results for a set of arbitrarily many parameter groups. + """Converts a flattened array of parameters to lenstronomy semantic parameters + in dictionary format. Combines the results for a set of arbitrarily many + parameter groups. :param each_group: collection of parameter groups. 
Should each be subclasses of ModelParamGroup. :type each_group: list @@ -115,7 +112,7 @@ def compose_get_params(each_group, flat_args, i, *args, **kwargs): :param kwargs: Extra keyword arguments to be passed to each call of `set_params()` :returns: As in each individual `get_params()`, a 2-tuple of (dictionary of params, new index) - ''' + """ output_kwargs = {} for group in each_group: kwargs_grp, i = group.get_params(flat_args, i, *args, **kwargs) @@ -124,8 +121,7 @@ def compose_get_params(each_group, flat_args, i, *args, **kwargs): class SingleParam(ModelParamGroup): - ''' - Helper for handling parameters which are a single float. + """Helper for handling parameters which are a single float. Subclasses should define: @@ -134,23 +130,22 @@ class SingleParam(ModelParamGroup): :param param_names: List of strings, the name of each parameter :param _kwargs_lower: Dictionary. Lower bounds of each parameter :param _kwargs_upper: Dictionary. Upper bounds of each parameter - ''' + """ + def __init__(self, on): - ''' + """ :param on: Whether this paramter should be sampled :type on: bool - ''' + """ self._on = bool(on) def num_params(self, kwargs_fixed): - ''' - Tells the number of parameters that this group samples and their names. + """Tells the number of parameters that this group samples and their names. :param kwargs_fixed: Dictionary of fixed arguments :type kwargs_fixed: dict - :returns: 2-tuple of (num param, list of names) - ''' + """ if self.on: npar, names = 0, [] for name in self.param_names: @@ -161,20 +156,18 @@ def num_params(self, kwargs_fixed): return 0, [] def set_params(self, kwargs, kwargs_fixed): - ''' - Converts lenstronomy semantic parameters in dictionary format into a + """Converts lenstronomy semantic parameters in dictionary format into a flattened array of parameters. - The flattened array is for use in optimization algorithms, e.g. MCMC, - Particle swarm, etc. + The flattened array is for use in optimization algorithms, e.g. MCMC, Particle + swarm, etc. :param kwargs: lenstronomy parameters to flatten :type kwargs: dict :param kwargs_fixed: Dictionary of fixed arguments :type kwargs_fixed: dict - :returns: flattened array of parameters as floats - ''' + """ if self.on: output = [] for name in self.param_names: @@ -184,8 +177,7 @@ def set_params(self, kwargs, kwargs_fixed): return [] def get_params(self, args, i, kwargs_fixed, kwargs_upper=None, kwargs_lower=None): - ''' - Converts a flattened array of parameters back into a lenstronomy dictionary, + """Converts a flattened array of parameters back into a lenstronomy dictionary, starting at index i. :param args: flattened arguments to convert to lenstronomy format @@ -194,9 +186,8 @@ def get_params(self, args, i, kwargs_fixed, kwargs_upper=None, kwargs_lower=None :type i: int :param kwargs_fixed: Dictionary of fixed arguments :type kwargs_fixed: dict - :returns: dictionary of parameters - ''' + """ out = {} if self.on: for name in self.param_names: @@ -229,10 +220,9 @@ def on(self): class ArrayParam(ModelParamGroup): - ''' - Helper for handling parameters which are an array of values. Examples - include mass_scaling, which is an array of scaling parameters, and wavelet - or gaussian decompositions which have different coefficients for each mode. + """Helper for handling parameters which are an array of values. Examples include + mass_scaling, which is an array of scaling parameters, and wavelet or gaussian + decompositions which have different coefficients for each mode. 
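# Illustrative sketch (hypothetical subclass, not from the patch) of the
# SingleParam interface described above: a parameter group converts between
# lenstronomy's dict convention and the flat sampling array.
from lenstronomy.Sampling.param_group import SingleParam

class SourceSizeParam(SingleParam):
    param_names = ["source_size"]
    _kwargs_lower = {"source_size": 0.0}
    _kwargs_upper = {"source_size": 1.0}

group = SourceSizeParam(on=True)
flat = group.set_params({"source_size": 0.1}, kwargs_fixed={})  # -> [0.1]
kwargs, i_next = group.get_params(flat, i=0, kwargs_fixed={})   # dict of params and next flat-array index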
Subclasses should define: @@ -241,23 +231,22 @@ class ArrayParam(ModelParamGroup): :param param_names: Dictionary mapping the name of each parameter to the number of values needed. :param _kwargs_lower: Dictionary. Lower bounds of each parameter :param _kwargs_upper: Dictionary. Upper bounds of each parameter - ''' + """ + def __init__(self, on): - ''' + """ :param on: Whether this paramter should be sampled :type on: bool - ''' + """ self._on = bool(on) def num_params(self, kwargs_fixed): - ''' - Tells the number of parameters that this group samples and their names. + """Tells the number of parameters that this group samples and their names. :param kwargs_fixed: Dictionary of fixed arguments :type kwargs_fixed: dict - :returns: 2-tuple of (num param, list of names) - ''' + """ if not self.on: return 0, [] @@ -271,20 +260,18 @@ def num_params(self, kwargs_fixed): return npar, names def set_params(self, kwargs, kwargs_fixed): - ''' - Converts lenstronomy semantic parameters in dictionary format into a + """Converts lenstronomy semantic parameters in dictionary format into a flattened array of parameters. - The flattened array is for use in optimization algorithms, e.g. MCMC, - Particle swarm, etc. + The flattened array is for use in optimization algorithms, e.g. MCMC, Particle + swarm, etc. :param kwargs: lenstronomy parameters to flatten :type kwargs: dict :param kwargs_fixed: Dictionary of fixed arguments :type kwargs_fixed: dict - :returns: flattened array of parameters as floats - ''' + """ if not self.on: return [] @@ -295,8 +282,7 @@ def set_params(self, kwargs, kwargs_fixed): return args def get_params(self, args, i, kwargs_fixed, kwargs_lower=None, kwargs_upper=None): - ''' - Converts a flattened array of parameters back into a lenstronomy dictionary, + """Converts a flattened array of parameters back into a lenstronomy dictionary, starting at index i. :param args: flattened arguments to convert to lenstronomy format @@ -310,14 +296,14 @@ def get_params(self, args, i, kwargs_fixed, kwargs_lower=None, kwargs_upper=None :param kwargs_upper: Dictionary of upper bounds :type kwargs_upper: dict :returns: dictionary of parameters - ''' + """ if not self.on: return {}, i params = {} for name, count in self.param_names.items(): if name not in kwargs_fixed: - params[name] = args[i:i + count] + params[name] = args[i : i + count] if kwargs_lower is not None: for j in range(len(params[name])): diff --git a/lenstronomy/Sampling/parameters.py b/lenstronomy/Sampling/parameters.py index 398b15b33..cb655a1a6 100644 --- a/lenstronomy/Sampling/parameters.py +++ b/lenstronomy/Sampling/parameters.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import copy @@ -10,13 +10,12 @@ from lenstronomy.PointSource.point_source_param import PointSourceParam from lenstronomy.Sampling.special_param import SpecialParam -__all__ = ['Param'] +__all__ = ["Param"] class Param(object): - """ - class that handles the parameter constraints. In particular when different model profiles share joint constraints. - + """Class that handles the parameter constraints. In particular when different model + profiles share joint constraints. Options between same model classes: @@ -104,26 +103,54 @@ class that handles the parameter constraints. In particular when different model Log10 sampling of the lens parameters : 'log_sampling_lens': [[i_lens, ['param_name1', 'param_name2', ...]], [...], ...], Sample the log10 of the lens model parameters. 
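# Hedged usage sketch of the constraint options listed above; the model lists,
# indices, and parameter names are assumed purely for illustration and are not
# taken from any particular lenstronomy example.
from lenstronomy.Sampling.parameters import Param

kwargs_model = {
    "lens_model_list": ["SIE", "SHEAR"],
    "source_light_model_list": ["SERSIC_ELLIPSE"],
    "point_source_model_list": ["SOURCE_POSITION"],
}
param = Param(
    kwargs_model,
    # tie the centroid of source 0 to point source 0 (defaults to ['center_x', 'center_y'])
    joint_source_with_point_source=[[0, 0]],
    # sample log10(theta_E) of lens 0 instead of theta_E itself
    log_sampling_lens=[[0, ["theta_E"]]],
    num_point_source_list=[1],
)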
- - """ - def __init__(self, kwargs_model, - kwargs_fixed_lens=None, kwargs_fixed_source=None, kwargs_fixed_lens_light=None, kwargs_fixed_ps=None, - kwargs_fixed_special=None, kwargs_fixed_extinction=None, - kwargs_lower_lens=None, kwargs_lower_source=None, kwargs_lower_lens_light=None, kwargs_lower_ps=None, - kwargs_lower_special=None, kwargs_lower_extinction=None, - kwargs_upper_lens=None, kwargs_upper_source=None, kwargs_upper_lens_light=None, kwargs_upper_ps=None, - kwargs_upper_special=None, kwargs_upper_extinction=None, - kwargs_lens_init=None, linear_solver=True, joint_lens_with_lens=[], joint_lens_light_with_lens_light=[], - joint_source_with_source=[], joint_lens_with_light=[], joint_source_with_point_source=[], - joint_lens_light_with_point_source=[], joint_extinction_with_lens_light=[], - joint_lens_with_source_light=[], mass_scaling_list=None, point_source_offset=False, - general_scaling=None, - num_point_source_list=None, image_plane_source_list=None, solver_type='NONE', Ddt_sampling=None, - source_size=False, num_tau0=0, lens_redshift_sampling_indexes=None, - source_redshift_sampling_indexes=None, source_grid_offset=False, num_shapelet_lens=0, - log_sampling_lens=[]): + def __init__( + self, + kwargs_model, + kwargs_fixed_lens=None, + kwargs_fixed_source=None, + kwargs_fixed_lens_light=None, + kwargs_fixed_ps=None, + kwargs_fixed_special=None, + kwargs_fixed_extinction=None, + kwargs_lower_lens=None, + kwargs_lower_source=None, + kwargs_lower_lens_light=None, + kwargs_lower_ps=None, + kwargs_lower_special=None, + kwargs_lower_extinction=None, + kwargs_upper_lens=None, + kwargs_upper_source=None, + kwargs_upper_lens_light=None, + kwargs_upper_ps=None, + kwargs_upper_special=None, + kwargs_upper_extinction=None, + kwargs_lens_init=None, + linear_solver=True, + joint_lens_with_lens=[], + joint_lens_light_with_lens_light=[], + joint_source_with_source=[], + joint_lens_with_light=[], + joint_source_with_point_source=[], + joint_lens_light_with_point_source=[], + joint_extinction_with_lens_light=[], + joint_lens_with_source_light=[], + mass_scaling_list=None, + point_source_offset=False, + general_scaling=None, + num_point_source_list=None, + image_plane_source_list=None, + solver_type="NONE", + Ddt_sampling=None, + source_size=False, + num_tau0=0, + lens_redshift_sampling_indexes=None, + source_redshift_sampling_indexes=None, + source_grid_offset=False, + num_shapelet_lens=0, + log_sampling_lens=[], + ): """ :param kwargs_model: keyword arguments to describe all model components used in class_creator.create_class_instances() @@ -193,13 +220,15 @@ def __init__(self, kwargs_model, :param log_sampling_lens: Sample the log10 of the lens model parameters. 
Format : [[i_lens, ['param_name1', 'param_name2', ...]], [...], ...], """ - self._lens_model_list = kwargs_model.get('lens_model_list', []) - self._lens_redshift_list = kwargs_model.get('lens_redshift_list', None) - self._source_light_model_list = kwargs_model.get('source_light_model_list', []) - self._source_redshift_list = kwargs_model.get('source_redshift_list', None) - self._lens_light_model_list = kwargs_model.get('lens_light_model_list', []) - self._point_source_model_list = kwargs_model.get('point_source_model_list', []) - self._optical_depth_model_list = kwargs_model.get('optical_depth_model_list', []) + self._lens_model_list = kwargs_model.get("lens_model_list", []) + self._lens_redshift_list = kwargs_model.get("lens_redshift_list", None) + self._source_light_model_list = kwargs_model.get("source_light_model_list", []) + self._source_redshift_list = kwargs_model.get("source_redshift_list", None) + self._lens_light_model_list = kwargs_model.get("lens_light_model_list", []) + self._point_source_model_list = kwargs_model.get("point_source_model_list", []) + self._optical_depth_model_list = kwargs_model.get( + "optical_depth_model_list", [] + ) self._kwargs_model = kwargs_model # check how many redshifts need to be sampled @@ -209,18 +238,37 @@ def __init__(self, kwargs_model, if source_redshift_sampling_indexes is not None: num_z_source = int(np.max(source_redshift_sampling_indexes) + 1) num_z_sampling = max(num_z_sampling, num_z_source) - self._num_z_sampling, self._lens_redshift_sampling_indexes, self._source_redshift_sampling_indexes = num_z_sampling, lens_redshift_sampling_indexes, source_redshift_sampling_indexes - - self._lens_model_class, self._source_model_class, _, _, _ = class_creator.create_class_instances(all_models=True, **kwargs_model) - self._image2SourceMapping = Image2SourceMapping(lensModel=self._lens_model_class, - sourceModel=self._source_model_class) + ( + self._num_z_sampling, + self._lens_redshift_sampling_indexes, + self._source_redshift_sampling_indexes, + ) = ( + num_z_sampling, + lens_redshift_sampling_indexes, + source_redshift_sampling_indexes, + ) + + ( + self._lens_model_class, + self._source_model_class, + _, + _, + _, + ) = class_creator.create_class_instances(all_models=True, **kwargs_model) + self._image2SourceMapping = Image2SourceMapping( + lensModel=self._lens_model_class, sourceModel=self._source_model_class + ) if kwargs_fixed_lens is None: kwargs_fixed_lens = [{} for _ in range(len(self._lens_model_list))] if kwargs_fixed_source is None: - kwargs_fixed_source = [{} for _ in range(len(self._source_light_model_list))] + kwargs_fixed_source = [ + {} for _ in range(len(self._source_light_model_list)) + ] if kwargs_fixed_lens_light is None: - kwargs_fixed_lens_light = [{} for _ in range(len(self._lens_light_model_list))] + kwargs_fixed_lens_light = [ + {} for _ in range(len(self._lens_light_model_list)) + ] if kwargs_fixed_ps is None: kwargs_fixed_ps = [{} for _ in range(len(self._point_source_model_list))] if kwargs_fixed_special is None: @@ -232,20 +280,26 @@ def __init__(self, kwargs_model, self._joint_lens_with_light = joint_lens_with_light self._joint_lens_with_source_light = joint_lens_with_source_light - self._joint_source_with_point_source = copy.deepcopy(joint_source_with_point_source) + self._joint_source_with_point_source = copy.deepcopy( + joint_source_with_point_source + ) # Set up the parameters being sampled in log space in a similar way than the parameters being fixed. 
self._log_sampling_lens = log_sampling_lens kwargs_logsampling_lens = [[] for i in range(len(self._lens_model_list))] - kwargs_logsampling_lens = self._update_log_sampling(kwargs_logsampling_lens, log_sampling_lens) + kwargs_logsampling_lens = self._update_log_sampling( + kwargs_logsampling_lens, log_sampling_lens + ) for param_list in self._joint_source_with_point_source: if len(param_list) == 2: - param_list.append(['center_x', 'center_y']) - self._joint_lens_light_with_point_source = copy.deepcopy(joint_lens_light_with_point_source) + param_list.append(["center_x", "center_y"]) + self._joint_lens_light_with_point_source = copy.deepcopy( + joint_lens_light_with_point_source + ) for param_list in self._joint_lens_light_with_point_source: if len(param_list) == 2: - param_list.append(['center_x', 'center_y']) + param_list.append(["center_x", "center_y"]) if mass_scaling_list is None: mass_scaling_list = [False] * len(self._lens_model_list) self._mass_scaling_list = mass_scaling_list @@ -281,62 +335,118 @@ def __init__(self, kwargs_model, except: self._num_images = 0 self._solver_type = solver_type - if self._solver_type == 'NONE': + if self._solver_type == "NONE": self._solver = False else: self._solver = True - self._solver_module = Solver(solver_type=self._solver_type, lensModel=self._lens_model_class, - num_images=self._num_images) + self._solver_module = Solver( + solver_type=self._solver_type, + lensModel=self._lens_model_class, + num_images=self._num_images, + ) source_model_list = self._source_light_model_list - if len(source_model_list) != 1 or source_model_list[0] not in ['SLIT_STARLETS', 'SLIT_STARLETS_GEN2']: + if len(source_model_list) != 1 or source_model_list[0] not in [ + "SLIT_STARLETS", + "SLIT_STARLETS_GEN2", + ]: # source_grid_offset only defined for source profiles compatible with pixel-based solver source_grid_offset = False self._joint_extinction_with_lens_light = joint_extinction_with_lens_light # fix parameters joint within the same model types - kwargs_fixed_lens_updated = self._add_fixed_lens(kwargs_fixed_lens, kwargs_lens_init) - kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, self._joint_lens_with_lens) - kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, self._joint_lens_with_source_light) - kwargs_fixed_lens_light_updated = self._fix_joint_param(kwargs_fixed_lens_light, self._joint_lens_light_with_lens_light) - kwargs_fixed_source_updated = self._fix_joint_param(kwargs_fixed_source, self._joint_source_with_source) + kwargs_fixed_lens_updated = self._add_fixed_lens( + kwargs_fixed_lens, kwargs_lens_init + ) + kwargs_fixed_lens_updated = self._fix_joint_param( + kwargs_fixed_lens_updated, self._joint_lens_with_lens + ) + kwargs_fixed_lens_updated = self._fix_joint_param( + kwargs_fixed_lens_updated, self._joint_lens_with_source_light + ) + kwargs_fixed_lens_light_updated = self._fix_joint_param( + kwargs_fixed_lens_light, self._joint_lens_light_with_lens_light + ) + kwargs_fixed_source_updated = self._fix_joint_param( + kwargs_fixed_source, self._joint_source_with_source + ) kwargs_fixed_ps_updated = copy.deepcopy(kwargs_fixed_ps) - kwargs_fixed_extinction_updated = self._fix_joint_param(kwargs_fixed_extinction, self._joint_extinction_with_lens_light) + kwargs_fixed_extinction_updated = self._fix_joint_param( + kwargs_fixed_extinction, self._joint_extinction_with_lens_light + ) # fix parameters joint with other model types - kwargs_fixed_lens_updated = self._fix_joint_param(kwargs_fixed_lens_updated, 
self._joint_lens_with_light) - kwargs_fixed_source_updated = self._fix_joint_param(kwargs_fixed_source_updated, self._joint_source_with_point_source) - kwargs_fixed_lens_light_updated = self._fix_joint_param(kwargs_fixed_lens_light_updated, - self._joint_lens_light_with_point_source) - self.lensParams = LensParam(self._lens_model_list, kwargs_fixed_lens_updated, - kwargs_logsampling=kwargs_logsampling_lens, - num_images=self._num_images, - solver_type=self._solver_type, kwargs_lower=kwargs_lower_lens, - kwargs_upper=kwargs_upper_lens, num_shapelet_lens=num_shapelet_lens) - self.lensLightParams = LightParam(self._lens_light_model_list, kwargs_fixed_lens_light_updated, param_type='lens_light', - linear_solver=linear_solver, kwargs_lower=kwargs_lower_lens_light, - kwargs_upper=kwargs_upper_lens_light) - self.sourceParams = LightParam(self._source_light_model_list, kwargs_fixed_source_updated, param_type='source_light', - linear_solver=linear_solver, kwargs_lower=kwargs_lower_source, - kwargs_upper=kwargs_upper_source) - self.pointSourceParams = PointSourceParam(self._point_source_model_list, kwargs_fixed_ps_updated, - num_point_source_list=num_point_source_list, - linear_solver=linear_solver, kwargs_lower=kwargs_lower_ps, - kwargs_upper=kwargs_upper_ps) - self.extinctionParams = LightParam(self._optical_depth_model_list, kwargs_fixed_extinction_updated, - kwargs_lower=kwargs_lower_extinction, kwargs_upper=kwargs_upper_extinction, - linear_solver=False) - self.specialParams = SpecialParam(Ddt_sampling=Ddt_sampling, mass_scaling=self._mass_scaling, - general_scaling_params=self._general_scaling_masks, - kwargs_fixed=kwargs_fixed_special, num_scale_factor=self._num_scale_factor, - kwargs_lower=kwargs_lower_special, kwargs_upper=kwargs_upper_special, - point_source_offset=self._point_source_offset, num_images=self._num_images, - source_size=source_size, num_tau0=num_tau0, num_z_sampling=num_z_sampling, - source_grid_offset=source_grid_offset) + kwargs_fixed_lens_updated = self._fix_joint_param( + kwargs_fixed_lens_updated, self._joint_lens_with_light + ) + kwargs_fixed_source_updated = self._fix_joint_param( + kwargs_fixed_source_updated, self._joint_source_with_point_source + ) + kwargs_fixed_lens_light_updated = self._fix_joint_param( + kwargs_fixed_lens_light_updated, self._joint_lens_light_with_point_source + ) + self.lensParams = LensParam( + self._lens_model_list, + kwargs_fixed_lens_updated, + kwargs_logsampling=kwargs_logsampling_lens, + num_images=self._num_images, + solver_type=self._solver_type, + kwargs_lower=kwargs_lower_lens, + kwargs_upper=kwargs_upper_lens, + num_shapelet_lens=num_shapelet_lens, + ) + self.lensLightParams = LightParam( + self._lens_light_model_list, + kwargs_fixed_lens_light_updated, + param_type="lens_light", + linear_solver=linear_solver, + kwargs_lower=kwargs_lower_lens_light, + kwargs_upper=kwargs_upper_lens_light, + ) + self.sourceParams = LightParam( + self._source_light_model_list, + kwargs_fixed_source_updated, + param_type="source_light", + linear_solver=linear_solver, + kwargs_lower=kwargs_lower_source, + kwargs_upper=kwargs_upper_source, + ) + self.pointSourceParams = PointSourceParam( + self._point_source_model_list, + kwargs_fixed_ps_updated, + num_point_source_list=num_point_source_list, + linear_solver=linear_solver, + kwargs_lower=kwargs_lower_ps, + kwargs_upper=kwargs_upper_ps, + ) + self.extinctionParams = LightParam( + self._optical_depth_model_list, + kwargs_fixed_extinction_updated, + kwargs_lower=kwargs_lower_extinction, + 
kwargs_upper=kwargs_upper_extinction, + linear_solver=False, + ) + self.specialParams = SpecialParam( + Ddt_sampling=Ddt_sampling, + mass_scaling=self._mass_scaling, + general_scaling_params=self._general_scaling_masks, + kwargs_fixed=kwargs_fixed_special, + num_scale_factor=self._num_scale_factor, + kwargs_lower=kwargs_lower_special, + kwargs_upper=kwargs_upper_special, + point_source_offset=self._point_source_offset, + num_images=self._num_images, + source_size=source_size, + num_tau0=num_tau0, + num_z_sampling=num_z_sampling, + source_grid_offset=source_grid_offset, + ) for lens_source_joint in self._joint_lens_with_source_light: i_source = lens_source_joint[0] if i_source in self._image_plane_source_list: - raise ValueError("linking a source light model with a lens model AND simultaneously parameterizing the" - " source position in the image plane is not valid!") + raise ValueError( + "linking a source light model with a lens model AND simultaneously parameterizing the" + " source position in the image plane is not valid!" + ) self._linear_solver = linear_solver @property @@ -349,8 +459,7 @@ def num_point_source_images(self): @property def linear_solver(self): - """ - boolean to state whether linear solver is activated or not + """Boolean to state whether linear solver is activated or not. :return: boolean """ @@ -376,47 +485,70 @@ def args2kwargs(self, args, bijective=False): kwargs_extinction, i = self.extinctionParams.get_params(args, i) self._update_lens_model(kwargs_special) # update lens_light joint parameters - kwargs_lens_light = self._update_lens_light_joint_with_point_source(kwargs_lens_light, kwargs_ps) - kwargs_lens_light = self._update_joint_param(kwargs_lens_light, kwargs_lens_light, - self._joint_lens_light_with_lens_light) + kwargs_lens_light = self._update_lens_light_joint_with_point_source( + kwargs_lens_light, kwargs_ps + ) + kwargs_lens_light = self._update_joint_param( + kwargs_lens_light, kwargs_lens_light, self._joint_lens_light_with_lens_light + ) # update lens_light joint with lens model parameters - kwargs_lens = self._update_joint_param(kwargs_lens_light, kwargs_lens, self._joint_lens_with_light) - kwargs_lens = self._update_joint_param(kwargs_source, kwargs_lens, self._joint_lens_with_source_light) + kwargs_lens = self._update_joint_param( + kwargs_lens_light, kwargs_lens, self._joint_lens_with_light + ) + kwargs_lens = self._update_joint_param( + kwargs_source, kwargs_lens, self._joint_lens_with_source_light + ) # update extinction model with lens light model - kwargs_extinction = self._update_joint_param(kwargs_lens_light, kwargs_extinction, - self._joint_extinction_with_lens_light) + kwargs_extinction = self._update_joint_param( + kwargs_lens_light, kwargs_extinction, self._joint_extinction_with_lens_light + ) # update lens model joint parameters (including scaling) - kwargs_lens = self._update_joint_param(kwargs_lens, kwargs_lens, self._joint_lens_with_lens) + kwargs_lens = self._update_joint_param( + kwargs_lens, kwargs_lens, self._joint_lens_with_lens + ) kwargs_lens = self.update_lens_scaling(kwargs_special, kwargs_lens) # update point source constraint solver if self._solver is True: - x_pos, y_pos = kwargs_ps[0]['ra_image'], kwargs_ps[0]['dec_image'] + x_pos, y_pos = kwargs_ps[0]["ra_image"], kwargs_ps[0]["dec_image"] kwargs_lens = self._solver_module.update_solver(kwargs_lens, x_pos, y_pos) # update source joint with point source - kwargs_source = self._update_source_joint_with_point_source(kwargs_lens, kwargs_source, kwargs_ps, - kwargs_special, 
image_plane=bijective) + kwargs_source = self._update_source_joint_with_point_source( + kwargs_lens, kwargs_source, kwargs_ps, kwargs_special, image_plane=bijective + ) # update source joint with source - kwargs_source = self._update_joint_param(kwargs_source, kwargs_source, self._joint_source_with_source) + kwargs_source = self._update_joint_param( + kwargs_source, kwargs_source, self._joint_source_with_source + ) # optional revert lens_scaling for bijective if bijective is True: - kwargs_lens = self.update_lens_scaling(kwargs_special, kwargs_lens, inverse=True) - kwargs_return = {'kwargs_lens': kwargs_lens, 'kwargs_source': kwargs_source, - 'kwargs_lens_light': kwargs_lens_light, 'kwargs_ps': kwargs_ps, - 'kwargs_special': kwargs_special, 'kwargs_extinction': kwargs_extinction} + kwargs_lens = self.update_lens_scaling( + kwargs_special, kwargs_lens, inverse=True + ) + kwargs_return = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + "kwargs_ps": kwargs_ps, + "kwargs_special": kwargs_special, + "kwargs_extinction": kwargs_extinction, + } return kwargs_return - def kwargs2args(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_special=None, kwargs_extinction=None): - """ - inverse of getParam function - :param kwargs_lens: keyword arguments depending on model options - :param kwargs_source: keyword arguments depending on model options - :param kwargs_lens_light: lens light model keyword argument list - :param kwargs_ps: point source model keyword argument list - :param kwargs_special: special keyword arguments - :param kwargs_extinction: extinction model keyword argument list - :return: numpy array of parameters - """ + def kwargs2args( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): + """Inverse of getParam function :param kwargs_lens: keyword arguments depending + on model options :param kwargs_source: keyword arguments depending on model + options :param kwargs_lens_light: lens light model keyword argument list :param + kwargs_ps: point source model keyword argument list :param kwargs_special: + special keyword arguments :param kwargs_extinction: extinction model keyword + argument list :return: numpy array of parameters.""" args = self.lensParams.set_params(kwargs_lens) args += self.sourceParams.set_params(kwargs_source) args += self.lensLightParams.set_params(kwargs_lens_light) @@ -430,18 +562,22 @@ def param_limits(self): :return: lower and upper limits of the arguments being sampled """ - lower_limit = self.kwargs2args(kwargs_lens=self.lensParams.lower_limit, - kwargs_source=self.sourceParams.lower_limit, - kwargs_lens_light=self.lensLightParams.lower_limit, - kwargs_ps=self.pointSourceParams.lower_limit, - kwargs_special=self.specialParams.lower_limit, - kwargs_extinction=self.extinctionParams.lower_limit) - upper_limit = self.kwargs2args(kwargs_lens=self.lensParams.upper_limit, - kwargs_source=self.sourceParams.upper_limit, - kwargs_lens_light=self.lensLightParams.upper_limit, - kwargs_ps=self.pointSourceParams.upper_limit, - kwargs_special=self.specialParams.upper_limit, - kwargs_extinction=self.extinctionParams.upper_limit) + lower_limit = self.kwargs2args( + kwargs_lens=self.lensParams.lower_limit, + kwargs_source=self.sourceParams.lower_limit, + kwargs_lens_light=self.lensLightParams.lower_limit, + kwargs_ps=self.pointSourceParams.lower_limit, + 
kwargs_special=self.specialParams.lower_limit, + kwargs_extinction=self.extinctionParams.lower_limit, + ) + upper_limit = self.kwargs2args( + kwargs_lens=self.lensParams.upper_limit, + kwargs_source=self.sourceParams.upper_limit, + kwargs_lens_light=self.lensLightParams.upper_limit, + kwargs_ps=self.pointSourceParams.upper_limit, + kwargs_special=self.specialParams.upper_limit, + kwargs_extinction=self.extinctionParams.upper_limit, + ) return lower_limit, upper_limit def num_param(self): @@ -479,60 +615,82 @@ def num_param_linear(self): return num def image2source_plane(self, kwargs_source, kwargs_lens, image_plane=False): - """ - maps the image plane position definition of the source plane + """Maps the image plane position definition of the source plane. :param kwargs_source: source light model keyword argument list :param kwargs_lens: lens model keyword argument list - :param image_plane: boolean, if True, does not up map image plane parameters to source plane - :return: source light model keyword arguments with mapped position arguments from image to source plane + :param image_plane: boolean, if True, does not up map image plane parameters to + source plane + :return: source light model keyword arguments with mapped position arguments + from image to source plane """ kwargs_source_copy = copy.deepcopy(kwargs_source) for i, kwargs in enumerate(kwargs_source_copy): if self._image_plane_source_list[i] is True and not image_plane: - if 'center_x' in kwargs: - x_mapped, y_mapped = self._image2SourceMapping.image2source(kwargs['center_x'], kwargs['center_y'], - kwargs_lens, index_source=i) - kwargs['center_x'] = x_mapped - kwargs['center_y'] = y_mapped + if "center_x" in kwargs: + x_mapped, y_mapped = self._image2SourceMapping.image2source( + kwargs["center_x"], + kwargs["center_y"], + kwargs_lens, + index_source=i, + ) + kwargs["center_x"] = x_mapped + kwargs["center_y"] = y_mapped return kwargs_source_copy - def _update_source_joint_with_point_source(self, kwargs_lens_list, kwargs_source_list, kwargs_ps, kwargs_special, - image_plane=False): - kwargs_source_list = self.image2source_plane(kwargs_source_list, kwargs_lens_list, image_plane=image_plane) + def _update_source_joint_with_point_source( + self, + kwargs_lens_list, + kwargs_source_list, + kwargs_ps, + kwargs_special, + image_plane=False, + ): + kwargs_source_list = self.image2source_plane( + kwargs_source_list, kwargs_lens_list, image_plane=image_plane + ) for setting in self._joint_source_with_point_source: i_point_source, k_source, param_list = setting - if 'ra_source' in kwargs_ps[i_point_source]: - x_mapped = kwargs_ps[i_point_source]['ra_source'] - y_mapped = kwargs_ps[i_point_source]['dec_source'] + if "ra_source" in kwargs_ps[i_point_source]: + x_mapped = kwargs_ps[i_point_source]["ra_source"] + y_mapped = kwargs_ps[i_point_source]["dec_source"] else: - x_pos, y_pos = kwargs_ps[i_point_source]['ra_image'], kwargs_ps[i_point_source]['dec_image'] + x_pos, y_pos = ( + kwargs_ps[i_point_source]["ra_image"], + kwargs_ps[i_point_source]["dec_image"], + ) # x_pos, y_pos = self.real_image_positions(x_pos, y_pos, kwargs_special) - x_mapped, y_mapped = self._image2SourceMapping.image2source(x_pos, y_pos, kwargs_lens_list, - index_source=k_source) + x_mapped, y_mapped = self._image2SourceMapping.image2source( + x_pos, y_pos, kwargs_lens_list, index_source=k_source + ) for param_name in param_list: - if param_name == 'center_x': + if param_name == "center_x": kwargs_source_list[k_source][param_name] = np.mean(x_mapped) - elif param_name 
== 'center_y': + elif param_name == "center_y": kwargs_source_list[k_source][param_name] = np.mean(y_mapped) else: - kwargs_source_list[k_source][param_name] = kwargs_ps[i_point_source][param_name] + kwargs_source_list[k_source][param_name] = kwargs_ps[ + i_point_source + ][param_name] return kwargs_source_list - def _update_lens_light_joint_with_point_source(self, kwargs_lens_light_list, kwargs_ps): - + def _update_lens_light_joint_with_point_source( + self, kwargs_lens_light_list, kwargs_ps + ): for setting in self._joint_lens_light_with_point_source: i_point_source, k_lens_light, param_list = setting - if 'ra_image' in kwargs_ps[i_point_source]: - x_mapped = kwargs_ps[i_point_source]['ra_image'] - y_mapped = kwargs_ps[i_point_source]['dec_image'] + if "ra_image" in kwargs_ps[i_point_source]: + x_mapped = kwargs_ps[i_point_source]["ra_image"] + y_mapped = kwargs_ps[i_point_source]["dec_image"] else: - raise ValueError("Joint lens light with point source not possible as point source is defined in the source plane!") + raise ValueError( + "Joint lens light with point source not possible as point source is defined in the source plane!" + ) for param_name in param_list: - if param_name == 'center_x': + if param_name == "center_x": kwargs_lens_light_list[k_lens_light][param_name] = np.mean(x_mapped) - elif param_name == 'center_y': + elif param_name == "center_y": kwargs_lens_light_list[k_lens_light][param_name] = np.mean(y_mapped) return kwargs_lens_light_list @@ -555,24 +713,26 @@ def _update_joint_param(kwargs_list_1, kwargs_list_2, joint_setting_list): for param_to, param_from in param_list.items(): kwargs_list_2[k_2][param_to] = kwargs_list_1[i_1][param_from] else: - raise TypeError("Bad format for constraint setting: got %s" % param_list) + raise TypeError( + "Bad format for constraint setting: got %s" % param_list + ) return kwargs_list_2 @staticmethod def _update_log_sampling(kwargs_logsampling_lens, log_sampling_lens): - """ - Update the list of parameters being sampled in log-space - :param kwargs_logsampling_lens: list of list of parameters to sample in log10 - :param log_sampling_lens: [[i_1, ['param_name1', 'param_name2', ...]], [...], ...] - :return: updated kwargs_logsampling_lens - """ + """Update the list of parameters being sampled in log-space :param + kwargs_logsampling_lens: list of list of parameters to sample in log10 :param + log_sampling_lens: [[i_1, ['param_name1', 'param_name2', ...]], [...], ...] + :return: updated kwargs_logsampling_lens.""" for setting in log_sampling_lens: i_1, param_list = setting if type(param_list) == list: kwargs_logsampling_lens[i_1] = param_list else: raise TypeError( - "Bad format for constraint setting: got %s. This should be in the format [[i_1, ['param_name1', 'param_name2', ...]], [...], ...]" % param_list) + "Bad format for constraint setting: got %s. This should be in the format [[i_1, ['param_name1', 'param_name2', ...]], [...], ...]" + % param_list + ) return kwargs_logsampling_lens @staticmethod @@ -591,12 +751,12 @@ def _fix_joint_param(kwargs_list_2, joint_setting_list): return kwargs_list_2_update def update_lens_scaling(self, kwargs_special, kwargs_lens, inverse=False): - """ - multiplies the scaling parameters of the profiles + """Multiplies the scaling parameters of the profiles. 
:param kwargs_special: keyword arguments of the 'special' arguments :param kwargs_lens: lens model keyword argument list - :param inverse: bool, if True, performs the inverse lens scaling for bijective transforms + :param inverse: bool, if True, performs the inverse lens scaling for bijective + transforms :return: updated lens model keyword argument list """ kwargs_lens_updated = copy.deepcopy(kwargs_lens) @@ -607,35 +767,40 @@ def update_lens_scaling(self, kwargs_special, kwargs_lens, inverse=False): # TODO: remove separate logic for mass scaling. either deprecate it # entirely, implement the details as a special case of general_scaling if self._mass_scaling: - scale_factor_list = np.array(kwargs_special['scale_factor']) + scale_factor_list = np.array(kwargs_special["scale_factor"]) if inverse is True: - scale_factor_list = 1. / np.array(kwargs_special['scale_factor']) + scale_factor_list = 1.0 / np.array(kwargs_special["scale_factor"]) for i, kwargs in enumerate(kwargs_lens_updated): if self._mass_scaling_list[i] is not False: scale_factor = scale_factor_list[self._mass_scaling_list[i] - 1] - if 'theta_E' in kwargs: - kwargs['theta_E'] *= scale_factor - elif 'alpha_Rs' in kwargs: - kwargs['alpha_Rs'] *= scale_factor - elif 'alpha_1' in kwargs: - kwargs['alpha_1'] *= scale_factor - elif 'sigma0' in kwargs: - kwargs['sigma0'] *= scale_factor - elif 'k_eff' in kwargs: - kwargs['k_eff'] *= scale_factor + if "theta_E" in kwargs: + kwargs["theta_E"] *= scale_factor + elif "alpha_Rs" in kwargs: + kwargs["alpha_Rs"] *= scale_factor + elif "alpha_1" in kwargs: + kwargs["alpha_1"] *= scale_factor + elif "sigma0" in kwargs: + kwargs["sigma0"] *= scale_factor + elif "k_eff" in kwargs: + kwargs["k_eff"] *= scale_factor if self._general_scaling: for param_name in self._general_scaling_masks.keys(): - factors = kwargs_special[f'{param_name}_scale_factor'] - _pows = kwargs_special[f'{param_name}_scale_pow'] + factors = kwargs_special[f"{param_name}_scale_factor"] + _pows = kwargs_special[f"{param_name}_scale_pow"] for i, kwargs in enumerate(kwargs_lens_updated): scale_idx = self._general_scaling_masks[param_name][i] if scale_idx is not False: if inverse: - kwargs[param_name] = (kwargs[param_name] / factors[scale_idx - 1]) ** (1 / _pows[scale_idx - 1]) + kwargs[param_name] = ( + kwargs[param_name] / factors[scale_idx - 1] + ) ** (1 / _pows[scale_idx - 1]) else: - kwargs[param_name] = factors[scale_idx - 1] * kwargs[param_name]**_pows[scale_idx - 1] + kwargs[param_name] = ( + factors[scale_idx - 1] + * kwargs[param_name] ** _pows[scale_idx - 1] + ) return kwargs_lens_updated @@ -643,29 +808,39 @@ def _add_fixed_lens(self, kwargs_fixed, kwargs_init): kwargs_fixed_update = copy.deepcopy(kwargs_fixed) if self._solver is True: if kwargs_init is None: - raise ValueError("kwargs_lens_init must be specified when the point source solver is enabled!") - kwargs_fixed_update = self._solver_module.add_fixed_lens(kwargs_fixed_update, kwargs_init) + raise ValueError( + "kwargs_lens_init must be specified when the point source solver is enabled!" + ) + kwargs_fixed_update = self._solver_module.add_fixed_lens( + kwargs_fixed_update, kwargs_init + ) return kwargs_fixed_update def update_kwargs_model(self, kwargs_special): - """ - updates model keyword arguments with redshifts being sampled + """Updates model keyword arguments with redshifts being sampled. 
- :param kwargs_special: keyword arguments from SpecialParam() class return of sampling arguments + :param kwargs_special: keyword arguments from SpecialParam() class return of + sampling arguments :return: kwargs_model, bool (True if kwargs_model has changed, else False) """ if self._num_z_sampling == 0: return self._kwargs_model, False - z_samples = kwargs_special.get('z_sampling') + z_samples = kwargs_special.get("z_sampling") lens_redshift_list = copy.deepcopy(self._lens_redshift_list) - if not(self._lens_redshift_list is None or self._lens_redshift_sampling_indexes is None): + if not ( + self._lens_redshift_list is None + or self._lens_redshift_sampling_indexes is None + ): # iterate through index lists for i, index in enumerate(self._lens_redshift_sampling_indexes): # update redshifts of lens and source redshift list in new form if index > -1: lens_redshift_list[i] = z_samples[index] source_redshift_list = copy.deepcopy(self._source_redshift_list) - if not (self._source_redshift_list is None or self._source_redshift_sampling_indexes is None): + if not ( + self._source_redshift_list is None + or self._source_redshift_sampling_indexes is None + ): # iterate through index lists for i, index in enumerate(self._source_redshift_sampling_indexes): # update redshifts of lens and source redshift list in new form @@ -673,44 +848,48 @@ def update_kwargs_model(self, kwargs_special): source_redshift_list[i] = z_samples[index] # update lens model and source model classes kwargs_model = copy.deepcopy(self._kwargs_model) - kwargs_model['lens_redshift_list'] = lens_redshift_list - kwargs_model['source_redshift_list'] = source_redshift_list + kwargs_model["lens_redshift_list"] = lens_redshift_list + kwargs_model["source_redshift_list"] = source_redshift_list return kwargs_model, True def _update_lens_model(self, kwargs_special): - """ - updates lens model instance of this class (and all class instances related to it) when an update to the - modeled redshifts of the deflector and/or source planes are made + """Updates lens model instance of this class (and all class instances related to + it) when an update to the modeled redshifts of the deflector and/or source + planes are made. - :param kwargs_special: keyword arguments from SpecialParam() class return of sampling arguments + :param kwargs_special: keyword arguments from SpecialParam() class return of + sampling arguments :return: None, internal calls instance updated """ kwargs_model, update_bool = self.update_kwargs_model(kwargs_special) if update_bool is True: # TODO: this class instances are effectively duplicated in the likelihood module and may cause a lot of overhead # in the calculation as the instances are re-generated every step, and even so doing it twice! 
- self._lens_model_class, self._source_model_class, _, _, _ = class_creator.create_class_instances( - all_models=True, **kwargs_model) - self._image2SourceMapping = Image2SourceMapping(lensModel=self._lens_model_class, - sourceModel=self._source_model_class) + ( + self._lens_model_class, + self._source_model_class, + _, + _, + _, + ) = class_creator.create_class_instances(all_models=True, **kwargs_model) + self._image2SourceMapping = Image2SourceMapping( + lensModel=self._lens_model_class, sourceModel=self._source_model_class + ) def check_solver(self, kwargs_lens, kwargs_ps): - """ - test whether the image positions map back to the same source position - :param kwargs_lens: lens model keyword argument list - :param kwargs_ps: point source model keyword argument list - :return: Euclidean distance between the ray-shooting of the image positions - """ + """Test whether the image positions map back to the same source position :param + kwargs_lens: lens model keyword argument list :param kwargs_ps: point source + model keyword argument list :return: Euclidean distance between the ray-shooting + of the image positions.""" if self._solver is True: - image_x, image_y = kwargs_ps[0]['ra_image'], kwargs_ps[0]['dec_image'] + image_x, image_y = kwargs_ps[0]["ra_image"], kwargs_ps[0]["dec_image"] dist = self._solver_module.check_solver(image_x, image_y, kwargs_lens) return np.max(dist) else: return 0 def print_setting(self): - """ - prints the setting of the parameter class + """Prints the setting of the parameter class. :return: """ @@ -732,11 +911,16 @@ def print_setting(self): print("===================") print("Joint parameters for different models") print("Joint lens with lens:", self._joint_lens_with_lens) - print("Joint lens light with lens light:", self._joint_lens_light_with_lens_light) + print( + "Joint lens light with lens light:", self._joint_lens_light_with_lens_light + ) print("Joint source with source:", self._joint_source_with_source) print("Joint lens with light:", self._joint_lens_with_light) print("Joint source with point source:", self._joint_source_with_point_source) - print("Joint lens light with point source:", self._joint_lens_light_with_point_source) + print( + "Joint lens light with point source:", + self._joint_lens_light_with_point_source, + ) print("Mass scaling:", self._num_scale_factor, "groups") print("General lens scaling:", self._general_scaling_masks) print("===================") diff --git a/lenstronomy/Sampling/sampler.py b/lenstronomy/Sampling/sampler.py index 90acc8c69..35e20b6a5 100644 --- a/lenstronomy/Sampling/sampler.py +++ b/lenstronomy/Sampling/sampler.py @@ -1,4 +1,4 @@ -__author__ = ['sibirrer', 'ajshajib', 'dgilman', 'nataliehogg'] +__author__ = ["sibirrer", "ajshajib", "dgilman", "nataliehogg"] import time @@ -8,7 +8,7 @@ from lenstronomy.Sampling.Pool.pool import choose_pool from scipy.optimize import minimize -__all__ = ['Sampler'] +__all__ = ["Sampler"] class Sampler(object): @@ -19,6 +19,7 @@ class which executes the different sampling methods Feel free to sample with your convenient sampler! 
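# Hedged usage sketch for the Sampler methods defined below; `likelihood_module`
# is assumed to be a pre-built LikelihoodModule instance (normally assembled by
# the FittingSequence workflow), and the particle/walker numbers are arbitrary.
import numpy as np
from lenstronomy.Sampling.sampler import Sampler

sampler = Sampler(likelihoodModule=likelihood_module)
best_fit, (logL_list, pos_list, vel_list) = sampler.pso(n_particles=100, n_iterations=50)
# crude walker spread around the PSO best fit, for illustration only
sigma_start = 0.1 * np.abs(np.array(best_fit)) + 0.01
samples, log_prob = sampler.mcmc_emcee(
    n_walkers=100, n_run=200, n_burn=100, mean_start=best_fit, sigma_start=sigma_start
)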
""" + def __init__(self, likelihoodModule): """ @@ -27,7 +28,7 @@ def __init__(self, likelihoodModule): self.chain = likelihoodModule self.lower_limit, self.upper_limit = self.chain.param_limits - def simplex(self, init_pos, n_iterations, method, print_key='SIMPLEX'): + def simplex(self, init_pos, n_iterations, method, print_key="SIMPLEX"): """ :param init_pos: starting point for the optimization @@ -35,46 +36,69 @@ def simplex(self, init_pos, n_iterations, method, print_key='SIMPLEX'): :param method: the optimization method, default is 'Nelder-Mead' :return: the best fit for the lens model using the optimization routine specified by method """ - print('Performing the optimization using algorithm:', method) + print("Performing the optimization using algorithm:", method) time_start = time.time() - result = minimize(self.chain.negativelogL, x0=init_pos, method=method, - options={'maxiter': n_iterations, 'disp': True}) - logL = self.chain.logL(result['x']) - kwargs_return = self.chain.param.args2kwargs(result['x']) - print(-logL * 2 / (max(self.chain.effective_num_data_points(**kwargs_return), 1)), - 'reduced X^2 of best position') - print(logL, 'log likelihood') - print(self.chain.effective_num_data_points(**kwargs_return), 'effective number of data points') - print(kwargs_return.get('kwargs_lens', None), 'lens result') - print(kwargs_return.get('kwargs_source', None), 'source result') - print(kwargs_return.get('kwargs_lens_light', None), 'lens light result') - print(kwargs_return.get('kwargs_ps', None), 'point source result') - print(kwargs_return.get('kwargs_special', None), 'special param result') + result = minimize( + self.chain.negativelogL, + x0=init_pos, + method=method, + options={"maxiter": n_iterations, "disp": True}, + ) + logL = self.chain.logL(result["x"]) + kwargs_return = self.chain.param.args2kwargs(result["x"]) + print( + -logL * 2 / (max(self.chain.effective_num_data_points(**kwargs_return), 1)), + "reduced X^2 of best position", + ) + print(logL, "log likelihood") + print( + self.chain.effective_num_data_points(**kwargs_return), + "effective number of data points", + ) + print(kwargs_return.get("kwargs_lens", None), "lens result") + print(kwargs_return.get("kwargs_source", None), "source result") + print(kwargs_return.get("kwargs_lens_light", None), "lens light result") + print(kwargs_return.get("kwargs_ps", None), "point source result") + print(kwargs_return.get("kwargs_special", None), "special param result") time_end = time.time() - print(time_end - time_start, 'time used for ', print_key) - print('===================') - - return result['x'] - - def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None, - threadCount=1, init_pos=None, mpi=False, print_key='PSO'): - """ - Return the best fit for the lens model on catalogue basis with - particle swarm optimizer. + print(time_end - time_start, "time used for ", print_key) + print("===================") + + return result["x"] + + def pso( + self, + n_particles, + n_iterations, + lower_start=None, + upper_start=None, + threadCount=1, + init_pos=None, + mpi=False, + print_key="PSO", + ): + """Return the best fit for the lens model on catalogue basis with particle swarm + optimizer. 
:param n_particles: number of particles in the sampling process :param n_iterations: number of iterations of the swarm - :param lower_start: numpy array, lower end parameter of the values of the starting particles - :param upper_start: numpy array, upper end parameter of the values of the starting particles - :param threadCount: number of threads in the computation (only applied if mpi=False) + :param lower_start: numpy array, lower end parameter of the values of the + starting particles + :param upper_start: numpy array, upper end parameter of the values of the + starting particles + :param threadCount: number of threads in the computation (only applied if + mpi=False) :param init_pos: numpy array, position of the initial best guess model :param mpi: bool, if True, makes instance of MPIPool to allow for MPI execution :param print_key: string, prints the process name in the progress bar (optional) - :return: kwargs_result (of best fit), [lnlikelihood of samples, positions of samples, velocity of samples]) + :return: kwargs_result (of best fit), [lnlikelihood of samples, positions of + samples, velocity of samples]) """ if lower_start is None or upper_start is None: - lower_start, upper_start = np.array(self.lower_limit), np.array(self.upper_limit) + lower_start, upper_start = np.array(self.lower_limit), np.array( + self.upper_limit + ) print("PSO initialises its particles with default values") else: lower_start = np.maximum(lower_start, self.lower_limit) @@ -83,20 +107,19 @@ def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None, pool = choose_pool(mpi=mpi, processes=threadCount, use_dill=True) if mpi is True and pool.is_master(): - print('MPI option chosen for PSO.') + print("MPI option chosen for PSO.") - pso = ParticleSwarmOptimizer(self.chain.logL, - lower_start, upper_start, n_particles, - pool=pool) + pso = ParticleSwarmOptimizer( + self.chain.logL, lower_start, upper_start, n_particles, pool=pool + ) if init_pos is None: init_pos = (upper_start - lower_start) / 2 + lower_start - pso.set_global_best(init_pos, [0]*len(init_pos), - self.chain.logL(init_pos)) + pso.set_global_best(init_pos, [0] * len(init_pos), self.chain.logL(init_pos)) if pool.is_master(): - print('Computing the %s ...' % print_key) + print("Computing the %s ..." 
% print_key) time_start = time.time() @@ -104,26 +127,43 @@ def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None, if pool.is_master(): kwargs_return = self.chain.param.args2kwargs(result) - print(pso.global_best.fitness * 2 / (max( - self.chain.effective_num_data_points(**kwargs_return), 1)), 'reduced X^2 of best position') - print(pso.global_best.fitness, 'log likelihood') - print(self.chain.effective_num_data_points(**kwargs_return), 'effective number of data points') - print(kwargs_return.get('kwargs_lens', None), 'lens result') - print(kwargs_return.get('kwargs_source', None), 'source result') - print(kwargs_return.get('kwargs_lens_light', None), 'lens light result') - print(kwargs_return.get('kwargs_ps', None), 'point source result') - print(kwargs_return.get('kwargs_special', None), 'special param result') + print( + pso.global_best.fitness + * 2 + / (max(self.chain.effective_num_data_points(**kwargs_return), 1)), + "reduced X^2 of best position", + ) + print(pso.global_best.fitness, "log likelihood") + print( + self.chain.effective_num_data_points(**kwargs_return), + "effective number of data points", + ) + print(kwargs_return.get("kwargs_lens", None), "lens result") + print(kwargs_return.get("kwargs_source", None), "source result") + print(kwargs_return.get("kwargs_lens_light", None), "lens light result") + print(kwargs_return.get("kwargs_ps", None), "point source result") + print(kwargs_return.get("kwargs_special", None), "special param result") time_end = time.time() - print(time_end - time_start, 'time used for ', print_key) - print('===================') + print(time_end - time_start, "time used for ", print_key) + print("===================") return result, [log_likelihood_list, pos_list, vel_list] - def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start, - mpi=False, progress=False, threadCount=1, - initpos=None, backend_filename=None, start_from_backend=False): - """ - Run MCMC with emcee. - For details, please have a look at the documentation of the emcee packager. + def mcmc_emcee( + self, + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=False, + progress=False, + threadCount=1, + initpos=None, + backend_filename=None, + start_from_backend=False, + ): + """Run MCMC with emcee. For details, please have a look at the documentation of + the emcee packager. 
:param n_walkers: number of walkers in the emcee process :type n_walkers: integer @@ -155,15 +195,26 @@ def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start, num_param, _ = self.chain.param.num_param() if initpos is None: - initpos = sampling_util.sample_ball_truncated(mean_start, sigma_start, self.lower_limit, self.upper_limit, - size=n_walkers) + initpos = sampling_util.sample_ball_truncated( + mean_start, + sigma_start, + self.lower_limit, + self.upper_limit, + size=n_walkers, + ) pool = choose_pool(mpi=mpi, processes=threadCount, use_dill=True) if backend_filename is not None: - backend = emcee.backends.HDFBackend(backend_filename, name="lenstronomy_mcmc_emcee") + backend = emcee.backends.HDFBackend( + backend_filename, name="lenstronomy_mcmc_emcee" + ) if pool.is_master(): - print("Warning: All samples (including burn-in) will be saved in backup file '{}'.".format(backend_filename)) + print( + "Warning: All samples (including burn-in) will be saved in backup file '{}'.".format( + backend_filename + ) + ) if start_from_backend: initpos = None n_run_eff = n_run @@ -171,32 +222,47 @@ def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start, n_run_eff = n_burn + n_run backend.reset(n_walkers, num_param) if pool.is_master(): - print("Warning: backup file '{}' has been reset!".format(backend_filename)) + print( + "Warning: backup file '{}' has been reset!".format( + backend_filename + ) + ) else: backend = None n_run_eff = n_burn + n_run time_start = time.time() - sampler = emcee.EnsembleSampler(n_walkers, num_param, self.chain.logL, pool=pool, backend=backend) + sampler = emcee.EnsembleSampler( + n_walkers, num_param, self.chain.logL, pool=pool, backend=backend + ) sampler.run_mcmc(initpos, n_run_eff, progress=progress) flat_samples = sampler.get_chain(discard=n_burn, thin=1, flat=True) dist = sampler.get_log_prob(flat=True, discard=n_burn, thin=1) if pool.is_master(): - print('Computing the MCMC...') - print('Number of walkers = ', n_walkers) - print('Burn-in iterations: ', n_burn) - print('Sampling iterations (in current run):', n_run_eff) + print("Computing the MCMC...") + print("Number of walkers = ", n_walkers) + print("Burn-in iterations: ", n_burn) + print("Sampling iterations (in current run):", n_run_eff) time_end = time.time() - print(time_end - time_start, 'time taken for MCMC sampling') + print(time_end - time_start, "time taken for MCMC sampling") return flat_samples, dist - def mcmc_zeus(self, n_walkers, n_run, n_burn, mean_start, sigma_start, - mpi=False, threadCount=1, - progress=False, initpos=None, backend_filename=None, - **kwargs_zeus): - + def mcmc_zeus( + self, + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=False, + threadCount=1, + progress=False, + initpos=None, + backend_filename=None, + **kwargs_zeus + ): """ Lightning fast MCMC with zeus: https://github.com/minaskar/zeus @@ -227,62 +293,82 @@ def mcmc_zeus(self, n_walkers, n_run, n_burn, mean_start, sigma_start, """ import zeus - print('Using zeus to perform the MCMC.') + print("Using zeus to perform the MCMC.") num_param, _ = self.chain.param.num_param() # zeus kwargs; checks the dict for the key and if not present returns the given value - moves = kwargs_zeus.get('moves') - tune = kwargs_zeus.get('tune', True) - tolerance = kwargs_zeus.get('tolerance', 0.05) - patience = kwargs_zeus.get('patience', 5) - maxsteps = kwargs_zeus.get('maxsteps', 10000) - mu = kwargs_zeus.get('mu', 1.0) - maxiter = kwargs_zeus.get('maxiter', 10000) - pool = kwargs_zeus.get('pool', 
None) - vectorize = kwargs_zeus.get('vectorize', False) - blobs_dtype = kwargs_zeus.get('blobs_dtype') - verbose = kwargs_zeus.get('verbose', True) - check_walkers = kwargs_zeus.get('check_walkers', True) - shuffle_ensemble = kwargs_zeus.get('shuffle_ensemble', True) - light_mode = kwargs_zeus.get('light_mode', False) + moves = kwargs_zeus.get("moves") + tune = kwargs_zeus.get("tune", True) + tolerance = kwargs_zeus.get("tolerance", 0.05) + patience = kwargs_zeus.get("patience", 5) + maxsteps = kwargs_zeus.get("maxsteps", 10000) + mu = kwargs_zeus.get("mu", 1.0) + maxiter = kwargs_zeus.get("maxiter", 10000) + pool = kwargs_zeus.get("pool", None) + vectorize = kwargs_zeus.get("vectorize", False) + blobs_dtype = kwargs_zeus.get("blobs_dtype") + verbose = kwargs_zeus.get("verbose", True) + check_walkers = kwargs_zeus.get("check_walkers", True) + shuffle_ensemble = kwargs_zeus.get("shuffle_ensemble", True) + light_mode = kwargs_zeus.get("light_mode", False) # kwargs specifically for the callbacks - autocorrelation_callback = kwargs_zeus.get('autocorrelation_callback', False) - ncheck = kwargs_zeus.get('ncheck', 1) - dact = kwargs_zeus.get('dact', 0.01) - nact = kwargs_zeus.get('nact', 50) - discard = kwargs_zeus.get('discard', 0.5) - trigger = kwargs_zeus.get('trigger', True) - method = kwargs_zeus.get('method', 'mk') - splitr_callback = kwargs_zeus.get('splitr_callback', False) - epsilon = kwargs_zeus.get('epsilon', 0.01) - nsplits = kwargs_zeus.get('nsplits', 2) - miniter_callback=kwargs_zeus.get('miniter_callback', False) - nmin = kwargs_zeus.get('nmin', 1000) + autocorrelation_callback = kwargs_zeus.get("autocorrelation_callback", False) + ncheck = kwargs_zeus.get("ncheck", 1) + dact = kwargs_zeus.get("dact", 0.01) + nact = kwargs_zeus.get("nact", 50) + discard = kwargs_zeus.get("discard", 0.5) + trigger = kwargs_zeus.get("trigger", True) + method = kwargs_zeus.get("method", "mk") + splitr_callback = kwargs_zeus.get("splitr_callback", False) + epsilon = kwargs_zeus.get("epsilon", 0.01) + nsplits = kwargs_zeus.get("nsplits", 2) + miniter_callback = kwargs_zeus.get("miniter_callback", False) + nmin = kwargs_zeus.get("nmin", 1000) if initpos is None: - initpos = sampling_util.sample_ball_truncated(mean_start, sigma_start, self.lower_limit, self.upper_limit, - size=n_walkers) + initpos = sampling_util.sample_ball_truncated( + mean_start, + sigma_start, + self.lower_limit, + self.upper_limit, + size=n_walkers, + ) n_run_eff = n_burn + n_run callback_list = [] if backend_filename is not None: - backend = zeus.callbacks.SaveProgressCallback(filename=backend_filename, ncheck=ncheck) + backend = zeus.callbacks.SaveProgressCallback( + filename=backend_filename, ncheck=ncheck + ) callback_list.append(backend) else: pass if autocorrelation_callback == True: - autocorrelation = zeus.callbacks.AutocorrelationCallback(ncheck=ncheck, dact=dact, nact=nact, discard=discard, trigger=trigger, method=method) + autocorrelation = zeus.callbacks.AutocorrelationCallback( + ncheck=ncheck, + dact=dact, + nact=nact, + discard=discard, + trigger=trigger, + method=method, + ) callback_list.append(autocorrelation) else: pass if splitr_callback == True: - splitr = zeus.callbacks.SplitRCallback(ncheck=ncheck, epsilon=epsilon, nsplits=nsplits, discard=discard, trigger=trigger) + splitr = zeus.callbacks.SplitRCallback( + ncheck=ncheck, + epsilon=epsilon, + nsplits=nsplits, + discard=discard, + trigger=trigger, + ) callback_list.append(splitr) else: pass @@ -295,12 +381,25 @@ def mcmc_zeus(self, n_walkers, n_run, n_burn, 
mean_start, sigma_start, pool = choose_pool(mpi=mpi, processes=threadCount, use_dill=True) - sampler = zeus.EnsembleSampler(nwalkers=n_walkers, ndim=num_param, logprob_fn=self.chain.logL, - moves=moves, tune=tune, tolerance=tolerance, patience=patience, - maxsteps=maxsteps, mu=mu, maxiter=maxiter, pool=pool, vectorize=vectorize, - blobs_dtype=blobs_dtype, verbose=verbose, check_walkers=check_walkers, - shuffle_ensemble=shuffle_ensemble, light_mode=light_mode - ) + sampler = zeus.EnsembleSampler( + nwalkers=n_walkers, + ndim=num_param, + logprob_fn=self.chain.logL, + moves=moves, + tune=tune, + tolerance=tolerance, + patience=patience, + maxsteps=maxsteps, + mu=mu, + maxiter=maxiter, + pool=pool, + vectorize=vectorize, + blobs_dtype=blobs_dtype, + verbose=verbose, + check_walkers=check_walkers, + shuffle_ensemble=shuffle_ensemble, + light_mode=light_mode, + ) sampler.run_mcmc(initpos, n_run_eff, progress=progress, callbacks=callback_list) diff --git a/lenstronomy/Sampling/special_param.py b/lenstronomy/Sampling/special_param.py index c8841e4ea..784702e51 100644 --- a/lenstronomy/Sampling/special_param.py +++ b/lenstronomy/Sampling/special_param.py @@ -1,6 +1,6 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" -__all__ = ['SpecialParam'] +__all__ = ["SpecialParam"] import numpy as np from .param_group import ModelParamGroup, SingleParam, ArrayParam @@ -12,93 +12,87 @@ class DdtSamplingParam(SingleParam): - ''' - Time delay parameter - ''' - param_names = ['D_dt'] - _kwargs_lower = {'D_dt': 0} - _kwargs_upper = {'D_dt': 100000} + """Time delay parameter.""" + + param_names = ["D_dt"] + _kwargs_lower = {"D_dt": 0} + _kwargs_upper = {"D_dt": 100000} class SourceSizeParam(SingleParam): - ''' - Source size parameter - ''' - param_names = ['source_size'] - _kwargs_lower = {'source_size': 0} - _kwargs_upper = {'source_size': 1} + """Source size parameter.""" + + param_names = ["source_size"] + _kwargs_lower = {"source_size": 0} + _kwargs_upper = {"source_size": 1} class SourceGridOffsetParam(SingleParam): - ''' - Source grid offset, both x and y. - ''' - param_names = ['delta_x_source_grid', 'delta_y_source_grid'] - _kwargs_lower = { - 'delta_x_source_grid': -100, - 'delta_y_source_grid': -100 - } - _kwargs_upper = { - 'delta_x_source_grid': 100, - 'delta_y_source_grid': 100 - } + """Source grid offset, both x and y.""" + + param_names = ["delta_x_source_grid", "delta_y_source_grid"] + _kwargs_lower = {"delta_x_source_grid": -100, "delta_y_source_grid": -100} + _kwargs_upper = {"delta_x_source_grid": 100, "delta_y_source_grid": 100} class MassScalingParam(ArrayParam): - ''' - Mass scaling. Can scale the masses of arbitrary subsets of lens models - ''' - _kwargs_lower = {'scale_factor': 0} - _kwargs_upper = {'scale_factor': 1000} + """Mass scaling. 
+ + Can scale the masses of arbitrary subsets of lens models + """ + + _kwargs_lower = {"scale_factor": 0} + _kwargs_upper = {"scale_factor": 1000} + def __init__(self, num_scale_factor): super().__init__(on=int(num_scale_factor) > 0) - self.param_names = {'scale_factor': int(num_scale_factor)} + self.param_names = {"scale_factor": int(num_scale_factor)} class PointSourceOffsetParam(ArrayParam): - ''' - Point source offset, both x and y - ''' - _kwargs_lower = {'delta_x_image': -1, 'delta_y_image': -1} - _kwargs_upper = {'delta_x_image': 1, 'delta_y_image': 1} + """Point source offset, both x and y.""" + + _kwargs_lower = {"delta_x_image": -1, "delta_y_image": -1} + _kwargs_upper = {"delta_x_image": 1, "delta_y_image": 1} + def __init__(self, offset, num_images): super().__init__(on=offset and (int(num_images) > 0)) self.param_names = { - 'delta_x_image': int(num_images), - 'delta_y_image': int(num_images), + "delta_x_image": int(num_images), + "delta_y_image": int(num_images), } class Tau0ListParam(ArrayParam): - ''' - Optical depth renormalization parameters - ''' - _kwargs_lower = {'tau0_list': 0} - _kwargs_upper = {'tau0_list': 1000} + """Optical depth renormalization parameters.""" + + _kwargs_lower = {"tau0_list": 0} + _kwargs_upper = {"tau0_list": 1000} + def __init__(self, num_tau0): super().__init__(on=int(num_tau0) > 0) - self.param_names = {'tau0_list': int(num_tau0)} + self.param_names = {"tau0_list": int(num_tau0)} class ZSamplingParam(ArrayParam): - ''' - Redshift sampling. - ''' - _kwargs_lower = {'z_sampling': 0} - _kwargs_upper = {'z_sampling': 1000} + """Redshift sampling.""" + + _kwargs_lower = {"z_sampling": 0} + _kwargs_upper = {"z_sampling": 1000} + def __init__(self, num_z_sampling): super().__init__(on=int(num_z_sampling) > 0) - self.param_names = {'z_sampling': int(num_z_sampling)} + self.param_names = {"z_sampling": int(num_z_sampling)} class GeneralScalingParam(ArrayParam): - ''' - General lens scaling. + """General lens scaling. For each scaled lens parameter, adds a `{param}_scale_factor` and - `{param}_scale_pow` special parameter, and updates the scaled param - as `param = param_scale_factor * param**param_scale_pow`. - ''' + `{param}_scale_pow` special parameter, and updates the scaled param as `param = + param_scale_factor * param**param_scale_pow`. + """ + def __init__(self, params: dict): # params is a dictionary self.param_names = {} @@ -113,12 +107,12 @@ def __init__(self, params: dict): num_param = np.max(array) if num_param > 0: - fac_name = f'{name}_scale_factor' + fac_name = f"{name}_scale_factor" self.param_names[fac_name] = num_param self._kwargs_lower[fac_name] = 0 self._kwargs_upper[fac_name] = 1000 - pow_name = f'{name}_scale_pow' + pow_name = f"{name}_scale_pow" self.param_names[pow_name] = num_param self._kwargs_lower[pow_name] = -10 self._kwargs_upper[pow_name] = 10 @@ -129,17 +123,30 @@ def __init__(self, params: dict): # ======================================== # - class SpecialParam(object): - """ - class that handles special parameters that are not directly part of a specific model component. - These includes cosmology relevant parameters, astrometric errors and overall scaling parameters. + """Class that handles special parameters that are not directly part of a specific + model component. + + These includes cosmology relevant parameters, astrometric errors and overall scaling + parameters. 
""" - def __init__(self, Ddt_sampling=False, mass_scaling=False, num_scale_factor=1, - general_scaling_params=None, kwargs_fixed=None, kwargs_lower=None, - kwargs_upper=None, point_source_offset=False, source_size=False, num_images=0, num_tau0=0, - num_z_sampling=0, source_grid_offset=False): + def __init__( + self, + Ddt_sampling=False, + mass_scaling=False, + num_scale_factor=1, + general_scaling_params=None, + kwargs_fixed=None, + kwargs_lower=None, + kwargs_upper=None, + point_source_offset=False, + source_size=False, + num_images=0, + num_tau0=0, + num_z_sampling=0, + source_grid_offset=False, + ): """ :param Ddt_sampling: bool, if True, samples the time-delay distance D_dt (in units of Mpc) @@ -202,8 +209,12 @@ def get_params(self, args, i, impose_bound=False): """ if impose_bound: result = ModelParamGroup.compose_get_params( - self._param_groups, args, i, kwargs_fixed=self._kwargs_fixed, - kwargs_lower=self.lower_limit, kwargs_upper=self.upper_limit + self._param_groups, + args, + i, + kwargs_fixed=self._kwargs_fixed, + kwargs_lower=self.lower_limit, + kwargs_upper=self.upper_limit, ) else: result = ModelParamGroup.compose_get_params( @@ -232,11 +243,13 @@ def num_param(self): @property def _param_groups(self): - return [self._D_dt_sampling, - self._mass_scaling, - self._general_scaling, - self._point_source_offset, - self._source_size, - self._tau0, - self._z_sampling, - self._source_grid_offset] + return [ + self._D_dt_sampling, + self._mass_scaling, + self._general_scaling, + self._point_source_offset, + self._source_size, + self._tau0, + self._z_sampling, + self._source_grid_offset, + ] diff --git a/lenstronomy/SimulationAPI/ObservationConfig/DES.py b/lenstronomy/SimulationAPI/ObservationConfig/DES.py index 7f92edc39..55677d35e 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/DES.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/DES.py @@ -1,46 +1,58 @@ """Provisional DES instrument and observational settings. + See Optics and Observation Conditions spreadsheet at -https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing for list of -sources. """ +https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing +for list of +sources. 
+""" import lenstronomy.Util.util as util -__all__ = ['DES'] - -g_band_obs = {'exposure_time': 90., - 'sky_brightness': 22.01, - 'magnitude_zero_point': 26.58, - 'num_exposures': 10, - 'seeing': 1.12, - 'psf_type': 'GAUSSIAN'} - -r_band_obs = {'exposure_time': 90., - 'sky_brightness': 21.15, - 'magnitude_zero_point': 26.78, - 'num_exposures': 10, - 'seeing': 0.96, - 'psf_type': 'GAUSSIAN'} - -i_band_obs = {'exposure_time': 90., - 'sky_brightness': 19.89, - 'magnitude_zero_point': 26.75, - 'num_exposures': 10, - 'seeing': 0.88, - 'psf_type': 'GAUSSIAN'} - -z_band_obs = {'exposure_time': 90., - 'sky_brightness': 18.72, - 'magnitude_zero_point': 26.48, - 'num_exposures': 10, - 'seeing': 0.84, - 'psf_type': 'GAUSSIAN'} - -Y_band_obs = {'exposure_time': 45., - 'sky_brightness': 17.96, - 'magnitude_zero_point': 25.40, - 'num_exposures': 10, - 'seeing': 0.9, - 'psf_type': 'GAUSSIAN'} +__all__ = ["DES"] + +g_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 22.01, + "magnitude_zero_point": 26.58, + "num_exposures": 10, + "seeing": 1.12, + "psf_type": "GAUSSIAN", +} + +r_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 21.15, + "magnitude_zero_point": 26.78, + "num_exposures": 10, + "seeing": 0.96, + "psf_type": "GAUSSIAN", +} + +i_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 19.89, + "magnitude_zero_point": 26.75, + "num_exposures": 10, + "seeing": 0.88, + "psf_type": "GAUSSIAN", +} +z_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 18.72, + "magnitude_zero_point": 26.48, + "num_exposures": 10, + "seeing": 0.84, + "psf_type": "GAUSSIAN", +} + +Y_band_obs = { + "exposure_time": 45.0, + "sky_brightness": 17.96, + "magnitude_zero_point": 25.40, + "num_exposures": 10, + "seeing": 0.9, + "psf_type": "GAUSSIAN", +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -52,45 +64,45 @@ class DES(object): - """ - class contains DES instrument and observation configurations - """ - def __init__(self, band='g', psf_type='GAUSSIAN', coadd_years=3): + """Class contains DES instrument and observation configurations.""" + + def __init__(self, band="g", psf_type="GAUSSIAN", coadd_years=3): """ :param band: string, 'g', 'r', 'i', 'z', or 'Y' supported. Determines obs dictionary. :param psf_type: string, type of PSF ('GAUSSIAN' supported). :param coadd_years: int, number of years corresponding to num_exposures in obs dict. Currently supported: 1-6. """ - if band == 'g': + if band == "g": self.obs = g_band_obs - elif band == 'r': + elif band == "r": self.obs = r_band_obs - elif band == 'i': + elif band == "i": self.obs = i_band_obs - elif band == 'z': + elif band == "z": self.obs = z_band_obs - elif band == 'Y': + elif band == "Y": self.obs = Y_band_obs else: - raise ValueError("band %s not supported! Choose 'g', 'r', 'i', 'z', or 'Y'." % band) + raise ValueError( + "band %s not supported! Choose 'g', 'r', 'i', 'z', or 'Y'." % band + ) - if psf_type != 'GAUSSIAN': + if psf_type != "GAUSSIAN": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years > 6 or coadd_years < 1: - raise ValueError(" %s coadd_years not supported! Choose an integer between 1 and 6." % coadd_years) + raise ValueError( + " %s coadd_years not supported! Choose an integer between 1 and 6." 
+ % coadd_years + ) elif coadd_years != 3: - self.obs['num_exposures'] = (coadd_years * 10) // 3 + self.obs["num_exposures"] = (coadd_years * 10) // 3 - self.camera = {'read_noise': 7, - 'pixel_scale': 0.263, - 'ccd_gain': 4} - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). - """ + self.camera = {"read_noise": 7, "pixel_scale": 0.263, "ccd_gain": 4} + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/ObservationConfig/Euclid.py b/lenstronomy/SimulationAPI/ObservationConfig/Euclid.py index 1ee23061f..77b0739f4 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/Euclid.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/Euclid.py @@ -1,18 +1,22 @@ """Provisional Euclid instrument and observational settings. + See Optics and Observation Conditions spreadsheet at -https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing for list of -sources. """ +https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing +for list of +sources. +""" import lenstronomy.Util.util as util -__all__ = ['Euclid'] - -VIS_obs = {'exposure_time': 565., - 'sky_brightness': 22.35, - 'magnitude_zero_point': 24.0, - 'num_exposures': 4, - 'seeing': 0.16, - 'psf_type': 'GAUSSIAN'} +__all__ = ["Euclid"] +VIS_obs = { + "exposure_time": 565.0, + "sky_brightness": 22.35, + "magnitude_zero_point": 24.0, + "num_exposures": 4, + "seeing": 0.16, + "psf_type": "GAUSSIAN", +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -24,10 +28,9 @@ class Euclid(object): - """ - class contains Euclid instrument and observation configurations - """ - def __init__(self, band='VIS', psf_type='GAUSSIAN', coadd_years=6): + """Class contains Euclid instrument and observation configurations.""" + + def __init__(self, band="VIS", psf_type="GAUSSIAN", coadd_years=6): """ :param band: string, only 'VIS' supported. Determines obs dictionary. @@ -35,25 +38,24 @@ def __init__(self, band='VIS', psf_type='GAUSSIAN', coadd_years=6): :param coadd_years: int, number of years corresponding to num_exposures in obs dict. Currently supported: 2-6. """ self.obs = VIS_obs - if band != 'VIS': + if band != "VIS": raise ValueError("band %s not supported! Choose 'VIS'." % band) - if psf_type != 'GAUSSIAN': + if psf_type != "GAUSSIAN": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years > 6 or coadd_years < 2: - raise ValueError(" %s coadd_years not supported! Choose an integer between 2 and 6." % coadd_years) + raise ValueError( + " %s coadd_years not supported! Choose an integer between 2 and 6." + % coadd_years + ) elif coadd_years != 6: - self.obs['num_exposures'] = (coadd_years * VIS_obs["num_exposures"]) // 6 + self.obs["num_exposures"] = (coadd_years * VIS_obs["num_exposures"]) // 6 - self.camera = {'read_noise': 4.2, - 'pixel_scale': 0.101, - 'ccd_gain': 3.1} - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). 
- """ + self.camera = {"read_noise": 4.2, "pixel_scale": 0.101, "ccd_gain": 3.1} + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/ObservationConfig/HST.py b/lenstronomy/SimulationAPI/ObservationConfig/HST.py index d61405e66..4d1d0bf4e 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/HST.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/HST.py @@ -1,31 +1,35 @@ """Provisional HST instrument and observational settings. + See Optics and Observation Conditions spreadsheet at -https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing for list of -sources. """ +https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing +for list of +sources. +""" import lenstronomy.Util.util as util -__all__ = ['HST'] +__all__ = ["HST"] # F160W filter configs -WFC3_F160W_band_obs = {'exposure_time': 5400., # ~90mins orbit on HST, but this number corresponds to - # approximately two HST orbits with overheads, guide star aquisition. ~2700s science exposure per orbit - 'sky_brightness': 22.3, - 'magnitude_zero_point': 25.96, - 'num_exposures': 1, - 'seeing': 0.08, # set equal to the approx pixel size for drizzled PSF. Note that undrizzled PSF FWHM ~ 0.15" (Windhorst et al 2011) - 'psf_type': 'GAUSSIAN' - } +WFC3_F160W_band_obs = { + "exposure_time": 5400.0, # ~90mins orbit on HST, but this number corresponds to + # approximately two HST orbits with overheads, guide star aquisition. ~2700s science exposure per orbit + "sky_brightness": 22.3, + "magnitude_zero_point": 25.96, + "num_exposures": 1, + "seeing": 0.08, # set equal to the approx pixel size for drizzled PSF. Note that undrizzled PSF FWHM ~ 0.15" (Windhorst et al 2011) + "psf_type": "GAUSSIAN", +} # configs meant to simulate images close to those provided as part of the Time Delay Lens Modeling Challenge -TDLMC_F160W_band_obs = {'exposure_time': 5400., # ~90mins orbit on HST, but this number corresponds to - # approximately two HST orbits with overheads, guide star aquisition. ~2700s science exposure per orbit - 'sky_brightness': 22.0, - 'magnitude_zero_point': 25.9463, - 'num_exposures': 1, - 'seeing': None, - 'psf_type': 'PIXEL' # note kernel_point_source (the PSF map) must be provided separately - } - +TDLMC_F160W_band_obs = { + "exposure_time": 5400.0, # ~90mins orbit on HST, but this number corresponds to + # approximately two HST orbits with overheads, guide star aquisition. ~2700s science exposure per orbit + "sky_brightness": 22.0, + "magnitude_zero_point": 25.9463, + "num_exposures": 1, + "seeing": None, + "psf_type": "PIXEL", # note kernel_point_source (the PSF map) must be provided separately +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -37,11 +41,9 @@ class HST(object): - """ - class contains HST instrument and observation configurations - """ + """Class contains HST instrument and observation configurations.""" - def __init__(self, band='TDLMC_F160W', psf_type='PIXEL', coadd_years=None): + def __init__(self, band="TDLMC_F160W", psf_type="PIXEL", coadd_years=None): """ :param band: string, 'WFC3_F160W' or 'TDLMC_F160W' supported. Determines obs dictionary. 
@@ -49,33 +51,37 @@ def __init__(self, band='TDLMC_F160W', psf_type='PIXEL', coadd_years=None): :param coadd_years: int, number of years corresponding to num_exposures in obs dict. Currently supported: None. """ - if band == 'TDLMC_F160W': + if band == "TDLMC_F160W": self.obs = TDLMC_F160W_band_obs - elif band == 'WFC3_F160W' or band == 'F160W': + elif band == "WFC3_F160W" or band == "F160W": self.obs = WFC3_F160W_band_obs else: - raise ValueError("band %s not supported! Choose 'WFC3_F160W' or 'TDLMC_F160W'." % band) + raise ValueError( + "band %s not supported! Choose 'WFC3_F160W' or 'TDLMC_F160W'." % band + ) - if psf_type == 'GAUSSIAN': - self.obs['psf_type'] = 'GAUSSIAN' - elif psf_type != 'PIXEL': + if psf_type == "GAUSSIAN": + self.obs["psf_type"] = "GAUSSIAN" + elif psf_type != "PIXEL": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years is not None: - raise ValueError(" %s coadd_years not supported! " - "You may manually adjust num_exposures in obs dict if required." % coadd_years) + raise ValueError( + " %s coadd_years not supported! " + "You may manually adjust num_exposures in obs dict if required." + % coadd_years + ) # WFC3 camera settings - self.camera = {'read_noise': 4, - 'pixel_scale': 0.08, # approx pixel size for drizzled PSF - 'ccd_gain': 2.5, - } - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). - """ + self.camera = { + "read_noise": 4, + "pixel_scale": 0.08, # approx pixel size for drizzled PSF + "ccd_gain": 2.5, + } + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/ObservationConfig/JWST.py b/lenstronomy/SimulationAPI/ObservationConfig/JWST.py index 7bbc26ec7..05b8ad1ee 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/JWST.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/JWST.py @@ -5,28 +5,29 @@ """ import lenstronomy.Util.util as util -__all__ = ['JWST'] - - -NIRCAM_F200W_band_obs = {'exposure_time': 3600., - 'sky_brightness': 29.52, #this is derived using the ETC - 'magnitude_zero_point': 28.00, - #'detector': 'NRCA1', - 'num_exposures': 1, - 'seeing': None, - 'psf_type': 'PIXEL', - } - - -NIRCAM_F356W_band_obs = {'exposure_time': 3600., - 'sky_brightness': 28.39, #this is derived using the ETC - 'magnitude_zero_point': 26.47, - #'detector': 'NRCALONG', - 'num_exposures': 1, - 'seeing': None, - 'psf_type': 'PIXEL' # note kernel_point_source (the PSF map) must be provided separately - } - +__all__ = ["JWST"] + + +NIRCAM_F200W_band_obs = { + "exposure_time": 3600.0, + "sky_brightness": 29.52, # this is derived using the ETC + "magnitude_zero_point": 28.00, + #'detector': 'NRCA1', + "num_exposures": 1, + "seeing": None, + "psf_type": "PIXEL", +} + + +NIRCAM_F356W_band_obs = { + "exposure_time": 3600.0, + "sky_brightness": 28.39, # this is derived using the ETC + "magnitude_zero_point": 26.47, + #'detector': 'NRCALONG', + "num_exposures": 1, + "seeing": None, + "psf_type": "PIXEL", # note kernel_point_source (the PSF map) must be provided separately +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -38,11 +39,9 @@ class JWST(object): - """ - 
class contains JWST instrument and observation configurations - """ + """Class contains JWST instrument and observation configurations.""" - def __init__(self, band='F200W', psf_type='PIXEL', coadd_years=None): + def __init__(self, band="F200W", psf_type="PIXEL", coadd_years=None): """ :param band: string, 'F200W' or 'F356W' supported. Determines obs dictionary. @@ -50,36 +49,41 @@ def __init__(self, band='F200W', psf_type='PIXEL', coadd_years=None): :param coadd_years: int, number of years corresponding to num_exposures in obs dict. Currently supported: None. """ - if band == 'F200W': + if band == "F200W": self.obs = NIRCAM_F200W_band_obs - self.arm = 'short' - elif band == 'F356W': + self.arm = "short" + elif band == "F356W": self.obs = NIRCAM_F356W_band_obs - self.arm = 'long' + self.arm = "long" else: - raise ValueError("band %s not supported!"% band) + raise ValueError("band %s not supported!" % band) - if psf_type == 'GAUSSIAN': - self.obs['psf_type'] = 'GAUSSIAN' - elif psf_type != 'PIXEL': + if psf_type == "GAUSSIAN": + self.obs["psf_type"] = "GAUSSIAN" + elif psf_type != "PIXEL": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years is not None: - raise ValueError(" %s coadd_years not supported! " - "You may manually adjust num_exposures in obs dict if required." % coadd_years) + raise ValueError( + " %s coadd_years not supported! " + "You may manually adjust num_exposures in obs dict if required." + % coadd_years + ) # NIRCAM camera settings - if self.arm == 'short': - self.camera = {'read_noise': 15.77, - 'pixel_scale': 0.031, - 'ccd_gain': 2.05, - } - elif self.arm == 'long': - self.camera = {'read_noise': 13.25, - 'pixel_scale': 0.063, - 'ccd_gain': 1.82, - } + if self.arm == "short": + self.camera = { + "read_noise": 15.77, + "pixel_scale": 0.031, + "ccd_gain": 2.05, + } + elif self.arm == "long": + self.camera = { + "read_noise": 13.25, + "pixel_scale": 0.063, + "ccd_gain": 1.82, + } """ :keyword read_noise: std of noise generated by read-out (in units of electrons) :keyword pixel_scale: scale (in arcseconds) of pixels diff --git a/lenstronomy/SimulationAPI/ObservationConfig/LSST.py b/lenstronomy/SimulationAPI/ObservationConfig/LSST.py index 740e33ea1..3c7e7204c 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/LSST.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/LSST.py @@ -1,55 +1,68 @@ """Provisional LSST instrument and observational settings. + See Optics and Observation Conditions spreadsheet at -https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing for list of -sources. """ +https://docs.google.com/spreadsheets/d/1pMUB_OOZWwXON2dd5oP8PekhCT5MBBZJO1HV7IMZg4Y/edit?usp=sharing +for list of +sources. 
+""" import copy import lenstronomy.Util.util as util -__all__ = ['LSST'] - -u_band_obs = {'exposure_time': 15., - 'sky_brightness': 22.99, - 'magnitude_zero_point': 26.5, - 'num_exposures': 140, - 'seeing': 0.81, - 'psf_type': 'GAUSSIAN'} - -g_band_obs = {'exposure_time': 15., - 'sky_brightness': 22.26, - 'magnitude_zero_point': 28.30, - 'num_exposures': 200, - 'seeing': 0.77, - 'psf_type': 'GAUSSIAN'} - -r_band_obs = {'exposure_time': 15., - 'sky_brightness': 21.2, - 'magnitude_zero_point': 28.13, - 'num_exposures': 460, - 'seeing': 0.73, - 'psf_type': 'GAUSSIAN'} - -i_band_obs = {'exposure_time': 15., - 'sky_brightness': 20.48, - 'magnitude_zero_point': 27.79, - 'num_exposures': 460, - 'seeing': 0.71, - 'psf_type': 'GAUSSIAN'} - -z_band_obs = {'exposure_time': 15., - 'sky_brightness': 19.6, - 'magnitude_zero_point': 27.40, - 'num_exposures': 400, - 'seeing': 0.69, - 'psf_type': 'GAUSSIAN'} - -y_band_obs = {'exposure_time': 15., - 'sky_brightness': 18.61, - 'magnitude_zero_point': 26.58, - 'num_exposures': 400, - 'seeing': 0.68, - 'psf_type': 'GAUSSIAN'} - - +__all__ = ["LSST"] + +u_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 22.99, + "magnitude_zero_point": 26.5, + "num_exposures": 140, + "seeing": 0.81, + "psf_type": "GAUSSIAN", +} + +g_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 22.26, + "magnitude_zero_point": 28.30, + "num_exposures": 200, + "seeing": 0.77, + "psf_type": "GAUSSIAN", +} + +r_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 21.2, + "magnitude_zero_point": 28.13, + "num_exposures": 460, + "seeing": 0.73, + "psf_type": "GAUSSIAN", +} + +i_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 20.48, + "magnitude_zero_point": 27.79, + "num_exposures": 460, + "seeing": 0.71, + "psf_type": "GAUSSIAN", +} + +z_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 19.6, + "magnitude_zero_point": 27.40, + "num_exposures": 400, + "seeing": 0.69, + "psf_type": "GAUSSIAN", +} + +y_band_obs = { + "exposure_time": 15.0, + "sky_brightness": 18.61, + "magnitude_zero_point": 26.58, + "num_exposures": 400, + "seeing": 0.68, + "psf_type": "GAUSSIAN", +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -62,11 +75,9 @@ class LSST(object): - """ - class contains LSST instrument and observation configurations - """ + """Class contains LSST instrument and observation configurations.""" - def __init__(self, band='g', psf_type='GAUSSIAN', coadd_years=10): + def __init__(self, band="g", psf_type="GAUSSIAN", coadd_years=10): """ :param band: string, 'u', 'g', 'r', 'i', 'z' or 'y' supported. Determines obs dictionary. @@ -75,38 +86,42 @@ def __init__(self, band='g', psf_type='GAUSSIAN', coadd_years=10): """ if band.isalpha(): band = band.lower() - if band == 'g': + if band == "g": self.obs = copy.deepcopy(g_band_obs) - elif band == 'r': + elif band == "r": self.obs = copy.deepcopy(r_band_obs) - elif band == 'i': + elif band == "i": self.obs = copy.deepcopy(i_band_obs) - elif band == 'u': + elif band == "u": self.obs = copy.deepcopy(u_band_obs) - elif band == 'z': + elif band == "z": self.obs = copy.deepcopy(z_band_obs) - elif band == 'y': + elif band == "y": self.obs = copy.deepcopy(y_band_obs) else: - raise ValueError("band %s not supported! Choose 'u', 'g', 'r', 'i', 'z' or 'y'." % band) + raise ValueError( + "band %s not supported! Choose 'u', 'g', 'r', 'i', 'z' or 'y'." 
% band + ) - if psf_type != 'GAUSSIAN': + if psf_type != "GAUSSIAN": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years > 10 or coadd_years < 1: - raise ValueError(" %s coadd_years not supported! Choose an integer between 1 and 10." % coadd_years) + raise ValueError( + " %s coadd_years not supported! Choose an integer between 1 and 10." + % coadd_years + ) elif coadd_years != 10: - self.obs['num_exposures'] = coadd_years*self.obs['num_exposures']//10 - - self.camera = {'read_noise': 10, # will be <10 - 'pixel_scale': 0.2, - 'ccd_gain': 2.3, - } - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). - """ + self.obs["num_exposures"] = coadd_years * self.obs["num_exposures"] // 10 + + self.camera = { + "read_noise": 10, # will be <10 + "pixel_scale": 0.2, + "ccd_gain": 2.3, + } + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/ObservationConfig/Roman.py b/lenstronomy/SimulationAPI/ObservationConfig/Roman.py index e5aeef1e6..70cfca219 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/Roman.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/Roman.py @@ -12,37 +12,49 @@ # For wide area survey mode: exposure time and number of exposures for relevant filters set as given in https://roman.gsfc.nasa.gov/high_latitude_wide_area_survey.html -__all__ = ['Roman'] - -F062_band_obs = {'sky_brightness': 23.19, - 'magnitude_zero_point': 26.56, - 'seeing': 0.058} - -F087_band_obs = {'sky_brightness': 22.93, - 'magnitude_zero_point': 26.30, - 'seeing': 0.073} - -F106_band_obs = {'sky_brightness': 22.99, - 'magnitude_zero_point': 26.44, - 'seeing': 0.087} - -F129_band_obs = {'sky_brightness': 22.99, - 'magnitude_zero_point': 26.40, - 'seeing': 0.105} - -F158_band_obs = {'sky_brightness': 23.10, - 'magnitude_zero_point': 26.43, - 'seeing': 0.127} - -F184_band_obs = {'sky_brightness': 23.22, - 'magnitude_zero_point': 25.95, - 'seeing': 0.151} - -F146_band_obs = {'sky_brightness': 22.03, - 'magnitude_zero_point': 26.65, - 'seeing': 0.105} - - +__all__ = ["Roman"] + +F062_band_obs = { + "sky_brightness": 23.19, + "magnitude_zero_point": 26.56, + "seeing": 0.058, +} + +F087_band_obs = { + "sky_brightness": 22.93, + "magnitude_zero_point": 26.30, + "seeing": 0.073, +} + +F106_band_obs = { + "sky_brightness": 22.99, + "magnitude_zero_point": 26.44, + "seeing": 0.087, +} + +F129_band_obs = { + "sky_brightness": 22.99, + "magnitude_zero_point": 26.40, + "seeing": 0.105, +} + +F158_band_obs = { + "sky_brightness": 23.10, + "magnitude_zero_point": 26.43, + "seeing": 0.127, +} + +F184_band_obs = { + "sky_brightness": 23.22, + "magnitude_zero_point": 25.95, + "seeing": 0.151, +} + +F146_band_obs = { + "sky_brightness": 22.03, + "magnitude_zero_point": 26.65, + "seeing": 0.105, +} """ :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) :keyword magnitude_zero_point: magnitude in which 1 count (e-) per second per arcsecond square is registered @@ -52,77 +64,87 @@ class Roman(object): - """ - class contains Roman instrument and observation configurations - """ + """Class contains Roman instrument and observation configurations.""" - def __init__(self, band='F062', 
psf_type='GAUSSIAN', survey_mode='wide_area'): + def __init__(self, band="F062", psf_type="GAUSSIAN", survey_mode="wide_area"): """ :param band: string, 'F062', 'F087', 'F106', 'F129', 'F158' , 'F184' or 'F146' supported. Determines obs dictionary. :param psf_type: string, type of PSF ('GAUSSIAN', 'PIXEL' supported). """ - - if band == 'F062': + + if band == "F062": self.obs = F062_band_obs - elif band == 'F087': + elif band == "F087": self.obs = F087_band_obs - elif band == 'F106': + elif band == "F106": self.obs = F106_band_obs - elif band == 'F129': + elif band == "F129": self.obs = F129_band_obs - elif band == 'F158': + elif band == "F158": self.obs = F158_band_obs - elif band == 'F184': + elif band == "F184": self.obs = F184_band_obs - elif band == 'F146': + elif band == "F146": self.obs = F146_band_obs else: - raise ValueError("band %s not supported! Choose 'F062', 'F087', 'F106', 'F129', 'F158' , 'F184' or 'F146'" % band) + raise ValueError( + "band %s not supported! Choose 'F062', 'F087', 'F106', 'F129', 'F158' , 'F184' or 'F146'" + % band + ) - if survey_mode == 'wide_area': + if survey_mode == "wide_area": # the number of exposures is given per sector # a full pass of the High Latitude Wide Area Survey is 155 sectors - if band in ['F106', 'F158', 'F184', 'F062']: + if band in ["F106", "F158", "F184", "F062"]: exp_per_tile = 3 - elif band == 'F129': + elif band == "F129": exp_per_tile = 4 else: - raise ValueError("band %s is not supported with the wide_area survey mode! Choose 'F106', 'F062, 'F158', 'F184' or F129" % band) - - self.obs.update({'exposure_time': 146, 'num_exposures': 32*exp_per_tile}) - elif survey_mode == 'microlensing': - if band == 'F146': + raise ValueError( + "band %s is not supported with the wide_area survey mode! Choose 'F106', 'F062, 'F158', 'F184' or F129" + % band + ) + + self.obs.update({"exposure_time": 146, "num_exposures": 32 * exp_per_tile}) + elif survey_mode == "microlensing": + if band == "F146": # These are the exposure times and number of exposures for the primary filter, F146 - self.obs.update({'exposure_time': 46.8, 'num_exposures': 41000}) - elif band == 'F087': + self.obs.update({"exposure_time": 46.8, "num_exposures": 41000}) + elif band == "F087": # These are the exposure times and number of exposures for the secondary filter, F087 - self.obs.update({'exposure_time': 286., 'num_exposures': 860}) + self.obs.update({"exposure_time": 286.0, "num_exposures": 860}) else: - raise ValueError("band %s is not supported with the microlensing survey mode! Choose 'F146' or 'F087'" % band) + raise ValueError( + "band %s is not supported with the microlensing survey mode! Choose 'F146' or 'F087'" + % band + ) else: - raise ValueError("survey mode %s not supported! Choose 'wide_area' or 'microlensing'" % survey_mode) + raise ValueError( + "survey mode %s not supported! 
Choose 'wide_area' or 'microlensing'" + % survey_mode + ) - if psf_type == 'PIXEL': + if psf_type == "PIXEL": import lenstronomy + module_path = os.path.dirname(lenstronomy.__file__) - psf_filename = os.path.join(module_path, 'SimulationAPI/ObservationConfig/PSF_models/{}.fits'.format(band)) + psf_filename = os.path.join( + module_path, + "SimulationAPI/ObservationConfig/PSF_models/{}.fits".format(band), + ) kernel = pyfits.getdata(psf_filename) - self.obs.update({'psf_type': 'PIXEL', 'kernel_point_source': kernel}) - elif psf_type == 'GAUSSIAN': - self.obs.update({'psf_type': 'GAUSSIAN'}) + self.obs.update({"psf_type": "PIXEL", "kernel_point_source": kernel}) + elif psf_type == "GAUSSIAN": + self.obs.update({"psf_type": "GAUSSIAN"}) else: raise ValueError("psf_type %s not supported!" % psf_type) - self.camera = {'read_noise': 15.5, - 'pixel_scale': 0.11, - 'ccd_gain': 1} - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). - """ + self.camera = {"read_noise": 15.5, "pixel_scale": 0.11, "ccd_gain": 1} + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/ObservationConfig/ZTF.py b/lenstronomy/SimulationAPI/ObservationConfig/ZTF.py index 62e1daad8..1b893d3c0 100644 --- a/lenstronomy/SimulationAPI/ObservationConfig/ZTF.py +++ b/lenstronomy/SimulationAPI/ObservationConfig/ZTF.py @@ -2,8 +2,7 @@ import lenstronomy.Util.util as util -__all__ = ['ZTF'] - +__all__ = ["ZTF"] """ Sources https://iopscience.iop.org/article/10.1088/1538-3873/aaecbe/pdf @@ -26,27 +25,32 @@ - num_exposures """ -g_band_obs = {'exposure_time': 30., - 'sky_brightness': 22.01, - 'magnitude_zero_point': 26.325, - 'num_exposures': 40, - 'seeing': 2.1, - 'psf_type': 'GAUSSIAN'} - -r_band_obs = {'exposure_time': 30., - 'sky_brightness': 21.15, - 'magnitude_zero_point': 26.275, - 'num_exposures': 40, - 'seeing': 2.0, - 'psf_type': 'GAUSSIAN'} - -i_band_obs = {'exposure_time': 30., - 'sky_brightness': 19.89, - 'magnitude_zero_point': 25.660, - 'num_exposures': 40, - 'seeing': 2.1, - 'psf_type': 'GAUSSIAN'} - +g_band_obs = { + "exposure_time": 30.0, + "sky_brightness": 22.01, + "magnitude_zero_point": 26.325, + "num_exposures": 40, + "seeing": 2.1, + "psf_type": "GAUSSIAN", +} + +r_band_obs = { + "exposure_time": 30.0, + "sky_brightness": 21.15, + "magnitude_zero_point": 26.275, + "num_exposures": 40, + "seeing": 2.0, + "psf_type": "GAUSSIAN", +} + +i_band_obs = { + "exposure_time": 30.0, + "sky_brightness": 19.89, + "magnitude_zero_point": 25.660, + "num_exposures": 40, + "seeing": 2.1, + "psf_type": "GAUSSIAN", +} """ :keyword exposure_time: exposure time per image (in seconds) :keyword sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons) @@ -58,41 +62,38 @@ class ZTF(object): - """ - class contains ZTF instrument and observation configurations - """ - def __init__(self, band='g', psf_type='GAUSSIAN', coadd_years=3): + """Class contains ZTF instrument and observation configurations.""" + + def __init__(self, band="g", psf_type="GAUSSIAN", coadd_years=3): """ :param band: string, 'g', 'r', or 'i', supported. Determines obs dictionary. :param psf_type: string, type of PSF ('GAUSSIAN' supported). 
:param coadd_years: int, number of years corresponding to num_exposures in obs dict. Currently supported: 1-3. """ - if band == 'g': + if band == "g": self.obs = g_band_obs - elif band == 'r': + elif band == "r": self.obs = r_band_obs - elif band == 'i': + elif band == "i": self.obs = i_band_obs else: raise ValueError("band %s not supported! Choose 'g', 'r', or 'i'." % band) - if psf_type != 'GAUSSIAN': + if psf_type != "GAUSSIAN": raise ValueError("psf_type %s not supported!" % psf_type) if coadd_years > 3 or coadd_years < 1: - raise ValueError(" %s coadd_years not supported! Choose an integer between 1 and 3." % coadd_years) + raise ValueError( + " %s coadd_years not supported! Choose an integer between 1 and 3." + % coadd_years + ) elif coadd_years != 3: - self.obs['num_exposures'] = (coadd_years * 40) // 3 + self.obs["num_exposures"] = (coadd_years * 40) // 3 - self.camera = {'read_noise': 10.3, - 'pixel_scale': 1.01, - 'ccd_gain': 5.8 - } - """ - :keyword read_noise: std of noise generated by read-out (in units of electrons) - :keyword pixel_scale: scale (in arcseconds) of pixels - :keyword ccd_gain: electrons/ADU (analog-to-digital unit). - """ + self.camera = {"read_noise": 10.3, "pixel_scale": 1.01, "ccd_gain": 5.8} + """:keyword read_noise: std of noise generated by read-out (in units of + electrons) :keyword pixel_scale: scale (in arcseconds) of pixels :keyword + ccd_gain: electrons/ADU (analog-to-digital unit).""" def kwargs_single_band(self): """ diff --git a/lenstronomy/SimulationAPI/data_api.py b/lenstronomy/SimulationAPI/data_api.py index 1c56dd389..fab429fe2 100644 --- a/lenstronomy/SimulationAPI/data_api.py +++ b/lenstronomy/SimulationAPI/data_api.py @@ -3,17 +3,19 @@ import lenstronomy.Util.util as util import numpy as np -__all__ = ['DataAPI'] +__all__ = ["DataAPI"] class DataAPI(SingleBand): - """ - This class is a wrapper of the general description of data in SingleBand() to translate those quantities into - configurations in the core lenstronomy Data modules to simulate images according to those quantities. - This class is meant to be an example of a wrapper. More possibilities in terms of PSF and data type - options are available. Have a look in the specific modules if you are interested in. + """This class is a wrapper of the general description of data in SingleBand() to + translate those quantities into configurations in the core lenstronomy Data modules + to simulate images according to those quantities. + This class is meant to be an example of a wrapper. More possibilities in terms of + PSF and data type options are available. Have a look in the specific modules if you + are interested in. 
""" + def __init__(self, numpix, kwargs_pixel_grid=None, **kwargs_single_band): """ @@ -24,16 +26,18 @@ def __init__(self, numpix, kwargs_pixel_grid=None, **kwargs_single_band): """ self.numpix = numpix if kwargs_pixel_grid is not None: - required_keys = ['ra_at_xy_0', 'dec_at_xy_0', 'transform_pix2angle'] + required_keys = ["ra_at_xy_0", "dec_at_xy_0", "transform_pix2angle"] if not all(k in kwargs_pixel_grid for k in required_keys): - raise ValueError('Missing 1 or more required' + 'kwargs_pixel_grid parameters') + raise ValueError( + "Missing 1 or more required" + "kwargs_pixel_grid parameters" + ) self._kwargs_pixel_grid = kwargs_pixel_grid SingleBand.__init__(self, **kwargs_single_band) @property def data_class(self): - """ - creates a Data() instance of lenstronomy based on knowledge of the observation + """Creates a Data() instance of lenstronomy based on knowledge of the + observation. :return: instance of Data() class """ @@ -48,20 +52,35 @@ def kwargs_data(self): """ # default pixel grid if self._kwargs_pixel_grid is None: - _, _, ra_at_xy_0, dec_at_xy_0, _, _, transform_pix2angle, _ = util.make_grid_with_coordtransform( - numPix=self.numpix, deltapix=self.pixel_scale, subgrid_res=1, - left_lower=False, inverse=False) + ( + _, + _, + ra_at_xy_0, + dec_at_xy_0, + _, + _, + transform_pix2angle, + _, + ) = util.make_grid_with_coordtransform( + numPix=self.numpix, + deltapix=self.pixel_scale, + subgrid_res=1, + left_lower=False, + inverse=False, + ) # user defined pixel grid else: - ra_at_xy_0 = self._kwargs_pixel_grid['ra_at_xy_0'] - dec_at_xy_0 = self._kwargs_pixel_grid['dec_at_xy_0'] - transform_pix2angle = self._kwargs_pixel_grid['transform_pix2angle'] + ra_at_xy_0 = self._kwargs_pixel_grid["ra_at_xy_0"] + dec_at_xy_0 = self._kwargs_pixel_grid["dec_at_xy_0"] + transform_pix2angle = self._kwargs_pixel_grid["transform_pix2angle"] # CCD gain corrected exposure time to allow a direct Poisson estimates based on IID counts scaled_exposure_time = self.flux_iid(1) - kwargs_data = {'image_data': np.zeros((self.numpix, self.numpix)), - 'ra_at_xy_0': ra_at_xy_0, - 'dec_at_xy_0': dec_at_xy_0, - 'transform_pix2angle': transform_pix2angle, - 'background_rms': self.background_noise, - 'exposure_time': scaled_exposure_time} + kwargs_data = { + "image_data": np.zeros((self.numpix, self.numpix)), + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + "transform_pix2angle": transform_pix2angle, + "background_rms": self.background_noise, + "exposure_time": scaled_exposure_time, + } return kwargs_data diff --git a/lenstronomy/SimulationAPI/model_api.py b/lenstronomy/SimulationAPI/model_api.py index 3dbbcc184..2b810ccce 100644 --- a/lenstronomy/SimulationAPI/model_api.py +++ b/lenstronomy/SimulationAPI/model_api.py @@ -6,44 +6,64 @@ import copy -__all__ = ['ModelAPI'] +__all__ = ["ModelAPI"] class ModelAPI(object): + """This class manages the model choices. + + The role is to return instances of the lenstronomy LightModel, LensModel, + PointSource modules according to the options chosen by the user. Currently, all + other model choices are equivalent to the ones provided by LightModel, LensModel, + PointSource. The current options of the class instance only describe a subset of + possibilities. """ - This class manages the model choices. The role is to return instances of the lenstronomy LightModel, LensModel, - PointSource modules according to the options chosen by the user. - Currently, all other model choices are equivalent to the ones provided by LightModel, LensModel, PointSource. 
- The current options of the class instance only describe a subset of possibilities. - """ - def __init__(self, lens_model_list=None, z_lens=None, z_source=None, lens_redshift_list=None, - source_light_model_list=None, lens_light_model_list=None, point_source_model_list=None, - source_redshift_list=None, cosmo=None, z_source_convention=None, tabulated_deflection_angles=None, - observed_convention_index=None): - """ - # TODO: make inputs follow the kwargs_model of the class_creator instances of 'kwargs_model', - # i.e. multi-plane options, perhaps others + + def __init__( + self, + lens_model_list=None, + z_lens=None, + z_source=None, + lens_redshift_list=None, + source_light_model_list=None, + lens_light_model_list=None, + point_source_model_list=None, + source_redshift_list=None, + cosmo=None, + z_source_convention=None, + tabulated_deflection_angles=None, + observed_convention_index=None, + ): + """# TODO: make inputs follow the kwargs_model of the class_creator instances of + 'kwargs_model', # i.e. multi-plane options, perhaps others :param lens_model_list: list of strings with lens model names - :param z_lens: redshift of the deflector (only considered when operating in single plane mode). - Is only needed for specific functions that require a cosmology. - :param z_source: redshift of the source: Needed in multi_plane option only, - not required for the core functionalities in the single plane mode. This will be the redshift of the source - plane (if not further specified the 'source_redshift_list') and the point source redshift - (regardless of 'source_redshift_list') - :param lens_redshift_list: list of deflector redshift (corresponding to the lens model list), - only applicable in multi_plane mode. - :param source_light_model_list: list of strings with source light model names (lensed light profiles) - :param lens_light_model_list: list of strings with lens light model names (not lensed light profiles) + :param z_lens: redshift of the deflector (only considered when operating in + single plane mode). Is only needed for specific functions that require a + cosmology. + :param z_source: redshift of the source: Needed in multi_plane option only, not + required for the core functionalities in the single plane mode. This will be + the redshift of the source plane (if not further specified the + 'source_redshift_list') and the point source redshift (regardless of + 'source_redshift_list') + :param lens_redshift_list: list of deflector redshift (corresponding to the lens + model list), only applicable in multi_plane mode. + :param source_light_model_list: list of strings with source light model names + (lensed light profiles) + :param lens_light_model_list: list of strings with lens light model names (not + lensed light profiles) :param point_source_model_list: list of strings with point source model names :param source_redshift_list: list of redshifts of the source profiles (optional) - :param cosmo: instance of the astropy cosmology class. If not specified, uses the default cosmology. - :param z_source_convention: float, redshift of a source to define the reduced deflection angles of the lens - models. If None, 'z_source' is used. - :param tabulated_deflection_angles: a class that returns deflection angles given a set of (x, y) coordinates. - Effectively a fixed lens model. 
See documentation in Profiles.numerical_alpha - :param observed_convention_index: a list of indices that correspond to lens models where the center_x,center_y - values correspond to the observed (lensed positions), not the physical positions in space + :param cosmo: instance of the astropy cosmology class. If not specified, uses + the default cosmology. + :param z_source_convention: float, redshift of a source to define the reduced + deflection angles of the lens models. If None, 'z_source' is used. + :param tabulated_deflection_angles: a class that returns deflection angles given + a set of (x, y) coordinates. Effectively a fixed lens model. See + documentation in Profiles.numerical_alpha + :param observed_convention_index: a list of indices that correspond to lens + models where the center_x,center_y values correspond to the observed (lensed + positions), not the physical positions in space """ if lens_model_list is None: lens_model_list = [] @@ -65,21 +85,33 @@ def __init__(self, lens_model_list=None, z_lens=None, z_source=None, lens_redshi if z_source_convention is None: z_source_convention = z_source - self._lens_model_class = LensModel(lens_model_list=lens_model_list, z_source=z_source, z_lens=z_lens, - lens_redshift_list=lens_redshift_list, multi_plane=multi_plane, cosmo=cosmo, - z_source_convention=z_source_convention, - numerical_alpha_class=tabulated_deflection_angles, - observed_convention_index=observed_convention_index) - self._source_model_class = LightModel(light_model_list=source_light_model_list, - source_redshift_list=source_redshift_list) - self._lens_light_model_class = LightModel(light_model_list=lens_light_model_list) + self._lens_model_class = LensModel( + lens_model_list=lens_model_list, + z_source=z_source, + z_lens=z_lens, + lens_redshift_list=lens_redshift_list, + multi_plane=multi_plane, + cosmo=cosmo, + z_source_convention=z_source_convention, + numerical_alpha_class=tabulated_deflection_angles, + observed_convention_index=observed_convention_index, + ) + self._source_model_class = LightModel( + light_model_list=source_light_model_list, + source_redshift_list=source_redshift_list, + ) + self._lens_light_model_class = LightModel( + light_model_list=lens_light_model_list + ) fixed_magnification = [False] * len(point_source_model_list) for i, ps_type in enumerate(point_source_model_list): - if ps_type == 'SOURCE_POSITION': + if ps_type == "SOURCE_POSITION": fixed_magnification[i] = True - self._point_source_model_class = PointSource(point_source_type_list=point_source_model_list, - lensModel=self._lens_model_class, - fixed_magnification_list=fixed_magnification) + self._point_source_model_class = PointSource( + point_source_type_list=point_source_model_list, + lensModel=self._lens_model_class, + fixed_magnification_list=fixed_magnification, + ) self._cosmo = cosmo self._z_source_convention = z_source_convention self._lens_redshift_list = lens_redshift_list @@ -134,17 +166,17 @@ def physical2lensing_conversion(self, kwargs_mass): z_lens = self._lens_redshift_list[i] lens_cosmo = LensCosmo(z_lens, self._z_source_convention, cosmo=self._cosmo) - if 'sigma_v' in kwargs_mass_i: - sigma_v = kwargs_mass_i['sigma_v'] + if "sigma_v" in kwargs_mass_i: + sigma_v = kwargs_mass_i["sigma_v"] theta_E = lens_cosmo.sis_sigma_v2theta_E(sigma_v) - kwargs_lens[i]['theta_E'] = theta_E - del kwargs_lens[i]['sigma_v'] - elif 'M200' in kwargs_mass_i: - M200 = kwargs_mass_i['M200'] - c = kwargs_mass_i['concentration'] + kwargs_lens[i]["theta_E"] = theta_E + del kwargs_lens[i]["sigma_v"] + 
elif "M200" in kwargs_mass_i: + M200 = kwargs_mass_i["M200"] + c = kwargs_mass_i["concentration"] Rs, alpha_RS = lens_cosmo.nfw_physical2angle(M200, c) - kwargs_lens[i]['Rs'] = Rs - kwargs_lens[i]['alpha_Rs'] = alpha_RS - del kwargs_lens[i]['M200'] - del kwargs_lens[i]['concentration'] + kwargs_lens[i]["Rs"] = Rs + kwargs_lens[i]["alpha_Rs"] = alpha_RS + del kwargs_lens[i]["M200"] + del kwargs_lens[i]["concentration"] return kwargs_lens diff --git a/lenstronomy/SimulationAPI/observation_api.py b/lenstronomy/SimulationAPI/observation_api.py index 690f1a850..09ae23935 100644 --- a/lenstronomy/SimulationAPI/observation_api.py +++ b/lenstronomy/SimulationAPI/observation_api.py @@ -4,14 +4,14 @@ from lenstronomy.Data.psf import PSF from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class Instrument(object): - """ - basic access points to instrument properties - """ + """Basic access points to instrument properties.""" + def __init__(self, pixel_scale, read_noise=None, ccd_gain=None): """ @@ -30,11 +30,19 @@ def __init__(self, pixel_scale, read_noise=None, ccd_gain=None): @export class Observation(object): - """ - basic access point to observation properties - """ - def __init__(self, exposure_time, sky_brightness=None, seeing=None, num_exposures=1, - psf_type='GAUSSIAN', kernel_point_source=None, truncation=5, point_source_supersampling_factor=1): + """Basic access point to observation properties.""" + + def __init__( + self, + exposure_time, + sky_brightness=None, + seeing=None, + num_exposures=1, + psf_type="GAUSSIAN", + kernel_point_source=None, + truncation=5, + point_source_supersampling_factor=1, + ): """ :param exposure_time: exposure time per image (in seconds) @@ -56,18 +64,25 @@ def __init__(self, exposure_time, sky_brightness=None, seeing=None, num_exposure self._kernel_point_source = kernel_point_source self._point_source_supersampling_factor = point_source_supersampling_factor - def update_observation(self, exposure_time=None, sky_brightness=None, seeing=None, num_exposures=None, - psf_type=None, kernel_point_source=None): - """ - updates class instance with new properties if specific argument is not None + def update_observation( + self, + exposure_time=None, + sky_brightness=None, + seeing=None, + num_exposures=None, + psf_type=None, + kernel_point_source=None, + ): + """Updates class instance with new properties if specific argument is not None. 
:param exposure_time: exposure time per image (in seconds) :param sky_brightness: sky brightness (in magnitude per square arcseconds) - :param seeing: full width at half maximum of the PSF (if not specific psf_model is specified) + :param seeing: full width at half maximum of the PSF (if not specific psf_model + is specified) :param num_exposures: number of exposures that are combined :param psf_type: string, type of PSF ('GAUSSIAN' and 'PIXEL' supported) - :param kernel_point_source: 2d numpy array, model of PSF centered with odd number of pixels per axis - (optional when psf_type='PIXEL' is chosen) + :param kernel_point_source: 2d numpy array, model of PSF centered with odd + number of pixels per axis (optional when psf_type='PIXEL' is chosen) :return: None, updated class instance """ if exposure_time is not None: @@ -86,13 +101,12 @@ def update_observation(self, exposure_time=None, sky_brightness=None, seeing=Non @property def _sky_brightness(self): if self._sky_brightness_ is None: - raise ValueError('sky_brightness is not set in the class instance!') + raise ValueError("sky_brightness is not set in the class instance!") return self._sky_brightness_ @property def exposure_time(self): - """ - total exposure time + """Total exposure time. :return: summed exposure time """ @@ -100,33 +114,37 @@ def exposure_time(self): @property def kwargs_psf(self): - """ - keyword arguments to initiate a PSF() class + """Keyword arguments to initiate a PSF() class. :return: kwargs_psf """ - if self._psf_type == 'GAUSSIAN': + if self._psf_type == "GAUSSIAN": psf_type = "GAUSSIAN" fwhm = self._seeing truncation = self._truncation - kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm, 'truncation': truncation} - elif self._psf_type == 'PIXEL': + kwargs_psf = {"psf_type": psf_type, "fwhm": fwhm, "truncation": truncation} + elif self._psf_type == "PIXEL": if self._kernel_point_source is not None: - kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': self._kernel_point_source, - 'point_source_supersampling_factor': self._point_source_supersampling_factor} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": self._kernel_point_source, + "point_source_supersampling_factor": self._point_source_supersampling_factor, + } else: - raise ValueError("You need to create the class instance with a psf_model!") - elif self._psf_type == 'NONE': - kwargs_psf = {'psf_type': "NONE"} + raise ValueError( + "You need to create the class instance with a psf_model!" + ) + elif self._psf_type == "NONE": + kwargs_psf = {"psf_type": "NONE"} else: raise ValueError("psf_type %s not supported!" % self._psf_type) return kwargs_psf @property def psf_class(self): - """ - creates instance of PSF() class based on knowledge of the observations - For the full possibility of how to create such an instance, see the PSF() class documentation + """Creates instance of PSF() class based on knowledge of the observations For + the full possibility of how to create such an instance, see the PSF() class + documentation. 
:return: instance of PSF() class """ @@ -136,12 +154,25 @@ def psf_class(self): @export class SingleBand(Instrument, Observation): - """ - class that combines Instrument and Observation - """ - def __init__(self, pixel_scale, exposure_time, magnitude_zero_point, read_noise=None, ccd_gain=None, - sky_brightness=None, seeing=None, num_exposures=1, psf_type='GAUSSIAN', kernel_point_source=None, - truncation=5, point_source_supersampling_factor=1, data_count_unit='e-', background_noise=None): + """Class that combines Instrument and Observation.""" + + def __init__( + self, + pixel_scale, + exposure_time, + magnitude_zero_point, + read_noise=None, + ccd_gain=None, + sky_brightness=None, + seeing=None, + num_exposures=1, + psf_type="GAUSSIAN", + kernel_point_source=None, + truncation=5, + point_source_supersampling_factor=1, + data_count_unit="e-", + background_noise=None, + ): """ :param read_noise: std of noise generated by read-out (in units of electrons) @@ -163,14 +194,25 @@ def __init__(self, pixel_scale, exposure_time, magnitude_zero_point, read_noise= sky brightness etc. in units of the data_count_units (e- or ADU) If you set this parameter, it will use this value regardless of the values of read_noise, sky_brightness """ - Instrument.__init__(self, pixel_scale, read_noise, ccd_gain) # read_noise and ccd_gain can be None - Observation.__init__(self, exposure_time=exposure_time, sky_brightness=sky_brightness, - seeing=seeing, num_exposures=num_exposures, - psf_type=psf_type, kernel_point_source=kernel_point_source, - point_source_supersampling_factor=point_source_supersampling_factor, - truncation=truncation) - if data_count_unit not in ['e-', 'ADU']: - raise ValueError("count_unit type %s not supported! Please choose e- or ADU." % data_count_unit) + Instrument.__init__( + self, pixel_scale, read_noise, ccd_gain + ) # read_noise and ccd_gain can be None + Observation.__init__( + self, + exposure_time=exposure_time, + sky_brightness=sky_brightness, + seeing=seeing, + num_exposures=num_exposures, + psf_type=psf_type, + kernel_point_source=kernel_point_source, + point_source_supersampling_factor=point_source_supersampling_factor, + truncation=truncation, + ) + if data_count_unit not in ["e-", "ADU"]: + raise ValueError( + "count_unit type %s not supported! Please choose e- or ADU." + % data_count_unit + ) self._data_count_unit = data_count_unit self._background_noise = background_noise self._magnitude_zero_point = magnitude_zero_point @@ -182,7 +224,7 @@ def sky_brightness(self): :return: sky brightness (counts per square arcseconds in unit of data (e- or ADU's) per unit time) """ cps = self._sky_brightness_cps - if self._data_count_unit == 'ADU': + if self._data_count_unit == "ADU": cps /= self.ccd_gain return cps @@ -192,28 +234,38 @@ def _sky_brightness_cps(self): :return: sky brightness in electrons per second """ - cps = data_util.magnitude2cps(self._sky_brightness, magnitude_zero_point=self._magnitude_zero_point) + cps = data_util.magnitude2cps( + self._sky_brightness, magnitude_zero_point=self._magnitude_zero_point + ) return cps @property def background_noise(self): - """ - Gaussian sigma of noise level per pixel in counts (e- or ADU) per second + """Gaussian sigma of noise level per pixel in counts (e- or ADU) per second. 
:return: sqrt(variance) of background noise level in data units """ if self._background_noise is None: if self._read_noise is None: - raise ValueError('read_noise is not specified to evaluate background noise!') - bkg_noise = data_util.bkg_noise(self._read_noise, self._exposure_time, self._sky_brightness_cps, - self.pixel_scale, num_exposures=self._num_exposures) - if self._data_count_unit == 'ADU': + raise ValueError( + "read_noise is not specified to evaluate background noise!" + ) + bkg_noise = data_util.bkg_noise( + self._read_noise, + self._exposure_time, + self._sky_brightness_cps, + self.pixel_scale, + num_exposures=self._num_exposures, + ) + if self._data_count_unit == "ADU": bkg_noise /= self.ccd_gain return bkg_noise else: if self._read_noise is not None: - warnings.warn('read noise is specified but not used for noise properties. Background noise is estimated' - ' from "background_noise" argument.') + warnings.warn( + "read noise is specified but not used for noise properties. Background noise is estimated" + ' from "background_noise" argument.' + ) return self._background_noise def flux_noise(self, flux): @@ -223,31 +275,37 @@ def flux_noise(self, flux): :return: Gaussian approximation of Poisson statistics in IIDs sqrt(variance) """ flux_iid = self.flux_iid(flux) - variance = flux_iid # the variance of a Poisson distribution is the IID count number + variance = ( + flux_iid # the variance of a Poisson distribution is the IID count number + ) if isinstance(variance, int) or isinstance(variance, float): variance = max(variance, 0) else: - variance[flux_iid < 0] = 0 # make sure negative pixels do not lead to variances (or nans) in the return + variance[ + flux_iid < 0 + ] = 0 # make sure negative pixels do not lead to variances (or nans) in the return noise = np.sqrt(variance) / self.exposure_time - if self._data_count_unit == 'ADU': + if self._data_count_unit == "ADU": noise /= self.ccd_gain return noise def flux_iid(self, flux_per_second): - """ - IID counts. This can be used by lenstronomy to estimate the Poisson errors + """IID counts. This can be used by lenstronomy to estimate the Poisson errors keeping the assumption that the counts are IIDs (even if they are not). 
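The flux_iid / flux_noise pair amounts to the usual Gaussian approximation of Poisson counting noise; a worked sketch with made-up numbers:

flux_cps = 100.0                      # source flux in e-/s (illustrative)
exp_time = 90.0 * 10                  # total exposure time: 10 x 90 s
counts = flux_cps * exp_time          # IID counts = 90000
sigma_cps = counts**0.5 / exp_time    # sqrt(variance) back in e-/s, ~0.33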
- :param flux_per_second: flux count per second in the units set in this class (ADU or e-) + :param flux_per_second: flux count per second in the units set in this class + (ADU or e-) :return: IID count number """ - if self._data_count_unit == 'ADU': + if self._data_count_unit == "ADU": exp_time = self.ccd_gain * self.exposure_time else: exp_time = self.exposure_time return exp_time * flux_per_second - def noise_for_model(self, model, background_noise=True, poisson_noise=True, seed=None): + def noise_for_model( + self, model, background_noise=True, poisson_noise=True, seed=None + ): """ :param model: 2d numpy array of modelled image (with pixels in units of data specified in class) @@ -275,11 +333,10 @@ def estimate_noise(self, image): :param image: noisy data, background subtracted :return: estimated noise map sqrt(variance) for each pixel as estimated from the instrument and observation """ - return np.sqrt(self.background_noise**2 + self.flux_noise(image)**2) + return np.sqrt(self.background_noise**2 + self.flux_noise(image) ** 2) def magnitude2cps(self, magnitude): - """ - converts an apparent magnitude to counts per second (in units of the data) + """Converts an apparent magnitude to counts per second (in units of the data) The zero point of an instrument, by definition, is the magnitude of an object that produces one count (or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of @@ -290,7 +347,9 @@ def magnitude2cps(self, magnitude): :return: counts per second of object """ # compute counts in units of e- or ADS (depending on data and magnitude zero point defined) - cps = data_util.magnitude2cps(magnitude, magnitude_zero_point=self._magnitude_zero_point) - if self._data_count_unit == 'ADU': + cps = data_util.magnitude2cps( + magnitude, magnitude_zero_point=self._magnitude_zero_point + ) + if self._data_count_unit == "ADU": cps /= self.ccd_gain return cps diff --git a/lenstronomy/SimulationAPI/observation_constructor.py b/lenstronomy/SimulationAPI/observation_constructor.py index 7dca0a33d..2e137a297 100644 --- a/lenstronomy/SimulationAPI/observation_constructor.py +++ b/lenstronomy/SimulationAPI/observation_constructor.py @@ -1,10 +1,10 @@ import lenstronomy.Util.util as util -instrument_name_list = ['LSST'] -observation_name_list = ['LSST_g_band', 'LSST_r_band', 'LSST_i_band'] +instrument_name_list = ["LSST"] +observation_name_list = ["LSST_g_band", "LSST_r_band", "LSST_i_band"] -__all__ = ['observation_constructor'] +__all__ = ["observation_constructor"] def observation_constructor(instrument_name, observation_name): @@ -15,45 +15,54 @@ def observation_constructor(instrument_name, observation_name): :return: instance of the SimulationAPI.data_type instance """ - if instrument_name == 'LSST': + if instrument_name == "LSST": kwargs_instrument = LSST_camera else: - raise ValueError("instrument name %s not supported! Choose among %s" % (instrument_name, instrument_name_list)) + raise ValueError( + "instrument name %s not supported! Choose among %s" + % (instrument_name, instrument_name_list) + ) - if observation_name == 'LSST_g_band': + if observation_name == "LSST_g_band": kwargs_observation = LSST_g_band_obs - elif observation_name == 'LSST_r_band': + elif observation_name == "LSST_r_band": kwargs_observation = LSST_r_band_obs - elif observation_name == 'LSST_i_band': + elif observation_name == "LSST_i_band": kwargs_observation = LSST_i_band_obs else: - raise ValueError('observation name %s not supported! 
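magnitude2cps rests on the zero-point convention quoted in its docstring: an object at the zero-point magnitude produces one count per second. A quick numeric check of that relation (data_util.magnitude2cps is assumed to implement exactly this):

magnitude, zero_point = 25.0, 30.0
cps = 10 ** (-0.4 * (magnitude - zero_point))   # = 100 counts per second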
Choose among %s' % - (observation_name, observation_name_list)) + raise ValueError( + "observation name %s not supported! Choose among %s" + % (observation_name, observation_name_list) + ) kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observation) return kwargs_data -LSST_camera = {'read_noise': 10, - 'pixel_scale': 0.263, - 'ccd_gain': 4.5} - -LSST_g_band_obs = {'exposure_time': 90., - 'sky_brightness': 21.7, - 'magnitude_zero_point': 30, - 'num_exposures': 10, - 'seeing': 0.9, - 'psf_type': 'GAUSSIAN'} - -LSST_r_band_obs = {'exposure_time': 90., - 'sky_brightness': 20.7, - 'magnitude_zero_point': 30, - 'num_exposures': 10, - 'seeing': 0.9, - 'psf_type': 'GAUSSIAN'} - -LSST_i_band_obs = {'exposure_time': 90., - 'sky_brightness': 20.1, - 'magnitude_zero_point': 30, - 'num_exposures': 10, - 'seeing': 0.9, - 'psf_type': 'GAUSSIAN'} +LSST_camera = {"read_noise": 10, "pixel_scale": 0.263, "ccd_gain": 4.5} + +LSST_g_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 21.7, + "magnitude_zero_point": 30, + "num_exposures": 10, + "seeing": 0.9, + "psf_type": "GAUSSIAN", +} + +LSST_r_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 20.7, + "magnitude_zero_point": 30, + "num_exposures": 10, + "seeing": 0.9, + "psf_type": "GAUSSIAN", +} + +LSST_i_band_obs = { + "exposure_time": 90.0, + "sky_brightness": 20.1, + "magnitude_zero_point": 30, + "num_exposures": 10, + "seeing": 0.9, + "psf_type": "GAUSSIAN", +} diff --git a/lenstronomy/SimulationAPI/point_source_variability.py b/lenstronomy/SimulationAPI/point_source_variability.py index 34ba2208c..3b3a844b7 100644 --- a/lenstronomy/SimulationAPI/point_source_variability.py +++ b/lenstronomy/SimulationAPI/point_source_variability.py @@ -3,21 +3,34 @@ import numpy as np -__all__ = ['PointSourceVariability'] +__all__ = ["PointSourceVariability"] class PointSourceVariability(object): + """This class enables to plug in a variable point source in the source plane to be + added on top of a fixed lens and extended surface brightness model. The class + inherits SimAPI and additionally requires the lens and light model parameters as + well as a position in the source plane. + + The intrinsic source variability can be defined by the user and additional + uncorrelated variability in the image plane can be plugged in as well (e.g. due to + micro-lensing) """ - This class enables to plug in a variable point source in the source plane to be added on top of a fixed lens and - extended surface brightness model. The class inherits SimAPI and additionally requires the lens and light model - parameters as well as a position in the source plane. - The intrinsic source variability can be defined by the user and additional uncorrelated variability in the image - plane can be plugged in as well (e.g. 
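The merged dictionary returned by observation_constructor can be passed on directly as a single-band configuration; a short usage sketch:

from lenstronomy.SimulationAPI.observation_constructor import observation_constructor

kwargs_single_band = observation_constructor("LSST", "LSST_r_band")
# instrument and observation kwargs merged, e.g. pixel_scale=0.263, sky_brightness=20.7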
due to micro-lensing) - """ - - def __init__(self, source_x, source_y, variability_func, numpix, kwargs_single_band, kwargs_model, kwargs_numerics, - kwargs_lens, kwargs_source_mag=None, kwargs_lens_light_mag=None, kwargs_ps_mag=None): + def __init__( + self, + source_x, + source_y, + variability_func, + numpix, + kwargs_single_band, + kwargs_model, + kwargs_numerics, + kwargs_lens, + kwargs_source_mag=None, + kwargs_lens_light_mag=None, + kwargs_ps_mag=None, + ): """ :param source_x: RA of source position @@ -36,25 +49,37 @@ def __init__(self, source_x, source_y, variability_func, numpix, kwargs_single_b # create background SimAPI class instance sim_api_bkg = SimAPI(numpix, kwargs_single_band, kwargs_model) image_model_bkg = sim_api_bkg.image_model_class(kwargs_numerics) - kwargs_lens_light, kwargs_source, kwargs_ps = sim_api_bkg.magnitude2amplitude(kwargs_lens_light_mag, - kwargs_source_mag, - kwargs_ps_mag) - self._image_bkg = image_model_bkg.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps) + kwargs_lens_light, kwargs_source, kwargs_ps = sim_api_bkg.magnitude2amplitude( + kwargs_lens_light_mag, kwargs_source_mag, kwargs_ps_mag + ) + self._image_bkg = image_model_bkg.image( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) # compute image positions of point source x_center, y_center = sim_api_bkg.data_class.center search_window = np.max(sim_api_bkg.data_class.width) lensModel = image_model_bkg.LensModel solver = LensEquationSolver(lensModel=lensModel) - image_x, image_y = solver.image_position_from_source(source_x, source_y, kwargs_lens, min_distance=0.1, - search_window=search_window, precision_limit=10**(-10), - num_iter_max=100, arrival_time_sort=True, - x_center=x_center, y_center=y_center) + image_x, image_y = solver.image_position_from_source( + source_x, + source_y, + kwargs_lens, + min_distance=0.1, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=100, + arrival_time_sort=True, + x_center=x_center, + y_center=y_center, + ) mag = lensModel.magnification(image_x, image_y, kwargs_lens) dt_days = lensModel.arrival_time(image_x, image_y, kwargs_lens) - dt_days -= np.min(dt_days) # shift the arrival times such that the first image arrives at t=0 and the other + dt_days -= np.min( + dt_days + ) # shift the arrival times such that the first image arrives at t=0 and the other # times at t>=0 # add image plane source model - kwargs_model_ps = {'point_source_model_list': ['LENSED_POSITION']} + kwargs_model_ps = {"point_source_model_list": ["LENSED_POSITION"]} self.sim_api_ps = SimAPI(numpix, kwargs_single_band, kwargs_model_ps) self._image_model_ps = self.sim_api_ps.image_model_class(kwargs_numerics) self._kwargs_lens = kwargs_lens @@ -90,7 +115,9 @@ def image_time(self, time=0): :return: image with time variable source at given time """ kwargs_ps_time = self.point_source_time(time) - point_source = self._image_model_ps.point_source(kwargs_ps_time, kwargs_lens=self._kwargs_lens) + point_source = self._image_model_ps.point_source( + kwargs_ps_time, kwargs_lens=self._kwargs_lens + ) return point_source + self.image_bkg def point_source_time(self, t): @@ -100,9 +127,11 @@ def point_source_time(self, t): :return: image plane parameters of the point source observed at t """ mag = np.zeros_like(self._dt_days) - kwargs_ps = [{'ra_image': self._image_x, 'dec_image': self._image_y}] + kwargs_ps = [{"ra_image": self._image_x, "dec_image": self._image_y}] for i, dt in enumerate(self._dt_days): t_i = -dt + t mag[i] = self._variability_func(t_i) - 
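variability_func is expected to map time (in days) to an apparent magnitude; point_source_time then evaluates it at t - dt for each image. A purely hypothetical light curve for illustration:

import numpy as np

def variability_func(t):
    # illustrative intrinsic light curve: 0.5 mag oscillation with a 30-day period
    return 22.0 + 0.5 * np.sin(2 * np.pi * t / 30.0)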
kwargs_ps[0]['point_amp'] = self.sim_api_ps.magnitude2cps(mag) * np.abs(self._mag) + kwargs_ps[0]["point_amp"] = self.sim_api_ps.magnitude2cps(mag) * np.abs( + self._mag + ) return kwargs_ps diff --git a/lenstronomy/SimulationAPI/sim_api.py b/lenstronomy/SimulationAPI/sim_api.py index 67c33ae57..d43f2dde0 100644 --- a/lenstronomy/SimulationAPI/sim_api.py +++ b/lenstronomy/SimulationAPI/sim_api.py @@ -5,24 +5,27 @@ import copy import numpy as np -__all__ = ['SimAPI'] +__all__ = ["SimAPI"] class SimAPI(DataAPI, ModelAPI): + """This class manages the model parameters in regard of the data specified in + SingleBand. + + In particular, this API translates models specified in units of astronomical + magnitudes into the amplitude parameters used in the LightModel module of + lenstronomy. Optionally, this class can also handle inputs with cosmology dependent + lensing quantities and translates them to the optical quantities being used in the + lenstronomy LensModel module. All other model choices are equivalent to the ones + provided by LightModel, LensModel, PointSource modules """ - This class manages the model parameters in regard of the data specified in SingleBand. In particular, - this API translates models specified in units of astronomical magnitudes into the amplitude parameters used in the - LightModel module of lenstronomy. - Optionally, this class can also handle inputs with cosmology dependent lensing quantities and translates them to - the optical quantities being used in the lenstronomy LensModel module. - All other model choices are equivalent to the ones provided by LightModel, LensModel, PointSource modules - """ + def __init__(self, numpix, kwargs_single_band, kwargs_model): """ - + :param numpix: number of pixels per axis - :param kwargs_single_band: keyword arguments specifying the class instance of DataAPI - :param kwargs_model: keyword arguments specifying the class instance of ModelAPI + :param kwargs_single_band: keyword arguments specifying the class instance of DataAPI + :param kwargs_model: keyword arguments specifying the class instance of ModelAPI """ DataAPI.__init__(self, numpix, **kwargs_single_band) ModelAPI.__init__(self, **kwargs_model) @@ -33,55 +36,71 @@ def image_model_class(self, kwargs_numerics=None): :param kwargs_numerics: keyword arguments list of Numerics module :return: instance of the ImageModel class with all the specified configurations """ - return ImageModel(self.data_class, self.psf_class, self.lens_model_class, self.source_model_class, - self.lens_light_model_class, self.point_source_model_class, kwargs_numerics=kwargs_numerics) + return ImageModel( + self.data_class, + self.psf_class, + self.lens_model_class, + self.source_model_class, + self.lens_light_model_class, + self.point_source_model_class, + kwargs_numerics=kwargs_numerics, + ) - def magnitude2amplitude(self, kwargs_lens_light_mag=None, kwargs_source_mag=None, kwargs_ps_mag=None): - """ - 'magnitude' definition are in APPARENT magnitudes as observed on the sky, not intrinsic! + def magnitude2amplitude( + self, kwargs_lens_light_mag=None, kwargs_source_mag=None, kwargs_ps_mag=None + ): + """'magnitude' definition are in APPARENT magnitudes as observed on the sky, not + intrinsic! - :param kwargs_lens_light_mag: keyword argument list as for LightModel module except that 'amp' parameters are - 'magnitude' parameters. - :param kwargs_source_mag: keyword argument list as for LightModel module except that 'amp' parameters are - 'magnitude' parameters. 
- :param kwargs_ps_mag: keyword argument list as for PointSource module except that 'amp' parameters are - 'magnitude' parameters. - :return: value of the lenstronomy 'amp' parameter such that the total flux of the profile type results in this - magnitude for all the light models. These keyword arguments conform with the lenstronomy LightModel syntax. + :param kwargs_lens_light_mag: keyword argument list as for LightModel module + except that 'amp' parameters are 'magnitude' parameters. + :param kwargs_source_mag: keyword argument list as for LightModel module except + that 'amp' parameters are 'magnitude' parameters. + :param kwargs_ps_mag: keyword argument list as for PointSource module except + that 'amp' parameters are 'magnitude' parameters. + :return: value of the lenstronomy 'amp' parameter such that the total flux of + the profile type results in this magnitude for all the light models. These + keyword arguments conform with the lenstronomy LightModel syntax. """ kwargs_lens_light = copy.deepcopy(kwargs_lens_light_mag) if kwargs_lens_light_mag is not None: for i, kwargs_mag in enumerate(kwargs_lens_light_mag): kwargs_new = kwargs_lens_light[i] - del kwargs_new['magnitude'] - cps_norm = self.lens_light_model_class.total_flux(kwargs_list=kwargs_lens_light, norm=True, k=i)[0] - magnitude = kwargs_mag['magnitude'] + del kwargs_new["magnitude"] + cps_norm = self.lens_light_model_class.total_flux( + kwargs_list=kwargs_lens_light, norm=True, k=i + )[0] + magnitude = kwargs_mag["magnitude"] cps = self.magnitude2cps(magnitude) amp = cps / cps_norm - kwargs_new['amp'] = amp + kwargs_new["amp"] = amp kwargs_source = copy.deepcopy(kwargs_source_mag) if kwargs_source_mag is not None: for i, kwargs_mag in enumerate(kwargs_source_mag): kwargs_new = kwargs_source[i] - del kwargs_new['magnitude'] - cps_norm = self.source_model_class.total_flux(kwargs_list=kwargs_source, norm=True, k=i)[0] - magnitude = kwargs_mag['magnitude'] + del kwargs_new["magnitude"] + cps_norm = self.source_model_class.total_flux( + kwargs_list=kwargs_source, norm=True, k=i + )[0] + magnitude = kwargs_mag["magnitude"] cps = self.magnitude2cps(magnitude) amp = cps / cps_norm - kwargs_new['amp'] = amp + kwargs_new["amp"] = amp kwargs_ps = copy.deepcopy(kwargs_ps_mag) if kwargs_ps_mag is not None: amp_list = [] for i, kwargs_mag in enumerate(kwargs_ps_mag): kwargs_new = kwargs_ps[i] - del kwargs_new['magnitude'] + del kwargs_new["magnitude"] cps_norm = 1 - magnitude = np.array(kwargs_mag['magnitude']) + magnitude = np.array(kwargs_mag["magnitude"]) cps = self.magnitude2cps(magnitude) amp = cps / cps_norm amp_list.append(amp) - kwargs_ps = self.point_source_model_class.set_amplitudes(amp_list, kwargs_ps) + kwargs_ps = self.point_source_model_class.set_amplitudes( + amp_list, kwargs_ps + ) return kwargs_lens_light, kwargs_source, kwargs_ps diff --git a/lenstronomy/Util/analysis_util.py b/lenstronomy/Util/analysis_util.py index 78c8acc12..db0068094 100644 --- a/lenstronomy/Util/analysis_util.py +++ b/lenstronomy/Util/analysis_util.py @@ -4,6 +4,7 @@ from lenstronomy.Util import param_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -19,12 +20,12 @@ def half_light_radius(lens_light, x_grid, y_grid, center_x=0, center_y=0): :return: """ lens_light[lens_light < 0] = 0 - total_flux_2 = np.sum(lens_light)/2. 
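Putting magnitude2amplitude to use for a single Sersic source (a sketch: kwargs_single_band is assumed to be a valid single-band configuration such as the LSST r-band dictionary above, and all parameter values are illustrative):

from lenstronomy.SimulationAPI.sim_api import SimAPI

kwargs_model = {"source_light_model_list": ["SERSIC"]}
kwargs_source_mag = [
    {"magnitude": 22.0, "R_sersic": 0.3, "n_sersic": 2.0, "center_x": 0.0, "center_y": 0.0}
]
sim = SimAPI(numpix=64, kwargs_single_band=kwargs_single_band, kwargs_model=kwargs_model)
_, kwargs_source, _ = sim.magnitude2amplitude(kwargs_source_mag=kwargs_source_mag)
# kwargs_source[0] now carries 'amp' = cps / normalized total flux instead of 'magnitude'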
- r_max = np.max(np.sqrt((x_grid-center_x)**2 + (y_grid-center_y)**2)) + total_flux_2 = np.sum(lens_light) / 2.0 + r_max = np.max(np.sqrt((x_grid - center_x) ** 2 + (y_grid - center_y) ** 2)) for i in range(1000): - r = i/500. * r_max + r = i / 500.0 * r_max mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x, center_y, r) - flux_enclosed = np.sum(np.array(lens_light)*mask) + flux_enclosed = np.sum(np.array(lens_light) * mask) if flux_enclosed > total_flux_2: return r return -1 @@ -32,8 +33,7 @@ def half_light_radius(lens_light, x_grid, y_grid, center_x=0, center_y=0): @export def radial_profile(light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None): - """ - computes radial profile + """Computes radial profile. :param light_grid: array of surface brightness :param x_grid: x-axis coordinates @@ -43,15 +43,15 @@ def radial_profile(light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None): :param n: number of discrete steps :return: I(r), r with r in units of the coordinate grid """ - r_max = np.max(np.sqrt((x_grid-center_x)**2 + (y_grid-center_y)**2)) + r_max = np.max(np.sqrt((x_grid - center_x) ** 2 + (y_grid - center_y) ** 2)) if n is None: n = int(np.sqrt(len(x_grid))) I_r = np.zeros(n) I_enclosed = 0 - r = np.linspace(1./n*r_max, r_max, n) + r = np.linspace(1.0 / n * r_max, r_max, n) for i, r_i in enumerate(r): mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x, center_y, r_i) - flux_enclosed = np.sum(np.array(light_grid)*mask) + flux_enclosed = np.sum(np.array(light_grid) * mask) I_r[i] = flux_enclosed - I_enclosed I_enclosed = flux_enclosed return I_r, r @@ -59,9 +59,7 @@ def radial_profile(light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None): @export def azimuthalAverage(image, center=None): - """ - - Calculate the azimuthally averaged radial profile. + """Calculate the azimuthally averaged radial profile. image - The 2D image center - The [x,y] pixel coordinates used as the center. The default is None, which then uses the center of the @@ -72,7 +70,7 @@ def azimuthalAverage(image, center=None): y, x = np.indices(image.shape) if not center: - center = np.array([(x.max()-x.min())/2.0, (x.max()-x.min())/2.0]) + center = np.array([(x.max() - x.min()) / 2.0, (x.max() - x.min()) / 2.0]) r = np.hypot(x - center[0], y - center[1]) @@ -86,8 +84,8 @@ def azimuthalAverage(image, center=None): # Find all pixels that fall within each radial bin. deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented - rind = np.where(deltar)[0] # location of changed radius - nr = rind[1:] - rind[:-1] # number of radius bin + rind = np.where(deltar)[0] # location of changed radius + nr = rind[1:] - rind[:-1] # number of radius bin # Cumulative sum to figure out sums for each radius bin csim = np.cumsum(i_sorted, dtype=float) @@ -99,8 +97,7 @@ def azimuthalAverage(image, center=None): @export def moments(I_xy_input, x, y): - """ - compute quadrupole moments from a light distribution + """Compute quadrupole moments from a light distribution. :param I_xy_input: light distribution :param x: x-coordinates of I_xy @@ -112,7 +109,7 @@ def moments(I_xy_input, x, y): I_xy -= background x_ = np.sum(I_xy * x) y_ = np.sum(I_xy * y) - r = (np.max(x) - np.min(x)) / 3. 
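half_light_radius and radial_profile operate on flattened coordinate grids; a self-contained sketch on a synthetic circular Gaussian blob (grid size and width are arbitrary, and lenstronomy's make_grid utility is assumed for the coordinates):

import numpy as np
from lenstronomy.Util import util, analysis_util

x_grid, y_grid = util.make_grid(numPix=100, deltapix=0.05)   # 5" x 5" field
light = np.exp(-(x_grid**2 + y_grid**2) / (2 * 0.3**2))      # circular Gaussian, sigma = 0.3"
r_half = analysis_util.half_light_radius(light, x_grid, y_grid)
I_r, r = analysis_util.radial_profile(light, x_grid, y_grid, n=50)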
+ r = (np.max(x) - np.min(x)) / 3.0 mask = mask_util.mask_azimuthal(x, y, center_x=x_, center_y=y_, r=r) Q_xx = np.sum(I_xy * mask * (x - x_) ** 2) Q_xy = np.sum(I_xy * mask * (x - x_) * (y - y_)) @@ -122,8 +119,7 @@ def moments(I_xy_input, x, y): @export def _ellipticities(I_xy, x, y): - """ - compute ellipticities of a light distribution + """Compute ellipticities of a light distribution. :param I_xy: surface brightness I(x, y) as array :param x: x-coordinates in same shape as I_xy @@ -131,13 +127,16 @@ def _ellipticities(I_xy, x, y): :return: reduced shear moments g1, g2 """ Q_xx, Q_xy, Q_yy, bkg = moments(I_xy, x, y) - norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx*Q_yy - Q_xy**2) + norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy**2) e1 = (Q_xx - Q_yy) / norm e2 = 2 * Q_xy / norm - return e1 / (1+bkg), e2 / (1+bkg) + return e1 / (1 + bkg), e2 / (1 + bkg) + @export -def ellipticities(I_xy, x_grid, y_grid, num_iterative=30, iterative=False, center_x=0, center_y=0): +def ellipticities( + I_xy, x_grid, y_grid, num_iterative=30, iterative=False, center_x=0, center_y=0 +): """ :param I_xy: surface brightness I(x, y) as array @@ -149,13 +148,17 @@ def ellipticities(I_xy, x_grid, y_grid, num_iterative=30, iterative=False, cente :type num_iterative: int :return: e1, e2 eccentricities """ - radius = (np.max(x_grid) - np.min(x_grid)) / 2. - mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x=center_x, center_y=center_y, r=radius) + radius = (np.max(x_grid) - np.min(x_grid)) / 2.0 + mask = mask_util.mask_azimuthal( + x_grid, y_grid, center_x=center_x, center_y=center_y, r=radius + ) e1, e2 = _ellipticities(I_xy * mask, x_grid - center_x, y_grid - center_y) phi, q = param_util.ellipticity2phi_q(e1, e2) if iterative: for i in range(num_iterative): - mask = mask_util.mask_eccentric(x_grid, y_grid, center_x, center_y, e1, e2, r=radius * q / np.sqrt(2)) + mask = mask_util.mask_eccentric( + x_grid, y_grid, center_x, center_y, e1, e2, r=radius * q / np.sqrt(2) + ) e1, e2 = _ellipticities(I_xy * mask, x_grid - center_x, y_grid - center_y) phi, q = param_util.ellipticity2phi_q(e1, e2) return e1, e2 @@ -163,8 +166,7 @@ def ellipticities(I_xy, x_grid, y_grid, num_iterative=30, iterative=False, cente @export def bic_model(logL, num_data, num_param): - """ - Bayesian information criteria + """Bayesian information criteria. :param logL: log likelihood value :param num_data: numbers of data @@ -177,8 +179,7 @@ def bic_model(logL, num_data, num_param): @export def profile_center(kwargs_list, center_x=None, center_y=None): - """ - utility routine that results in the centroid estimate for the profile estimates + """Utility routine that results in the centroid estimate for the profile estimates. :param kwargs_list: light parameter keyword argument list (can be light or mass) :param center_x: None or center @@ -186,10 +187,12 @@ def profile_center(kwargs_list, center_x=None, center_y=None): :return: center_x, center_y """ if center_x is None or center_y is None: - if 'center_x' in kwargs_list[0]: - center_x = kwargs_list[0]['center_x'] - center_y = kwargs_list[0]['center_y'] + if "center_x" in kwargs_list[0]: + center_x = kwargs_list[0]["center_x"] + center_y = kwargs_list[0]["center_y"] else: - raise ValueError('The center has to be provided as a function argument or the first profile in the list' - ' must come with a center.') + raise ValueError( + "The center has to be provided as a function argument or the first profile in the list" + " must come with a center." 
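The moment-based ellipticity definition used in _ellipticities can be checked by hand; with illustrative quadrupole moments:

import numpy as np

Q_xx, Q_yy, Q_xy = 1.2, 0.8, 0.1
norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy**2)
e1 = (Q_xx - Q_yy) / norm   # ~0.10
e2 = 2 * Q_xy / norm        # ~0.05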
+ ) return center_x, center_y diff --git a/lenstronomy/Util/class_creator.py b/lenstronomy/Util/class_creator.py index e0e7be6cd..1d2816c1d 100644 --- a/lenstronomy/Util/class_creator.py +++ b/lenstronomy/Util/class_creator.py @@ -7,23 +7,45 @@ from lenstronomy.ImSim.image_linear_solve import ImageLinearFit from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export -def create_class_instances(lens_model_list=None, z_lens=None, z_source=None, z_source_convention=None, - lens_redshift_list=None, kwargs_interp=None, - multi_plane=False, observed_convention_index=None, source_light_model_list=None, - lens_light_model_list=None, point_source_model_list=None, fixed_magnification_list=None, - flux_from_point_source_list=None, point_source_frame_list=None, - additional_images_list=None, kwargs_lens_eqn_solver=None, - source_deflection_scaling_list=None, source_redshift_list=None, cosmo=None, - index_lens_model_list=None, index_source_light_model_list=None, - index_lens_light_model_list=None, index_point_source_model_list=None, - optical_depth_model_list=None, index_optical_depth_model_list=None, - band_index=0, tau0_index_list=None, all_models=False, point_source_magnification_limit=None, - surface_brightness_smoothing=0.001, sersic_major_axis=None, - tabulated_deflection_angles=None): +def create_class_instances( + lens_model_list=None, + z_lens=None, + z_source=None, + z_source_convention=None, + lens_redshift_list=None, + kwargs_interp=None, + multi_plane=False, + observed_convention_index=None, + source_light_model_list=None, + lens_light_model_list=None, + point_source_model_list=None, + fixed_magnification_list=None, + flux_from_point_source_list=None, + point_source_frame_list=None, + additional_images_list=None, + kwargs_lens_eqn_solver=None, + source_deflection_scaling_list=None, + source_redshift_list=None, + cosmo=None, + index_lens_model_list=None, + index_source_light_model_list=None, + index_lens_light_model_list=None, + index_point_source_model_list=None, + optical_depth_model_list=None, + index_optical_depth_model_list=None, + band_index=0, + tau0_index_list=None, + all_models=False, + point_source_magnification_limit=None, + surface_brightness_smoothing=0.001, + sersic_major_axis=None, + tabulated_deflection_angles=None, +): """ :param lens_model_list: list of strings indicating the type of lens models @@ -87,9 +109,13 @@ def create_class_instances(lens_model_list=None, z_lens=None, z_source=None, z_s lens_redshift_list_i = lens_redshift_list observed_convention_index_i = observed_convention_index else: - lens_model_list_i = [lens_model_list[k] for k in index_lens_model_list[band_index]] + lens_model_list_i = [ + lens_model_list[k] for k in index_lens_model_list[band_index] + ] if lens_redshift_list is not None: - lens_redshift_list_i = [lens_redshift_list[k] for k in index_lens_model_list[band_index]] + lens_redshift_list_i = [ + lens_redshift_list[k] for k in index_lens_model_list[band_index] + ] else: lens_redshift_list_i = lens_redshift_list if observed_convention_index is not None: @@ -101,44 +127,74 @@ def create_class_instances(lens_model_list=None, z_lens=None, z_source=None, z_s counter += 1 else: observed_convention_index_i = observed_convention_index - lens_model_class = LensModel(lens_model_list=lens_model_list_i, z_lens=z_lens, z_source=z_source, - z_source_convention=z_source_convention, - lens_redshift_list=lens_redshift_list_i, - multi_plane=multi_plane, cosmo=cosmo, - observed_convention_index=observed_convention_index_i, 
kwargs_interp=kwargs_interp, - numerical_alpha_class=tabulated_deflection_angles) - - lens_model_class_all = LensModel(lens_model_list=lens_model_list, z_lens=z_lens, z_source=z_source, - z_source_convention=z_source_convention, lens_redshift_list=lens_redshift_list, - multi_plane=multi_plane, cosmo=cosmo, - observed_convention_index=observed_convention_index, kwargs_interp=kwargs_interp, - numerical_alpha_class=tabulated_deflection_angles) + lens_model_class = LensModel( + lens_model_list=lens_model_list_i, + z_lens=z_lens, + z_source=z_source, + z_source_convention=z_source_convention, + lens_redshift_list=lens_redshift_list_i, + multi_plane=multi_plane, + cosmo=cosmo, + observed_convention_index=observed_convention_index_i, + kwargs_interp=kwargs_interp, + numerical_alpha_class=tabulated_deflection_angles, + ) + + lens_model_class_all = LensModel( + lens_model_list=lens_model_list, + z_lens=z_lens, + z_source=z_source, + z_source_convention=z_source_convention, + lens_redshift_list=lens_redshift_list, + multi_plane=multi_plane, + cosmo=cosmo, + observed_convention_index=observed_convention_index, + kwargs_interp=kwargs_interp, + numerical_alpha_class=tabulated_deflection_angles, + ) if index_source_light_model_list is None or all_models is True: source_light_model_list_i = source_light_model_list source_deflection_scaling_list_i = source_deflection_scaling_list source_redshift_list_i = source_redshift_list else: - source_light_model_list_i = [source_light_model_list[k] for k in index_source_light_model_list[band_index]] + source_light_model_list_i = [ + source_light_model_list[k] + for k in index_source_light_model_list[band_index] + ] if source_deflection_scaling_list is None: source_deflection_scaling_list_i = source_deflection_scaling_list else: - source_deflection_scaling_list_i = [source_deflection_scaling_list[k] for k in index_source_light_model_list[band_index]] + source_deflection_scaling_list_i = [ + source_deflection_scaling_list[k] + for k in index_source_light_model_list[band_index] + ] if source_redshift_list is None: source_redshift_list_i = source_redshift_list else: - source_redshift_list_i = [source_redshift_list[k] for k in index_source_light_model_list[band_index]] - source_model_class = LightModel(light_model_list=source_light_model_list_i, - deflection_scaling_list=source_deflection_scaling_list_i, - source_redshift_list=source_redshift_list_i, smoothing=surface_brightness_smoothing, - sersic_major_axis=sersic_major_axis) + source_redshift_list_i = [ + source_redshift_list[k] + for k in index_source_light_model_list[band_index] + ] + source_model_class = LightModel( + light_model_list=source_light_model_list_i, + deflection_scaling_list=source_deflection_scaling_list_i, + source_redshift_list=source_redshift_list_i, + smoothing=surface_brightness_smoothing, + sersic_major_axis=sersic_major_axis, + ) if index_lens_light_model_list is None or all_models is True: lens_light_model_list_i = lens_light_model_list else: - lens_light_model_list_i = [lens_light_model_list[k] for k in index_lens_light_model_list[band_index]] - lens_light_model_class = LightModel(light_model_list=lens_light_model_list_i, - smoothing=surface_brightness_smoothing, sersic_major_axis=sersic_major_axis) + lens_light_model_list_i = [ + lens_light_model_list[k] for k in index_lens_light_model_list[band_index] + ] + lens_light_model_class = LightModel( + light_model_list=lens_light_model_list_i, + smoothing=surface_brightness_smoothing, + sersic_major_axis=sersic_major_axis, + ) 
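The index lists let each band reference a subset of the global model lists; the selection rule above is a plain list comprehension. A toy example (the model names are illustrative):

lens_model_list = ["SIE", "SHEAR", "SIS"]
index_lens_model_list = [[0, 1, 2], [1]]   # band 0 uses all profiles, band 1 only the shear
band_index = 1
lens_model_list_i = [lens_model_list[k] for k in index_lens_model_list[band_index]]
# -> ["SHEAR"]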
point_source_model_list_i = point_source_model_list fixed_magnification_list_i = fixed_magnification_list @@ -146,35 +202,63 @@ def create_class_instances(lens_model_list=None, z_lens=None, z_source=None, z_s point_source_frame_list_i = point_source_frame_list if index_point_source_model_list is not None and not all_models: - point_source_model_list_i = [point_source_model_list[k] for k in index_point_source_model_list[band_index]] + point_source_model_list_i = [ + point_source_model_list[k] + for k in index_point_source_model_list[band_index] + ] if fixed_magnification_list is not None: - fixed_magnification_list_i = [fixed_magnification_list[k] for k in index_point_source_model_list[band_index]] + fixed_magnification_list_i = [ + fixed_magnification_list[k] + for k in index_point_source_model_list[band_index] + ] if additional_images_list is not None: - additional_images_list_i = [additional_images_list[k] for k in index_point_source_model_list[band_index]] + additional_images_list_i = [ + additional_images_list[k] + for k in index_point_source_model_list[band_index] + ] if point_source_frame_list is not None: - point_source_frame_list_i = [point_source_frame_list[k] for k in index_point_source_model_list[band_index]] - point_source_class = PointSource(point_source_type_list=point_source_model_list_i, lensModel=lens_model_class_all, - fixed_magnification_list=fixed_magnification_list_i, - flux_from_point_source_list=flux_from_point_source_list, - additional_images_list=additional_images_list_i, - magnification_limit=point_source_magnification_limit, - kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, - point_source_frame_list=point_source_frame_list_i, - index_lens_model_list=index_lens_model_list) + point_source_frame_list_i = [ + point_source_frame_list[k] + for k in index_point_source_model_list[band_index] + ] + point_source_class = PointSource( + point_source_type_list=point_source_model_list_i, + lensModel=lens_model_class_all, + fixed_magnification_list=fixed_magnification_list_i, + flux_from_point_source_list=flux_from_point_source_list, + additional_images_list=additional_images_list_i, + magnification_limit=point_source_magnification_limit, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + point_source_frame_list=point_source_frame_list_i, + index_lens_model_list=index_lens_model_list, + ) if tau0_index_list is None: tau0_index = 0 else: tau0_index = tau0_index_list[band_index] if index_optical_depth_model_list is not None: - optical_depth_model_list_i = [optical_depth_model_list[k] for k in index_optical_depth_model_list[band_index]] + optical_depth_model_list_i = [ + optical_depth_model_list[k] + for k in index_optical_depth_model_list[band_index] + ] else: optical_depth_model_list_i = optical_depth_model_list - extinction_class = DifferentialExtinction(optical_depth_model=optical_depth_model_list_i, tau0_index=tau0_index) - return lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class + extinction_class = DifferentialExtinction( + optical_depth_model=optical_depth_model_list_i, tau0_index=tau0_index + ) + return ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) @export -def create_image_model(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, image_likelihood_mask=None): +def create_image_model( + kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, image_likelihood_mask=None +): """ :param kwargs_data: ImageData keyword arguments @@ -187,15 +271,38 @@ def 
create_image_model(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, i """ data_class = ImageData(**kwargs_data) psf_class = PSF(**kwargs_psf) - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = create_class_instances(**kwargs_model) - imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, - point_source_class, extinction_class, kwargs_numerics, likelihood_mask=image_likelihood_mask) + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = create_class_instances(**kwargs_model) + imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + kwargs_numerics, + likelihood_mask=image_likelihood_mask, + ) return imageModel @export -def create_im_sim(multi_band_list, multi_band_type, kwargs_model, bands_compute=None, image_likelihood_mask_list=None, - band_index=0, kwargs_pixelbased=None, linear_solver=True): +def create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + kwargs_pixelbased=None, + linear_solver=True, +): """ @@ -214,23 +321,47 @@ def create_im_sim(multi_band_list, multi_band_type, kwargs_model, bands_compute= that they get overwritten by the linear solver solution. :return: MultiBand class instance """ - if linear_solver is False and multi_band_type not in ['single-band', 'multi-linear']: - raise ValueError('setting "linear_solver" to False is only supported in "single-band" mode ' - 'or if "multi-linear" model has only one band.') + if linear_solver is False and multi_band_type not in [ + "single-band", + "multi-linear", + ]: + raise ValueError( + 'setting "linear_solver" to False is only supported in "single-band" mode ' + 'or if "multi-linear" model has only one band.' 
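create_im_sim expects the usual multi_band_list nesting of [kwargs_data, kwargs_psf, kwargs_numerics] per band; a minimal single-band sketch (the kwargs dictionaries are assumed to have been prepared elsewhere):

from lenstronomy.Util import class_creator

multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]]
im_sim = class_creator.create_im_sim(
    multi_band_list,
    multi_band_type="single-band",
    kwargs_model=kwargs_model,
    band_index=0,
)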
+ ) - if multi_band_type == 'multi-linear': + if multi_band_type == "multi-linear": from lenstronomy.ImSim.MultiBand.multi_linear import MultiLinear - multiband = MultiLinear(multi_band_list, kwargs_model, compute_bool=bands_compute, - likelihood_mask_list=image_likelihood_mask_list, linear_solver=linear_solver) - elif multi_band_type == 'joint-linear': + + multiband = MultiLinear( + multi_band_list, + kwargs_model, + compute_bool=bands_compute, + likelihood_mask_list=image_likelihood_mask_list, + linear_solver=linear_solver, + ) + elif multi_band_type == "joint-linear": from lenstronomy.ImSim.MultiBand.joint_linear import JointLinear - multiband = JointLinear(multi_band_list, kwargs_model, compute_bool=bands_compute, - likelihood_mask_list=image_likelihood_mask_list) - elif multi_band_type == 'single-band': - from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel - multiband = SingleBandMultiModel(multi_band_list, kwargs_model, likelihood_mask_list=image_likelihood_mask_list, - band_index=band_index, kwargs_pixelbased=kwargs_pixelbased, - linear_solver=linear_solver) + + multiband = JointLinear( + multi_band_list, + kwargs_model, + compute_bool=bands_compute, + likelihood_mask_list=image_likelihood_mask_list, + ) + elif multi_band_type == "single-band": + from lenstronomy.ImSim.MultiBand.single_band_multi_model import ( + SingleBandMultiModel, + ) + + multiband = SingleBandMultiModel( + multi_band_list, + kwargs_model, + likelihood_mask_list=image_likelihood_mask_list, + band_index=band_index, + kwargs_pixelbased=kwargs_pixelbased, + linear_solver=linear_solver, + ) else: raise ValueError("type %s is not supported!" % multi_band_type) return multiband diff --git a/lenstronomy/Util/constants.py b/lenstronomy/Util/constants.py index 4b9927b3d..fd46896e9 100644 --- a/lenstronomy/Util/constants.py +++ b/lenstronomy/Util/constants.py @@ -1,15 +1,15 @@ -__author__ = 'sibirrer' - - +__author__ = "sibirrer" """ this class contains physical constants and conversion factors between units """ import numpy as np -__all__ = ('G c M_sun M_earth M_jupiter AU Mpc kpc pc day_s arcsec ' - 'a_ES F_ES delay_arcsec2days'.split()) +__all__ = ( + "G c M_sun M_earth M_jupiter AU Mpc kpc pc day_s arcsec " + "a_ES F_ES delay_arcsec2days".split() +) -G = 6.67384*10**(-11) # Gravitational constant [m^3 kg^-1 s^-2] +G = 6.67384 * 10 ** (-11) # Gravitational constant [m^3 kg^-1 s^-2] c = 299792458 # [m/s] M_sun = 1.9891 * 10**30 # solar mass in [kg] @@ -32,11 +32,11 @@ def delay_arcsec2days(delay_arcsec, ddt): - """ - given a delay in arcsec^2 and a Delay distance, the delay is computed in days + """Given a delay in arcsec^2 and a Delay distance, the delay is computed in days. - :param delay_arcsec: gravitational delay in units of arcsec^2 (e.g. Fermat potential) + :param delay_arcsec: gravitational delay in units of arcsec^2 (e.g. 
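delay_arcsec2days is a direct unit conversion of the Fermat potential difference; for a sense of scale (illustrative numbers):

from lenstronomy.Util import constants as const

# a Fermat potential difference of 0.2 arcsec^2 at a time-delay distance of 5000 Mpc
dt = const.delay_arcsec2days(delay_arcsec=0.2, ddt=5000)
# = ddt * Mpc / c * delay_arcsec / day_s * arcsec**2, roughly 28 days here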
Fermat + potential) :param ddt: Time delay distance (in units of Mpc) :return: time-delay in units of days """ - return ddt * Mpc / c * delay_arcsec / day_s * arcsec ** 2 + return ddt * Mpc / c * delay_arcsec / day_s * arcsec**2 diff --git a/lenstronomy/Util/coolest_interface.py b/lenstronomy/Util/coolest_interface.py index 50ba74ba6..d4cfdc8f7 100644 --- a/lenstronomy/Util/coolest_interface.py +++ b/lenstronomy/Util/coolest_interface.py @@ -1,4 +1,6 @@ -from coolest.template.json import JSONSerializer # install from https://github.com/aymgal/COOLEST +from coolest.template.json import ( + JSONSerializer, +) # install from https://github.com/aymgal/COOLEST from astropy.cosmology import FlatLambdaCDM from astropy.io import fits import lenstronomy.Util.coolest_read_util as read @@ -9,13 +11,12 @@ def create_lenstronomy_from_coolest(file_name): - """ - Creates lenstronomy typical kwargs from a COOLEST (JSON) file - - :param file_name: str, name (with path) of the .json file containing the COOLEST information - - :return: return_dict, dictionary with typical lenstronomy kwarg (as kwargs_data, kwargs_psf, kwargs_params, kwargs_results, kwargs_model etc) + """Creates lenstronomy typical kwargs from a COOLEST (JSON) file. + :param file_name: str, name (with path) of the .json file containing the COOLEST + information + :return: return_dict, dictionary with typical lenstronomy kwarg (as kwargs_data, + kwargs_psf, kwargs_params, kwargs_results, kwargs_model etc) """ creation_lens_source_light = False creation_cosmo = False @@ -27,7 +28,7 @@ def create_lenstronomy_from_coolest(file_name): decoder = JSONSerializer(file_name, indent=2) lens_coolest = decoder.load() - print(f'LENS COOLEST : {lens_coolest.mode}') + print(f"LENS COOLEST : {lens_coolest.mode}") # IMAGE @@ -43,21 +44,31 @@ def create_lenstronomy_from_coolest(file_name): try: image = fits.open(image_path)[0].data if (np.shape(image)[0] != nx) or (np.shape(image)[1] != ny): - print(f'image shape {np.shape(image)} is different from the coolest file {nx}, {ny}') + print( + f"image shape {np.shape(image)} is different from the coolest file {nx}, {ny}" + ) except: image = image_path - print(f'could not find image file {image_path}. Saving file name instead.') - ra_at_xy_0 = - ( list(lens_observation.pixels.field_of_view_x)[0] + pixel_size / 2. ) - dec_at_xy_0 = list(lens_observation.pixels.field_of_view_y)[0] + pixel_size / 2. + print( + f"could not find image file {image_path}. Saving file name instead." + ) + ra_at_xy_0 = -( + list(lens_observation.pixels.field_of_view_x)[0] + pixel_size / 2.0 + ) + dec_at_xy_0 = ( + list(lens_observation.pixels.field_of_view_y)[0] + pixel_size / 2.0 + ) transform_pix2angle = np.array([[-1, 0], [0, 1]]) * pixel_size - kwargs_data = {'ra_at_xy_0': ra_at_xy_0, - 'dec_at_xy_0': dec_at_xy_0, - 'transform_pix2angle': transform_pix2angle, - 'image_data': image} - print('Data creation') + kwargs_data = { + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + "transform_pix2angle": transform_pix2angle, + "image_data": image, + } + print("Data creation") - # NOISE + # NOISE if lens_observation.noise is not None: if lens_observation.noise.type == "NoiseMap": creation_data = True @@ -66,18 +77,22 @@ def create_lenstronomy_from_coolest(file_name): noise = fits.open(noise_path)[0].data except: noise = noise_path - print(f'could not find noise file {noise_path}. Saving file name instead.') + print( + f"could not find noise file {noise_path}. Saving file name instead." 
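The coordinate bookkeeping here follows the lenstronomy convention that ra_at_xy_0 / dec_at_xy_0 give the sky position of pixel (0, 0) and RA increases to the left; a quick numeric check of the conversion from the COOLEST field of view (illustrative grid):

import numpy as np

pixel_size, nx = 0.05, 100
fov_x = (-nx * pixel_size / 2.0, nx * pixel_size / 2.0)        # (-2.5", +2.5")
ra_at_xy_0 = -(fov_x[0] + pixel_size / 2.0)                    # +2.475"
transform_pix2angle = np.array([[-1, 0], [0, 1]]) * pixel_size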
+ ) noise_pixel_size = lens_observation.noise.noise_map.pixel_size noise_nx = lens_observation.noise.noise_map.num_pix_x noise_ny = lens_observation.noise.noise_map.num_pix_y if pixel_size != noise_pixel_size: - print(f"noise pixel size {noise_pixel_size} is different from image pixel size {pixel_size}") + print( + f"noise pixel size {noise_pixel_size} is different from image pixel size {pixel_size}" + ) if nx != noise_nx: print(f"noise nx {noise_nx} is different from image nx {nx}") if ny != noise_ny: print(f"noise ny {noise_ny} is different from image ny {ny}") - kwargs_data['noise_map'] = noise - print('Noise (in Data) creation') + kwargs_data["noise_map"] = noise + print("Noise (in Data) creation") else: print(f"noise type {lens_observation.noise.type} is unknown") @@ -92,19 +107,26 @@ def create_lenstronomy_from_coolest(file_name): psf = fits.open(psf_path)[0].data except: psf = psf_path - print(f'could not find PSF file {psf_path}. Saving file name instead.') + print( + f"could not find PSF file {psf_path}. Saving file name instead." + ) psf_pixel_size = lens_instrument.psf.pixels.pixel_size psf_nx = lens_instrument.psf.pixels.num_pix_x psf_ny = lens_instrument.psf.pixels.num_pix_y super_sampling_factor = 1 if pixel_size != psf_pixel_size: super_sampling_factor = int(pixel_size / psf_pixel_size) - print(f"PSF pixel size {psf_pixel_size} is different from image pixel size {pixel_size}. " - f"Assuming super sampling factor of {super_sampling_factor}.") - - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf, - 'point_source_supersampling_factor': super_sampling_factor} - print('PSF creation') + print( + f"PSF pixel size {psf_pixel_size} is different from image pixel size {pixel_size}. " + f"Assuming super sampling factor of {super_sampling_factor}." 
+ ) + + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": psf, + "point_source_supersampling_factor": super_sampling_factor, + } + print("PSF creation") else: print(f"PSF type {lens_instrument.psf.type} is unknown") @@ -118,7 +140,7 @@ def create_lenstronomy_from_coolest(file_name): else: print(f"Cosmology name {lens_cosmo.astropy_name} is unknown") -#LIKELIHOODS not yet well supported by COOLEST + # LIKELIHOODS not yet well supported by COOLEST # # LIKELIHOODS # if "likelihoods" not in exclude_keys: # likelihoods = lens_coolest.likelihoods @@ -169,7 +191,9 @@ def create_lenstronomy_from_coolest(file_name): multi_plane = False creation_redshift_list = True - min_redshift, max_redshift, redshift_list = create_redshift_info(lensing_entities_list) + min_redshift, max_redshift, redshift_list = create_redshift_info( + lensing_entities_list + ) for lensing_entity in lensing_entities_list: if lensing_entity.type == "galaxy": @@ -178,114 +202,193 @@ def create_lenstronomy_from_coolest(file_name): # SOURCE OF LIGHT light_list = galaxy.light_model for light in light_list: - print('Source Light : ') - if light.type == 'Sersic': - read.update_kwargs_sersic(light, source_model_list, kwargs_source, - kwargs_source_init, kwargs_source_up, - kwargs_source_down, kwargs_source_fixed, - kwargs_source_sigma, cleaning=True) - elif light.type == 'Shapelets': - read.update_kwargs_shapelets(light, source_model_list, kwargs_source, - kwargs_source_init, kwargs_source_up, - kwargs_source_down, kwargs_source_fixed, - kwargs_source_sigma, cleaning=True) - elif light.type == 'LensedPS': - read.update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, - kwargs_ps_init, kwargs_ps_up, - kwargs_ps_down, kwargs_ps_fixed, - kwargs_ps_sigma, cleaning=True) + print("Source Light : ") + if light.type == "Sersic": + read.update_kwargs_sersic( + light, + source_model_list, + kwargs_source, + kwargs_source_init, + kwargs_source_up, + kwargs_source_down, + kwargs_source_fixed, + kwargs_source_sigma, + cleaning=True, + ) + elif light.type == "Shapelets": + read.update_kwargs_shapelets( + light, + source_model_list, + kwargs_source, + kwargs_source_init, + kwargs_source_up, + kwargs_source_down, + kwargs_source_fixed, + kwargs_source_sigma, + cleaning=True, + ) + elif light.type == "LensedPS": + read.update_kwargs_lensed_ps( + light, + ps_model_list, + kwargs_ps, + kwargs_ps_init, + kwargs_ps_up, + kwargs_ps_down, + kwargs_ps_fixed, + kwargs_ps_sigma, + cleaning=True, + ) else: - print(f'Light Type {light.type} not yet implemented.') + print(f"Light Type {light.type} not yet implemented.") if galaxy.redshift < max_redshift: # LENSING GALAXY if galaxy.redshift > min_redshift: multi_plane = True - print('Multiplane lensing to consider.') + print("Multiplane lensing to consider.") mass_list = galaxy.mass_model for mass in mass_list: - print('Lens Mass : ') - if mass.type == 'PEMD': - read.update_kwargs_pemd(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, - kwargs_lens_down, kwargs_lens_fixed, kwargs_lens_sigma, cleaning=True) - elif mass.type == 'SIE': - read.update_kwargs_sie(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, - kwargs_lens_down, kwargs_lens_fixed, kwargs_lens_sigma, cleaning=True) + print("Lens Mass : ") + if mass.type == "PEMD": + read.update_kwargs_pemd( + mass, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=True, + ) + elif mass.type == "SIE": + 
read.update_kwargs_sie( + mass, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=True, + ) else: - print(f'Mass Type {mass.type} not yet implemented.') + print(f"Mass Type {mass.type} not yet implemented.") if galaxy.redshift == min_redshift: # LENSING LIGHT GALAXY light_list = galaxy.light_model for light in light_list: - print('Lens Light : ') - if light.type == 'Sersic': - read.update_kwargs_sersic(light, lens_light_model_list, kwargs_lens_light, kwargs_lens_light_init, - kwargs_lens_light_up, kwargs_lens_light_down, kwargs_lens_light_fixed, - kwargs_lens_light_sigma, cleaning=True) + print("Lens Light : ") + if light.type == "Sersic": + read.update_kwargs_sersic( + light, + lens_light_model_list, + kwargs_lens_light, + kwargs_lens_light_init, + kwargs_lens_light_up, + kwargs_lens_light_down, + kwargs_lens_light_fixed, + kwargs_lens_light_sigma, + cleaning=True, + ) # elif light.type == 'LensedPS': # read.update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, kwargs_ps_init, kwargs_ps_up, # kwargs_ps_down, kwargs_ps_fixed, kwargs_ps_sigma, cleaning=True) else: - print(f'Light Type {light.type} not yet implemented.') + print(f"Light Type {light.type} not yet implemented.") # if (galaxy.redshift <= min_redshift) or (galaxy.redshift >= max_redshift): # print(f'REDSHIFT {galaxy.redshift} is not in the range ] {min_red} , {max_red} [') - elif lensing_entity.type == "MassField": mass_field_list = lensing_entity.mass_model for mass_field_idx in mass_field_list: - print('Shear : ') - if mass_field_idx.type == 'ExternalShear': - read.update_kwargs_shear(mass_field_idx, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, - kwargs_lens_down, kwargs_lens_fixed, kwargs_lens_sigma, cleaning=True) + print("Shear : ") + if mass_field_idx.type == "ExternalShear": + read.update_kwargs_shear( + mass_field_idx, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=True, + ) else: print(f"type of Shear {mass_field_idx.type} not implemented") - - - else: print(f"lensing entity of type {lensing_entity.type} is unknown.") return_dict = {} if creation_lens_source_light is True: - return_dict['kwargs_model'] = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': ps_model_list} - - lens_params = [kwargs_lens_init, kwargs_lens_sigma, - kwargs_lens_fixed, kwargs_lens_down, kwargs_lens_up] - source_params = [kwargs_source_init, kwargs_source_sigma, - kwargs_source_fixed, kwargs_source_down, kwargs_source_up] - lens_light_params = [kwargs_lens_light_init, kwargs_lens_light_sigma, - kwargs_lens_light_fixed, kwargs_lens_light_down, kwargs_lens_light_up] - ps_params = [kwargs_ps_init, kwargs_ps_sigma, kwargs_ps_fixed, kwargs_ps_down, kwargs_ps_up] - - kwargs_params = {'lens_model': lens_params, - 'source_model': source_params, - 'lens_light_model': lens_light_params, - 'point_source_model': ps_params} - return_dict['kwargs_params'] = kwargs_params - - kwargs_result = {'kwargs_lens': kwargs_lens, - 'kwargs_source': kwargs_source, - 'kwargs_lens_light': kwargs_lens_light, - 'kwargs_ps': kwargs_ps} - return_dict['kwargs_result'] = kwargs_result + return_dict["kwargs_model"] = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + 
"point_source_model_list": ps_model_list, + } + + lens_params = [ + kwargs_lens_init, + kwargs_lens_sigma, + kwargs_lens_fixed, + kwargs_lens_down, + kwargs_lens_up, + ] + source_params = [ + kwargs_source_init, + kwargs_source_sigma, + kwargs_source_fixed, + kwargs_source_down, + kwargs_source_up, + ] + lens_light_params = [ + kwargs_lens_light_init, + kwargs_lens_light_sigma, + kwargs_lens_light_fixed, + kwargs_lens_light_down, + kwargs_lens_light_up, + ] + ps_params = [ + kwargs_ps_init, + kwargs_ps_sigma, + kwargs_ps_fixed, + kwargs_ps_down, + kwargs_ps_up, + ] + + kwargs_params = { + "lens_model": lens_params, + "source_model": source_params, + "lens_light_model": lens_light_params, + "point_source_model": ps_params, + } + return_dict["kwargs_params"] = kwargs_params + + kwargs_result = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + "kwargs_ps": kwargs_ps, + } + return_dict["kwargs_result"] = kwargs_result if creation_redshift_list is True: - return_dict['redshift_list'] = redshift_list + return_dict["redshift_list"] = redshift_list # if creation_kwargs_likelihood is True: # return_dict['kwargs_likelihood'] = kwargs_likelihood if creation_cosmo is True: - return_dict['Cosmo'] = cosmo + return_dict["Cosmo"] = cosmo if creation_data is True: - return_dict['kwargs_data'] = kwargs_data + return_dict["kwargs_data"] = kwargs_data if creation_instrument is True: - return_dict['kwargs_psf'] = kwargs_psf + return_dict["kwargs_psf"] = kwargs_psf # time delay not implemented @@ -298,10 +401,11 @@ def create_lenstronomy_from_coolest(file_name): return return_dict -def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, - ending='_update'): - """ - Function to update a json file already containing a model with the results of this model fitting +def update_coolest_from_lenstronomy( + file_name, kwargs_result, kwargs_mcmc=None, ending="_update" +): + """Function to update a json file already containing a model with the results of + this model fitting. :param file_name: str, name (with path) of the json file to update :param kwargs_results: dict, lenstronomy kwargs_results {'kwargs_lens': [{..},{..}], 'kwargs_source': [{..}],...} @@ -321,17 +425,24 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, decoder = JSONSerializer(file_name, indent=2) lens_coolest = decoder.load() - available_profiles = ['LensedPS', 'Sersic', 'Shapelets', 'PEMD', 'SIE', 'SIS', 'ExternalShear'] - if lens_coolest.mode == 'MAP': - print(f'LENS COOLEST : {lens_coolest.mode}') + available_profiles = [ + "LensedPS", + "Sersic", + "Shapelets", + "PEMD", + "SIE", + "SIS", + "ExternalShear", + ] + if lens_coolest.mode == "MAP": + print(f"LENS COOLEST : {lens_coolest.mode}") else: - print(f'LENS COOLEST IS NOT MAP, BUT IS {lens_coolest.mode}. CHANGING INTO MAP') - lens_coolest.mode ='MAP' + print(f"LENS COOLEST IS NOT MAP, BUT IS {lens_coolest.mode}. 
CHANGING INTO MAP") + lens_coolest.mode = "MAP" lensing_entities_list = lens_coolest.lensing_entities if lensing_entities_list is not None: - lens_model_list = [] kwargs_lens = [] kwargs_lens_up = [] @@ -369,7 +480,9 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, multi_plane = False creation_redshift_list = True - min_redshift, max_redshift, redshift_list = create_redshift_info(lensing_entities_list) + min_redshift, max_redshift, redshift_list = create_redshift_info( + lensing_entities_list + ) for lensing_entity in lensing_entities_list: if lensing_entity.type == "galaxy": @@ -379,31 +492,43 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, # SOURCE OF LIGHT light_list = galaxy.light_model for light in light_list: - # ASSUME same list of models as in the json !! - if light.type == 'LensedPS': - kwargs_ps = kwargs_result['kwargs_ps'][idx_ps] + if light.type == "LensedPS": + kwargs_ps = kwargs_result["kwargs_ps"][idx_ps] kwargs_ps_mcmc = None - elif light.type in ['Sersic', 'Shapelets']: - kwargs_source = kwargs_result['kwargs_source'][idx_source] + elif light.type in ["Sersic", "Shapelets"]: + kwargs_source = kwargs_result["kwargs_source"][idx_source] kwargs_source_mcmc = None else: - print(f'Light Type {light.type} not yet implemented.') - - if (kwargs_mcmc is not None) & (light.type in available_profiles): - if light.type == 'LensedPS': - kwargs_ps_mcmc = [arg[idx_ps] for arg in kwargs_mcmc['args_ps']] - elif light.type in ['Sersic', 'Shapelets']: - kwargs_source_mcmc = [arg[idx_source] for arg in kwargs_mcmc['args_source']] - - if light.type == 'Sersic': - update.sersic_update(light, kwargs_source, kwargs_source_mcmc) + print(f"Light Type {light.type} not yet implemented.") + + if (kwargs_mcmc is not None) & ( + light.type in available_profiles + ): + if light.type == "LensedPS": + kwargs_ps_mcmc = [ + arg[idx_ps] for arg in kwargs_mcmc["args_ps"] + ] + elif light.type in ["Sersic", "Shapelets"]: + kwargs_source_mcmc = [ + arg[idx_source] + for arg in kwargs_mcmc["args_source"] + ] + + if light.type == "Sersic": + update.sersic_update( + light, kwargs_source, kwargs_source_mcmc + ) idx_source += 1 - elif light.type == 'Shapelets': - update.shapelets_update(light, kwargs_source, kwargs_source_mcmc) + elif light.type == "Shapelets": + update.shapelets_update( + light, kwargs_source, kwargs_source_mcmc + ) idx_source += 1 - elif light.type == 'LensedPS': - update.lensed_point_source_update(light, kwargs_ps, kwargs_ps_mcmc) + elif light.type == "LensedPS": + update.lensed_point_source_update( + light, kwargs_ps, kwargs_ps_mcmc + ) idx_ps += 1 else: pass @@ -412,44 +537,53 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, # LENSING GALAXY if galaxy.redshift > min_redshift: multi_plane = True - print('Multiplane lensing to consider.') + print("Multiplane lensing to consider.") mass_list = galaxy.mass_model for mass in mass_list: - kwargs_lens_mcmc = None - if (kwargs_mcmc is not None) & (mass.type in available_profiles): - kwargs_lens_mcmc = [arg[idx_lens] for arg in kwargs_mcmc['args_lens']] - - if mass.type == 'PEMD': - kwargs_lens = kwargs_result['kwargs_lens'][idx_lens] + if (kwargs_mcmc is not None) & ( + mass.type in available_profiles + ): + kwargs_lens_mcmc = [ + arg[idx_lens] for arg in kwargs_mcmc["args_lens"] + ] + + if mass.type == "PEMD": + kwargs_lens = kwargs_result["kwargs_lens"][idx_lens] update.pemd_update(mass, kwargs_lens, kwargs_lens_mcmc) idx_lens += 1 - elif mass.type 
== 'SIE': - kwargs_lens = kwargs_result['kwargs_lens'][idx_lens] + elif mass.type == "SIE": + kwargs_lens = kwargs_result["kwargs_lens"][idx_lens] update.sie_update(mass, kwargs_lens, kwargs_lens_mcmc) idx_lens += 1 else: - print(f'Mass Type {mass.type} not yet implemented.') - - + print(f"Mass Type {mass.type} not yet implemented.") if galaxy.redshift == min_redshift: # LENSING LIGHT GALAXY light_list = galaxy.light_model for light in light_list: - if light.type in ['Sersic']: - kwargs_lens_light = kwargs_result['kwargs_lens_light'][idx_lens_light] + if light.type in ["Sersic"]: + kwargs_lens_light = kwargs_result["kwargs_lens_light"][ + idx_lens_light + ] kwargs_lens_light_mcmc = None else: - print(f'Light Type {light.type} not yet implemented.') - - if (kwargs_mcmc is not None) & (light.type in available_profiles): - if light.type in ['Sersic']: - kwargs_lens_light_mcmc = [arg[idx_lens_light] for arg in - kwargs_mcmc['args_lens_light']] - - if light.type == 'Sersic': - update.sersic_update(light, kwargs_lens_light, kwargs_lens_light_mcmc) + print(f"Light Type {light.type} not yet implemented.") + + if (kwargs_mcmc is not None) & ( + light.type in available_profiles + ): + if light.type in ["Sersic"]: + kwargs_lens_light_mcmc = [ + arg[idx_lens_light] + for arg in kwargs_mcmc["args_lens_light"] + ] + + if light.type == "Sersic": + update.sersic_update( + light, kwargs_lens_light, kwargs_lens_light_mcmc + ) idx_lens_light += 1 else: pass @@ -460,14 +594,19 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, elif lensing_entity.type == "MassField": mass_field_list = lensing_entity.mass_model for mass_field_idx in mass_field_list: - - kwargs_lens = kwargs_result['kwargs_lens'][idx_lens] + kwargs_lens = kwargs_result["kwargs_lens"][idx_lens] kwargs_lens_mcmc = None - if (kwargs_mcmc is not None) & (mass_field_idx.type in available_profiles): - kwargs_lens_mcmc = [arg[idx_lens] for arg in kwargs_mcmc['args_lens']] - - if mass_field_idx.type == 'ExternalShear': - update.shear_update(mass_field_idx, kwargs_lens, kwargs_lens_mcmc) + if (kwargs_mcmc is not None) & ( + mass_field_idx.type in available_profiles + ): + kwargs_lens_mcmc = [ + arg[idx_lens] for arg in kwargs_mcmc["args_lens"] + ] + + if mass_field_idx.type == "ExternalShear": + update.shear_update( + mass_field_idx, kwargs_lens, kwargs_lens_mcmc + ) idx_lens += 1 else: print(f"type of Shear {mass_field_idx.type} not implemented") @@ -475,17 +614,26 @@ def update_coolest_from_lenstronomy(file_name, kwargs_result, kwargs_mcmc=None, else: print(f"Lensing entity of type {lensing_entity.type} is unknown.") - encoder = JSONSerializer(file_name + ending, - obj=lens_coolest, indent=2) + encoder = JSONSerializer(file_name + ending, obj=lens_coolest, indent=2) lens_coolest_encoded = encoder.dump_jsonpickle() return -def create_kwargs_mcmc_from_chain_list(chain_list, kwargs_model, kwargs_params, kwargs_data, kwargs_psf, - kwargs_numerics, kwargs_constraints, image_likelihood_mask=None, idx_chain=-1, - likelihood_threshold=None): - """ - function to construct kwargs_mcmc in the right format for the "update_coolest_from_lenstronomy" function + +def create_kwargs_mcmc_from_chain_list( + chain_list, + kwargs_model, + kwargs_params, + kwargs_data, + kwargs_psf, + kwargs_numerics, + kwargs_constraints, + image_likelihood_mask=None, + idx_chain=-1, + likelihood_threshold=None, +): + """Function to construct kwargs_mcmc in the right format for the + "update_coolest_from_lenstronomy" function. 
:param chain_list: list, output of FittingSequence.fitting_sequence() :param kwargs_model: the usual lenstronomy kwargs @@ -495,60 +643,99 @@ def create_kwargs_mcmc_from_chain_list(chain_list, kwargs_model, kwargs_params, :param kwargs_numerics: the usual lenstronomy kwargs :param kwargs_constraints: the usual lenstronomy kwargs :param image_likelihood_mask: the usual lenstronomy kwargs - :param idx_chain: int, index of the MCMC chain in the chain_list, default is the last one. - Can be useful if several PSO and MCMC are perfomed in the fitting sequence. - :param likelihood_threshold: float, likelihood limit (negative) underwhich the MCMC point is not considered. - Can be useful if a few chains are stucked in another (less good) minimum - - :return: kwargs_mcmc, list containing all the relevant MCMC points in a userfriendly format - (with linear parameters etc) - + :param idx_chain: int, index of the MCMC chain in the chain_list, default is the + last one. Can be useful if several PSO and MCMC are perfomed in the fitting + sequence. + :param likelihood_threshold: float, likelihood limit (negative) underwhich the MCMC + point is not considered. Can be useful if a few chains are stucked in another + (less good) minimum + :return: kwargs_mcmc, list containing all the relevant MCMC points in a userfriendly + format (with linear parameters etc) """ par_buf = chain_list[idx_chain][1] dist_buf = chain_list[idx_chain][3] - kwargs_lens_init, kwargs_lens_sigma, kwargs_fixed_lens, kwargs_lower_lens, kwargs_upper_lens = kwargs_params[ - 'lens_model'] - kwargs_source_init, kwargs_source_sigma, kwargs_fixed_source, kwargs_lower_source, kwargs_upper_source = \ - kwargs_params['source_model'] - kwargs_lens_light_init, kwargs_lens_light_sigma, kwargs_fixed_lens_light, kwargs_lower_lens_light, kwargs_upper_lens_light = \ - kwargs_params['lens_light_model'] - kwargs_ps_init, kwargs_ps_sigma, kwargs_fixed_ps, kwargs_lower_ps, kwargs_upper_ps = kwargs_params[ - 'point_source_model'] - - param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, - kwargs_fixed_source=kwargs_fixed_source, kwargs_fixed_lens_light=kwargs_fixed_lens_light, - kwargs_fixed_ps=kwargs_fixed_ps, - kwargs_lower_lens=kwargs_lower_lens, kwargs_lower_source=kwargs_lower_source, - kwargs_lower_lens_light=kwargs_lower_lens_light, kwargs_lower_ps=kwargs_lower_ps, - kwargs_upper_lens=kwargs_upper_lens, kwargs_upper_source=kwargs_upper_source, - kwargs_upper_lens_light=kwargs_upper_lens_light, kwargs_upper_ps=kwargs_upper_ps, - kwargs_lens_init=kwargs_lens_init, **kwargs_constraints) - - image_linear = class_util.create_image_model(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, - image_likelihood_mask=image_likelihood_mask) + ( + kwargs_lens_init, + kwargs_lens_sigma, + kwargs_fixed_lens, + kwargs_lower_lens, + kwargs_upper_lens, + ) = kwargs_params["lens_model"] + ( + kwargs_source_init, + kwargs_source_sigma, + kwargs_fixed_source, + kwargs_lower_source, + kwargs_upper_source, + ) = kwargs_params["source_model"] + ( + kwargs_lens_light_init, + kwargs_lens_light_sigma, + kwargs_fixed_lens_light, + kwargs_lower_lens_light, + kwargs_upper_lens_light, + ) = kwargs_params["lens_light_model"] + ( + kwargs_ps_init, + kwargs_ps_sigma, + kwargs_fixed_ps, + kwargs_lower_ps, + kwargs_upper_ps, + ) = kwargs_params["point_source_model"] + + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + 
kwargs_fixed_ps=kwargs_fixed_ps, + kwargs_lower_lens=kwargs_lower_lens, + kwargs_lower_source=kwargs_lower_source, + kwargs_lower_lens_light=kwargs_lower_lens_light, + kwargs_lower_ps=kwargs_lower_ps, + kwargs_upper_lens=kwargs_upper_lens, + kwargs_upper_source=kwargs_upper_source, + kwargs_upper_lens_light=kwargs_upper_lens_light, + kwargs_upper_ps=kwargs_upper_ps, + kwargs_lens_init=kwargs_lens_init, + **kwargs_constraints, + ) + + image_linear = class_util.create_image_model( + kwargs_data, + kwargs_psf, + kwargs_numerics, + kwargs_model, + image_likelihood_mask=image_likelihood_mask, + ) args_lens = [] args_source = [] args_lens_light = [] args_ps = [] for w in range(len(dist_buf)): if likelihood_threshold is not None: - if dist_buf[w] < likelihood_threshold : + if dist_buf[w] < likelihood_threshold: pass kwargs_return = param_class.args2kwargs(par_buf[w]) image_linear.image_linear_solve(**kwargs_return) - args_lens.append(kwargs_return['kwargs_lens']) - args_source.append(kwargs_return['kwargs_source']) - args_lens_light.append(kwargs_return['kwargs_lens_light']) - args_ps.append(kwargs_return['kwargs_ps']) - kwargs_mcmc_results = {'args_lens': args_lens, 'args_source': args_source, 'args_lens_light': args_lens_light, - 'args_ps': args_ps} + args_lens.append(kwargs_return["kwargs_lens"]) + args_source.append(kwargs_return["kwargs_source"]) + args_lens_light.append(kwargs_return["kwargs_lens_light"]) + args_ps.append(kwargs_return["kwargs_ps"]) + kwargs_mcmc_results = { + "args_lens": args_lens, + "args_source": args_source, + "args_lens_light": args_lens_light, + "args_ps": args_ps, + } return kwargs_mcmc_results + def create_redshift_info(lensing_entities_list): - """ - Side fuction to create the minimum, maximum and whole redshift list of galaxies in the COOLEST template - Note that the redshifts helps knowing which galaxy is a lens, or a source, and if multiplane has to be considered + """Side fuction to create the minimum, maximum and whole redshift list of galaxies + in the COOLEST template Note that the redshifts helps knowing which galaxy is a + lens, or a source, and if multiplane has to be considered. :param lensing_entities_list: coolest.template.classes.lensing_entity_list.LensingEntityList object :return: min_redshift, max_redshift, redshift_list ; minimum, maximum and full list of lensing entities redshifts @@ -559,5 +746,3 @@ def create_redshift_info(lensing_entities_list): min_redshift = np.min(redshift_list) max_redshift = np.max(redshift_list) return min_redshift, max_redshift, redshift_list - - diff --git a/lenstronomy/Util/coolest_read_util.py b/lenstronomy/Util/coolest_read_util.py index daf7bc050..b9f2589d1 100644 --- a/lenstronomy/Util/coolest_read_util.py +++ b/lenstronomy/Util/coolest_read_util.py @@ -4,9 +4,8 @@ def shapelet_amp_coolest_to_lenstronomy(value): - """ - Transforms shapelets coefficients from COOLEST conventions (x to the right) - to lenstronomy conventions (x following ra, to the left) + """Transforms shapelets coefficients from COOLEST conventions (x to the right) to + lenstronomy conventions (x following ra, to the left) :param value: amplitude of the shapelet (float or np.array) in COOLEST conventions :return: amplitude of the shapelet (float or np.array) in lenstronomy conventions @@ -43,17 +42,17 @@ def degree_coolest_to_radian_lenstronomy(value): if value is None: return None else: - lenstro_oriented_degree = - value + 90. - if lenstro_oriented_degree >= 180.: - lenstro_oriented_degree -= 180. 
- elif lenstro_oriented_degree < 0.: + lenstro_oriented_degree = -value + 90.0 + if lenstro_oriented_degree >= 180.0: + lenstro_oriented_degree -= 180.0 + elif lenstro_oriented_degree < 0.0: lenstro_oriented_degree += 180 - return lenstro_oriented_degree * np.pi / 180. + return lenstro_oriented_degree * np.pi / 180.0 def qphi_coolest_to_e1e2_lenstronomy(q, phi): - """ - Transform q and phi (axis ratio, position angle East-of-North) to e1,e2 in lenstronomy + """Transform q and phi (axis ratio, position angle East-of-North) to e1,e2 in + lenstronomy. :param q: float, axis ratio :param phi: float, position angle in COOLEST conventions @@ -68,9 +67,8 @@ def qphi_coolest_to_e1e2_lenstronomy(q, phi): def gamma_phi_coolest_to_g1_g2_lenstronomy(gamma_ext, phi_ext): - """ - Transform gamma_ext and phi_ext (shear strength, position angle East-of-North) - to gamma1,gamma2 in lenstronomy + """Transform gamma_ext and phi_ext (shear strength, position angle East-of-North) to + gamma1,gamma2 in lenstronomy. :param gamma_ext: float, shear strenght :param phi_ext: float, shear angle in COOLEST conventions @@ -80,26 +78,27 @@ def gamma_phi_coolest_to_g1_g2_lenstronomy(gamma_ext, phi_ext): return None, None else: angle = degree_coolest_to_radian_lenstronomy(phi_ext) - gamma1, gamma2 = shear_polar2cartesian(angle,gamma_ext) + gamma1, gamma2 = shear_polar2cartesian(angle, gamma_ext) return gamma1, gamma2 def ellibounds_coolest_to_lenstronomy(q_down, q_up, phi_down, phi_up): - """ - Transforms upper and lower bounds on coolest ellipticity parameters (q, phi) towards lenstronomy bound on e1, e2 - The mapping can not be perfect but it's the best we can do + """Transforms upper and lower bounds on coolest ellipticity parameters (q, phi) + towards lenstronomy bound on e1, e2 The mapping can not be perfect but it's the best + we can do. :param q_down: float, lower bound of axis ratio :param q_up: float, upper bound of axis ratio :param phi_down: float, lower bound of position angle in COOLEST conventions :param phi_up: float, upper bound of position angle in COOLEST conventions - :return: e1_down, e1_up, e2_down, e2_up, bounds for lenstronomy usual ellipticity parameters + :return: e1_down, e1_up, e2_down, e2_up, bounds for lenstronomy usual ellipticity + parameters """ if None in [q_down, q_up, phi_down, phi_up]: return None, None, None, None else: - e1_down = - (1 - q_down) / (1 + q_down) - e2_down = - (1 - q_down) / (1 + q_down) + e1_down = -(1 - q_down) / (1 + q_down) + e2_down = -(1 - q_down) / (1 + q_down) e1_up = (1 - q_down) / (1 + q_down) e2_up = (1 - q_down) / (1 + q_down) @@ -109,23 +108,26 @@ def ellibounds_coolest_to_lenstronomy(q_down, q_up, phi_down, phi_up): return e1_down, e1_up, e2_down, e2_up -def shearbounds_coolest_to_lenstronomy(gamma_ext_down, gamma_ext_up, phi_ext_down, phi_ext_up): - """ - Transforms upper and lower bounds on coolest shear parameters (gamma_ext, phi_ext) towards lenstronomy bounds - on gamma_1, gamma_2 - The mapping can not be perfect but it's the best we can do +def shearbounds_coolest_to_lenstronomy( + gamma_ext_down, gamma_ext_up, phi_ext_down, phi_ext_up +): + """Transforms upper and lower bounds on coolest shear parameters (gamma_ext, + phi_ext) towards lenstronomy bounds on gamma_1, gamma_2 The mapping can not be + perfect but it's the best we can do. 
:param gamma_ext_down: float, lower bound of shear strenght :param gamma_ext_up: float, upper bound of shear strenght - :param phi_ext_down: float, lower bound of shear position angle in COOLEST conventions + :param phi_ext_down: float, lower bound of shear position angle in COOLEST + conventions :param phi_ext_up: float, upper bound of shear position angle in COOLEST conventions - :return: gamma1_down, gamma1_up, gamma2_down, gamma2_up ; bounds for lenstronomy usual shear parameters + :return: gamma1_down, gamma1_up, gamma2_down, gamma2_up ; bounds for lenstronomy + usual shear parameters """ if None in [gamma_ext_down, gamma_ext_up, phi_ext_down, phi_ext_up]: return None, None, None, None else: - gamma1_down = - gamma_ext_up - gamma2_down = - gamma_ext_up + gamma1_down = -gamma_ext_up + gamma2_down = -gamma_ext_up gamma1_up = gamma_ext_up gamma2_up = gamma_ext_up @@ -135,8 +137,17 @@ def shearbounds_coolest_to_lenstronomy(gamma_ext_down, gamma_ext_up, phi_ext_dow return gamma1_down, gamma1_up, gamma2_down, gamma2_up -def update_kwargs_shear(shear_idx, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, kwargs_lens_down, - kwargs_lens_fixed, kwargs_lens_sigma, cleaning=False): +def update_kwargs_shear( + shear_idx, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=False, +): """ Update the lens model list and kwargs with SHEAR mass model (gamma_ext - phi_ext) @@ -151,29 +162,42 @@ def update_kwargs_shear(shear_idx, lens_model_list, kwargs_lens, kwargs_lens_ini :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed :return: updated list and kwargs """ - lens_model_list.append('SHEAR') + lens_model_list.append("SHEAR") for shear_name, shear_param in shear_idx.parameters.items(): - if shear_name == 'gamma_ext': - gammaext = getattr(shear_param.point_estimate, 'value') - gammaext_up = getattr(shear_param.definition_range, 'max_value') - gammaext_down = getattr(shear_param.definition_range, 'min_value') - gammaext_fixed = gammaext if getattr(shear_param, 'fixed') else None - elif shear_name == 'phi_ext': - psiext = getattr(shear_param.point_estimate, 'value') - psiext_up = getattr(shear_param.definition_range, 'max_value') - psiext_down = getattr(shear_param.definition_range, 'min_value') - psiext_fixed = psiext if getattr(shear_param, 'fixed') else None + if shear_name == "gamma_ext": + gammaext = getattr(shear_param.point_estimate, "value") + gammaext_up = getattr(shear_param.definition_range, "max_value") + gammaext_down = getattr(shear_param.definition_range, "min_value") + gammaext_fixed = gammaext if getattr(shear_param, "fixed") else None + elif shear_name == "phi_ext": + psiext = getattr(shear_param.point_estimate, "value") + psiext_up = getattr(shear_param.definition_range, "max_value") + psiext_down = getattr(shear_param.definition_range, "min_value") + psiext_fixed = psiext if getattr(shear_param, "fixed") else None else: print(f"{shear_name} not known") gamma1, gamma2 = gamma_phi_coolest_to_g1_g2_lenstronomy(gammaext, psiext) - gamma1_fixed, gamma2_fixed = gamma_phi_coolest_to_g1_g2_lenstronomy(gammaext_fixed, psiext_fixed) - gamma1_down, gamma1_up, gamma2_down, gamma2_up = shearbounds_coolest_to_lenstronomy(gammaext_down, gammaext_up, psiext_down, - psiext_up) - - kw_1 = {'gamma1': gamma1, 'gamma2': gamma2, 'ra_0': 0., 'dec_0': 0.} - kw_up_1 = {'gamma1': gamma1_up, 'gamma2': gamma2_up, 'ra_0': 0., 'dec_0': 0.} - kw_down_1 = 
{'gamma1': gamma1_down, 'gamma2': gamma2_down, 'ra_0': 0., 'dec_0': 0.} - kw_fixed_1 = {'gamma1': gamma1_fixed, 'gamma2': gamma2_fixed, 'ra_0': 0., 'dec_0': 0.} + gamma1_fixed, gamma2_fixed = gamma_phi_coolest_to_g1_g2_lenstronomy( + gammaext_fixed, psiext_fixed + ) + gamma1_down, gamma1_up, gamma2_down, gamma2_up = shearbounds_coolest_to_lenstronomy( + gammaext_down, gammaext_up, psiext_down, psiext_up + ) + + kw_1 = {"gamma1": gamma1, "gamma2": gamma2, "ra_0": 0.0, "dec_0": 0.0} + kw_up_1 = {"gamma1": gamma1_up, "gamma2": gamma2_up, "ra_0": 0.0, "dec_0": 0.0} + kw_down_1 = { + "gamma1": gamma1_down, + "gamma2": gamma2_down, + "ra_0": 0.0, + "dec_0": 0.0, + } + kw_fixed_1 = { + "gamma1": gamma1_fixed, + "gamma2": gamma2_fixed, + "ra_0": 0.0, + "dec_0": 0.0, + } kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -182,9 +206,9 @@ def update_kwargs_shear(shear_idx, lens_model_list, kwargs_lens, kwargs_lens_ini kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'gamma1': 0.0, 'gamma2': -0.0, 'ra_0': 0., 'dec_0': 0.} - kw_up_default = {'gamma1': 0.5, 'gamma2': 0.5, 'ra_0': 100, 'dec_0': 100} - kw_down_default = {'gamma1': -0.5, 'gamma2': -0.5, 'ra_0': -100, 'dec_0': -100} + kw_init_default = {"gamma1": 0.0, "gamma2": -0.0, "ra_0": 0.0, "dec_0": 0.0} + kw_up_default = {"gamma1": 0.5, "gamma2": 0.5, "ra_0": 100, "dec_0": 100} + kw_down_default = {"gamma1": -0.5, "gamma2": -0.5, "ra_0": -100, "dec_0": -100} for key, val in kw_1.items(): if val is None: kw_init[key] = kw_init_default[key] @@ -203,16 +227,24 @@ def update_kwargs_shear(shear_idx, lens_model_list, kwargs_lens, kwargs_lens_ini kwargs_lens_up.append(kw_up) kwargs_lens_down.append(kw_down) kwargs_lens_fixed.append(kw_fixed) - kwargs_lens_sigma.append({'gamma1': 0.1, 'gamma2': 0.1, 'ra_0': 0., 'dec_0': 0.}) - print('\t Shear correctly added') + kwargs_lens_sigma.append({"gamma1": 0.1, "gamma2": 0.1, "ra_0": 0.0, "dec_0": 0.0}) + print("\t Shear correctly added") return -def update_kwargs_pemd(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, kwargs_lens_down, kwargs_lens_fixed, - kwargs_lens_sigma, cleaning=False): - """ - Update the lens list and kwargs with PEMD mass model +def update_kwargs_pemd( + mass, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=False, +): + """Update the lens list and kwargs with PEMD mass model. 
:param mass: coolest.template.classes.profiles.mass.PEMD object :param lens_model_list: the usual lenstronomy lens_model_list @@ -222,57 +254,87 @@ def update_kwargs_pemd(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwa :param kwargs_lens_down: the usual lenstronomy kwargs :param kwargs_lens_fixed: the usual lenstronomy kwargs :param kwargs_lens_sigma: the usual lenstronomy kwargs - :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed - + :param cleaning: bool, if True, will update the empty fields with default values + + cleans the kwargs_fixed :return: updated list and kwargs """ - lens_model_list.append('PEMD') + lens_model_list.append("PEMD") for mass_name, mass_param in mass.parameters.items(): - if mass_name == 'theta_E': - te = getattr(mass_param.point_estimate, 'value') - te_up = getattr(mass_param.definition_range, 'max_value') - te_down = getattr(mass_param.definition_range, 'min_value') - te_fixed = te if getattr(mass_param, 'fixed') else None - elif mass_name == 'gamma': - gamma = getattr(mass_param.point_estimate, 'value') - gamma_up = getattr(mass_param.definition_range, 'max_value') - gamma_down = getattr(mass_param.definition_range, 'min_value') - gamma_fixed = gamma if getattr(mass_param, 'fixed') else None - elif mass_name == 'q': - q = getattr(mass_param.point_estimate, 'value') - q_up = getattr(mass_param.definition_range, 'max_value') - q_down = getattr(mass_param.definition_range, 'min_value') - q_fixed = q if getattr(mass_param, 'fixed') else None - elif mass_name == 'phi': - phi = getattr(mass_param.point_estimate, 'value') - phi_up = getattr(mass_param.definition_range, 'max_value') - phi_down = getattr(mass_param.definition_range, 'min_value') - phi_fixed = phi if getattr(mass_param, 'fixed') else None - elif mass_name == 'center_x': - center_x = - getattr(mass_param.point_estimate, 'value') if getattr(mass_param.point_estimate, - 'value') is not None else None - center_x_up = getattr(mass_param.definition_range, 'max_value') - center_x_down = getattr(mass_param.definition_range, 'min_value') - center_x_fixed = center_x if getattr(mass_param, 'fixed') else None - elif mass_name == 'center_y': - center_y = getattr(mass_param.point_estimate, 'value') - center_y_up = getattr(mass_param.definition_range, 'max_value') - center_y_down = getattr(mass_param.definition_range, 'min_value') - center_y_fixed = center_y if getattr(mass_param, 'fixed') else None + if mass_name == "theta_E": + te = getattr(mass_param.point_estimate, "value") + te_up = getattr(mass_param.definition_range, "max_value") + te_down = getattr(mass_param.definition_range, "min_value") + te_fixed = te if getattr(mass_param, "fixed") else None + elif mass_name == "gamma": + gamma = getattr(mass_param.point_estimate, "value") + gamma_up = getattr(mass_param.definition_range, "max_value") + gamma_down = getattr(mass_param.definition_range, "min_value") + gamma_fixed = gamma if getattr(mass_param, "fixed") else None + elif mass_name == "q": + q = getattr(mass_param.point_estimate, "value") + q_up = getattr(mass_param.definition_range, "max_value") + q_down = getattr(mass_param.definition_range, "min_value") + q_fixed = q if getattr(mass_param, "fixed") else None + elif mass_name == "phi": + phi = getattr(mass_param.point_estimate, "value") + phi_up = getattr(mass_param.definition_range, "max_value") + phi_down = getattr(mass_param.definition_range, "min_value") + phi_fixed = phi if getattr(mass_param, "fixed") else None + elif mass_name == 
"center_x": + center_x = ( + -getattr(mass_param.point_estimate, "value") + if getattr(mass_param.point_estimate, "value") is not None + else None + ) + center_x_up = getattr(mass_param.definition_range, "max_value") + center_x_down = getattr(mass_param.definition_range, "min_value") + center_x_fixed = center_x if getattr(mass_param, "fixed") else None + elif mass_name == "center_y": + center_y = getattr(mass_param.point_estimate, "value") + center_y_up = getattr(mass_param.definition_range, "max_value") + center_y_down = getattr(mass_param.definition_range, "min_value") + center_y_fixed = center_y if getattr(mass_param, "fixed") else None else: print(f"{mass_name} not known") e1, e2 = qphi_coolest_to_e1e2_lenstronomy(q, phi) e1_fixed, e2_fixed = qphi_coolest_to_e1e2_lenstronomy(q_fixed, phi_fixed) - e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy(q_down, q_up, phi_down, phi_up) - - kw_1 = {'theta_E': te, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': center_x, 'center_y': center_y} - kw_up_1 = {'theta_E': te_up, 'gamma': gamma_up, 'e1': e1_up, 'e2': e2_up, 'center_x': center_x_up, - 'center_y': center_y_up} - kw_down_1 = {'theta_E': te_down, 'gamma': gamma_down, 'e1': e1_down, 'e2': e2_down, - 'center_x': center_x_down, 'center_y': center_y_down} - kw_fixed_1 = {'theta_E': te_fixed, 'gamma': gamma_fixed, 'e1': e1_fixed, 'e2': e2_fixed, - 'center_x': center_x_fixed, 'center_y': center_y_fixed} + e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy( + q_down, q_up, phi_down, phi_up + ) + + kw_1 = { + "theta_E": te, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": center_x, + "center_y": center_y, + } + kw_up_1 = { + "theta_E": te_up, + "gamma": gamma_up, + "e1": e1_up, + "e2": e2_up, + "center_x": center_x_up, + "center_y": center_y_up, + } + kw_down_1 = { + "theta_E": te_down, + "gamma": gamma_down, + "e1": e1_down, + "e2": e2_down, + "center_x": center_x_down, + "center_y": center_y_down, + } + kw_fixed_1 = { + "theta_E": te_fixed, + "gamma": gamma_fixed, + "e1": e1_fixed, + "e2": e2_fixed, + "center_x": center_x_fixed, + "center_y": center_y_fixed, + } kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -281,9 +343,30 @@ def update_kwargs_pemd(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwa kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'theta_E': 1., 'gamma': 2., 'e1': 0.0, 'e2': -0.0, 'center_x': 0., 'center_y': 0.} - kw_up_default = {'theta_E': 100, 'gamma': 2.5, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} - kw_down_default = {'theta_E': 0, 'gamma': 1.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} + kw_init_default = { + "theta_E": 1.0, + "gamma": 2.0, + "e1": 0.0, + "e2": -0.0, + "center_x": 0.0, + "center_y": 0.0, + } + kw_up_default = { + "theta_E": 100, + "gamma": 2.5, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + kw_down_default = { + "theta_E": 0, + "gamma": 1.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } for key, val in kw_1.items(): if val is None: kw_init[key] = kw_init_default[key] @@ -302,17 +385,34 @@ def update_kwargs_pemd(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwa kwargs_lens_up.append(kw_up) kwargs_lens_down.append(kw_down) kwargs_lens_fixed.append(kw_fixed) - kwargs_lens_sigma.append({'theta_E': 1.5, 'gamma': 0.2, 'e1': 0.3, 'e2': 0.3, 'center_x': 0.5, 'center_y': 0.5}) - - print('\t PEMD correctly added') + kwargs_lens_sigma.append( + { + "theta_E": 1.5, + "gamma": 0.2, + "e1": 0.3, + "e2": 0.3, + "center_x": 
0.5, + "center_y": 0.5, + } + ) + + print("\t PEMD correctly added") return -def update_kwargs_sie(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwargs_lens_up, kwargs_lens_down, kwargs_lens_fixed, - kwargs_lens_sigma, cleaning=False): - """ - Update the lens list and kwargs with SIE mass model +def update_kwargs_sie( + mass, + lens_model_list, + kwargs_lens, + kwargs_lens_init, + kwargs_lens_up, + kwargs_lens_down, + kwargs_lens_fixed, + kwargs_lens_sigma, + cleaning=False, +): + """Update the lens list and kwargs with SIE mass model. :param mass: coolest.template.classes.profiles.mass.SIE object :param lens_model_list: the usual lenstronomy lens_model_list @@ -322,52 +422,78 @@ def update_kwargs_sie(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwar :param kwargs_lens_down: the usual lenstronomy kwargs :param kwargs_lens_fixed: the usual lenstronomy kwargs :param kwargs_lens_sigma: the usual lenstronomy kwargs - :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed - + :param cleaning: bool, if True, will update the empty fields with default values + + cleans the kwargs_fixed :return: updated list and kwargs """ - lens_model_list.append('SIE') + lens_model_list.append("SIE") for mass_name, mass_param in mass.parameters.items(): - if mass_name == 'theta_E': - te = getattr(mass_param.point_estimate, 'value') - te_up = getattr(mass_param.definition_range, 'max_value') - te_down = getattr(mass_param.definition_range, 'min_value') - te_fixed = te if getattr(mass_param, 'fixed') else None - elif mass_name == 'q': - q = getattr(mass_param.point_estimate, 'value') - q_up = getattr(mass_param.definition_range, 'max_value') - q_down = getattr(mass_param.definition_range, 'min_value') - q_fixed = q if getattr(mass_param, 'fixed') else None - elif mass_name == 'phi': - phi = getattr(mass_param.point_estimate, 'value') - phi_up = getattr(mass_param.definition_range, 'max_value') - phi_down = getattr(mass_param.definition_range, 'min_value') - phi_fixed = phi if getattr(mass_param, 'fixed') else None - elif mass_name == 'center_x': - center_x = - getattr(mass_param.point_estimate, 'value') if getattr(mass_param.point_estimate, - 'value') is not None else None - center_x_up = getattr(mass_param.definition_range, 'max_value') - center_x_down = getattr(mass_param.definition_range, 'min_value') - center_x_fixed = center_x if getattr(mass_param, 'fixed') else None - elif mass_name == 'center_y': - center_y = getattr(mass_param.point_estimate, 'value') - center_y_up = getattr(mass_param.definition_range, 'max_value') - center_y_down = getattr(mass_param.definition_range, 'min_value') - center_y_fixed = center_y if getattr(mass_param, 'fixed') else None + if mass_name == "theta_E": + te = getattr(mass_param.point_estimate, "value") + te_up = getattr(mass_param.definition_range, "max_value") + te_down = getattr(mass_param.definition_range, "min_value") + te_fixed = te if getattr(mass_param, "fixed") else None + elif mass_name == "q": + q = getattr(mass_param.point_estimate, "value") + q_up = getattr(mass_param.definition_range, "max_value") + q_down = getattr(mass_param.definition_range, "min_value") + q_fixed = q if getattr(mass_param, "fixed") else None + elif mass_name == "phi": + phi = getattr(mass_param.point_estimate, "value") + phi_up = getattr(mass_param.definition_range, "max_value") + phi_down = getattr(mass_param.definition_range, "min_value") + phi_fixed = phi if getattr(mass_param, "fixed") else None + elif mass_name == 
"center_x": + center_x = ( + -getattr(mass_param.point_estimate, "value") + if getattr(mass_param.point_estimate, "value") is not None + else None + ) + center_x_up = getattr(mass_param.definition_range, "max_value") + center_x_down = getattr(mass_param.definition_range, "min_value") + center_x_fixed = center_x if getattr(mass_param, "fixed") else None + elif mass_name == "center_y": + center_y = getattr(mass_param.point_estimate, "value") + center_y_up = getattr(mass_param.definition_range, "max_value") + center_y_down = getattr(mass_param.definition_range, "min_value") + center_y_fixed = center_y if getattr(mass_param, "fixed") else None else: print(f"{mass_name} not known") e1, e2 = qphi_coolest_to_e1e2_lenstronomy(q, phi) e1_fixed, e2_fixed = qphi_coolest_to_e1e2_lenstronomy(q_fixed, phi_fixed) - e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy(q_down, q_up, phi_down, phi_up) - - kw_1 = {'theta_E': te, 'e1': e1, 'e2': e2, 'center_x': center_x, 'center_y': center_y} - kw_up_1 = {'theta_E': te_up, 'e1': e1_up, 'e2': e2_up, 'center_x': center_x_up, - 'center_y': center_y_up} - kw_down_1 = {'theta_E': te_down, 'e1': e1_down, 'e2': e2_down, - 'center_x': center_x_down, 'center_y': center_y_down} - kw_fixed_1 = {'theta_E': te_fixed, 'e1': e1_fixed, 'e2': e2_fixed, - 'center_x': center_x_fixed, 'center_y': center_y_fixed} + e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy( + q_down, q_up, phi_down, phi_up + ) + + kw_1 = { + "theta_E": te, + "e1": e1, + "e2": e2, + "center_x": center_x, + "center_y": center_y, + } + kw_up_1 = { + "theta_E": te_up, + "e1": e1_up, + "e2": e2_up, + "center_x": center_x_up, + "center_y": center_y_up, + } + kw_down_1 = { + "theta_E": te_down, + "e1": e1_down, + "e2": e2_down, + "center_x": center_x_down, + "center_y": center_y_down, + } + kw_fixed_1 = { + "theta_E": te_fixed, + "e1": e1_fixed, + "e2": e2_fixed, + "center_x": center_x_fixed, + "center_y": center_y_fixed, + } kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -376,9 +502,27 @@ def update_kwargs_sie(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwar kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'theta_E': 1., 'e1': 0.0, 'e2': -0.0, 'center_x': 0., 'center_y': 0.} - kw_up_default = {'theta_E': 100, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, 'center_y': 100} - kw_down_default = {'theta_E': 0, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, 'center_y': -100} + kw_init_default = { + "theta_E": 1.0, + "e1": 0.0, + "e2": -0.0, + "center_x": 0.0, + "center_y": 0.0, + } + kw_up_default = { + "theta_E": 100, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + kw_down_default = { + "theta_E": 0, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } for key, val in kw_1.items(): if val is None: kw_init[key] = kw_init_default[key] @@ -397,82 +541,124 @@ def update_kwargs_sie(mass, lens_model_list, kwargs_lens, kwargs_lens_init, kwar kwargs_lens_up.append(kw_up) kwargs_lens_down.append(kw_down) kwargs_lens_fixed.append(kw_fixed) - kwargs_lens_sigma.append({'theta_E': 1.5, 'e1': 0.3, 'e2': 0.3, 'center_x': 0.5, 'center_y': 0.5}) + kwargs_lens_sigma.append( + {"theta_E": 1.5, "e1": 0.3, "e2": 0.3, "center_x": 0.5, "center_y": 0.5} + ) - print('\t SIE correctly added') + print("\t SIE correctly added") return -def update_kwargs_sersic(light, light_model_list, kwargs_light, kwargs_light_init, kwargs_light_up, kwargs_light_down, - kwargs_light_fixed, kwargs_light_sigma, cleaning=False): - """ - Update the source list and kwargs with 
SERSIC_ELLISPE light model +def update_kwargs_sersic( + light, + light_model_list, + kwargs_light, + kwargs_light_init, + kwargs_light_up, + kwargs_light_down, + kwargs_light_fixed, + kwargs_light_sigma, + cleaning=False, +): + """Update the source list and kwargs with SERSIC_ELLISPE light model. :param light: coolest.template.classes.profiles.light.Sersic object - :param light_model_list: the usual lenstronomy lens_light_model_list or source_light_model_list + :param light_model_list: the usual lenstronomy lens_light_model_list or + source_light_model_list :param kwargs_light: the usual lenstronomy kwargs :param kwargs_light_init: the usual lenstronomy kwargs :param kwargs_light_up: the usual lenstronomy kwargs :param kwargs_light_down: the usual lenstronomy kwargs :param kwargs_light_fixed: the usual lenstronomy kwargs :param kwargs_light_sigma: the usual lenstronomy kwargs - :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed - + :param cleaning: bool, if True, will update the empty fields with default values + + cleans the kwargs_fixed :return: updated list and kwargs """ - light_model_list.append('SERSIC_ELLIPSE') + light_model_list.append("SERSIC_ELLIPSE") for light_name, light_param in light.parameters.items(): - if light_name == 'I_eff': - amp = getattr(light_param.point_estimate, 'value') - amp_up = getattr(light_param.definition_range, 'max_value') - amp_down = getattr(light_param.definition_range, 'min_value') - amp_fixed = amp if getattr(light_param, 'fixed') else None - elif light_name == 'theta_eff': - R = getattr(light_param.point_estimate, 'value') - R_up = getattr(light_param.definition_range, 'max_value') - R_down = getattr(light_param.definition_range, 'min_value') - R_fixed = R if getattr(light_param, 'fixed') else None - elif light_name == 'n': - n = getattr(light_param.point_estimate, 'value') - n_up = getattr(light_param.definition_range, 'max_value') - n_down = getattr(light_param.definition_range, 'min_value') - n_fixed = n if getattr(light_param, 'fixed') else None - elif light_name == 'q': - q = getattr(light_param.point_estimate, 'value') - q_up = getattr(light_param.definition_range, 'max_value') - q_down = getattr(light_param.definition_range, 'min_value') - q_fixed = q if getattr(light_param, 'fixed') else None - elif light_name == 'phi': - phi = getattr(light_param.point_estimate, 'value') - phi_up = getattr(light_param.definition_range, 'max_value') - phi_down = getattr(light_param.definition_range, 'min_value') - phi_fixed = phi if getattr(light_param, 'fixed') else None - elif light_name == 'center_x': - cx = - getattr(light_param.point_estimate, 'value') if getattr(light_param.point_estimate, - 'value') is not None else None - cx_up = getattr(light_param.definition_range, 'max_value') - cx_down = getattr(light_param.definition_range, 'min_value') - cx_fixed = cx if getattr(light_param, 'fixed') else None - elif light_name == 'center_y': - cy = getattr(light_param.point_estimate, 'value') - cy_up = getattr(light_param.definition_range, 'max_value') - cy_down = getattr(light_param.definition_range, 'min_value') - cy_fixed = cy if getattr(light_param, 'fixed') else None + if light_name == "I_eff": + amp = getattr(light_param.point_estimate, "value") + amp_up = getattr(light_param.definition_range, "max_value") + amp_down = getattr(light_param.definition_range, "min_value") + amp_fixed = amp if getattr(light_param, "fixed") else None + elif light_name == "theta_eff": + R = 
getattr(light_param.point_estimate, "value") + R_up = getattr(light_param.definition_range, "max_value") + R_down = getattr(light_param.definition_range, "min_value") + R_fixed = R if getattr(light_param, "fixed") else None + elif light_name == "n": + n = getattr(light_param.point_estimate, "value") + n_up = getattr(light_param.definition_range, "max_value") + n_down = getattr(light_param.definition_range, "min_value") + n_fixed = n if getattr(light_param, "fixed") else None + elif light_name == "q": + q = getattr(light_param.point_estimate, "value") + q_up = getattr(light_param.definition_range, "max_value") + q_down = getattr(light_param.definition_range, "min_value") + q_fixed = q if getattr(light_param, "fixed") else None + elif light_name == "phi": + phi = getattr(light_param.point_estimate, "value") + phi_up = getattr(light_param.definition_range, "max_value") + phi_down = getattr(light_param.definition_range, "min_value") + phi_fixed = phi if getattr(light_param, "fixed") else None + elif light_name == "center_x": + cx = ( + -getattr(light_param.point_estimate, "value") + if getattr(light_param.point_estimate, "value") is not None + else None + ) + cx_up = getattr(light_param.definition_range, "max_value") + cx_down = getattr(light_param.definition_range, "min_value") + cx_fixed = cx if getattr(light_param, "fixed") else None + elif light_name == "center_y": + cy = getattr(light_param.point_estimate, "value") + cy_up = getattr(light_param.definition_range, "max_value") + cy_down = getattr(light_param.definition_range, "min_value") + cy_fixed = cy if getattr(light_param, "fixed") else None else: - print(f'Parameter {light_name} unknown in SersicEllipse profile.') + print(f"Parameter {light_name} unknown in SersicEllipse profile.") e1, e2 = qphi_coolest_to_e1e2_lenstronomy(q, phi) e1_fixed, e2_fixed = qphi_coolest_to_e1e2_lenstronomy(q_fixed, phi_fixed) - e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy(q_down, q_up, phi_down, phi_up) - - kw_1 = {'amp': amp, 'R_sersic': R, 'n_sersic': n, 'e1': e1, 'e2': e2, 'center_x': cx, 'center_y': cy} - kw_up_1 = {'R_sersic': R_up, 'n_sersic': n_up, 'e1': e1_up, 'e2': e2_up, - 'center_x': cx_up, 'center_y': cy_up} - kw_down_1 = {'R_sersic': R_down, 'n_sersic': n_down, 'e1': e1_down, 'e2': e2_down, - 'center_x': cx_down, 'center_y': cy_down} - kw_fixed_1 = {'R_sersic': R_fixed, 'n_sersic': n_fixed, 'e1': e1_fixed, - 'e2': e2_fixed, 'center_x': cx_fixed, 'center_y': cy_fixed} + e1_down, e1_up, e2_down, e2_up = ellibounds_coolest_to_lenstronomy( + q_down, q_up, phi_down, phi_up + ) + + kw_1 = { + "amp": amp, + "R_sersic": R, + "n_sersic": n, + "e1": e1, + "e2": e2, + "center_x": cx, + "center_y": cy, + } + kw_up_1 = { + "R_sersic": R_up, + "n_sersic": n_up, + "e1": e1_up, + "e2": e2_up, + "center_x": cx_up, + "center_y": cy_up, + } + kw_down_1 = { + "R_sersic": R_down, + "n_sersic": n_down, + "e1": e1_down, + "e2": e2_down, + "center_x": cx_down, + "center_y": cy_down, + } + kw_fixed_1 = { + "R_sersic": R_fixed, + "n_sersic": n_fixed, + "e1": e1_fixed, + "e2": e2_fixed, + "center_x": cx_fixed, + "center_y": cy_fixed, + } kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -481,12 +667,33 @@ def update_kwargs_sersic(light, light_model_list, kwargs_light, kwargs_light_ini kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'amp': 1., 'R_sersic': 1., 'n_sersic': 3.5, 'e1': 0.0, 'e2': -0.0, 'center_x': 0., - 'center_y': 0.} - kw_up_default = {'amp': 100, 'R_sersic': 100, 'n_sersic': 8, 'e1': 0.5, 'e2': 0.5, 'center_x': 100, - 
'center_y': 100} - kw_down_default = {'amp': 0, 'R_sersic': 0, 'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5, 'center_x': -100, - 'center_y': -100} + kw_init_default = { + "amp": 1.0, + "R_sersic": 1.0, + "n_sersic": 3.5, + "e1": 0.0, + "e2": -0.0, + "center_x": 0.0, + "center_y": 0.0, + } + kw_up_default = { + "amp": 100, + "R_sersic": 100, + "n_sersic": 8, + "e1": 0.5, + "e2": 0.5, + "center_x": 100, + "center_y": 100, + } + kw_down_default = { + "amp": 0, + "R_sersic": 0, + "n_sersic": 0.5, + "e1": -0.5, + "e2": -0.5, + "center_x": -100, + "center_y": -100, + } for key, val in kw_1.items(): if val is None: kw_init[key] = kw_init_default[key] @@ -506,61 +713,92 @@ def update_kwargs_sersic(light, light_model_list, kwargs_light, kwargs_light_ini kwargs_light_down.append(kw_down) kwargs_light_fixed.append(kw_fixed) kwargs_light_sigma.append( - {'R_sersic': 1.5, 'n_sersic': 1.5, 'e1': 0.2, 'e2': 0.2, 'center_x': 0.5, 'center_y': 0.5}) - print('\t Sersic (Ellipse) correctly added') + { + "R_sersic": 1.5, + "n_sersic": 1.5, + "e1": 0.2, + "e2": 0.2, + "center_x": 0.5, + "center_y": 0.5, + } + ) + print("\t Sersic (Ellipse) correctly added") return -def update_kwargs_shapelets(light, light_model_list, kwargs_light, kwargs_light_init, kwargs_light_up, kwargs_light_down, - kwargs_light_fixed, kwargs_light_sigma, cleaning=False): - """ - Update the source list and kwargs with SHAPELETS light model +def update_kwargs_shapelets( + light, + light_model_list, + kwargs_light, + kwargs_light_init, + kwargs_light_up, + kwargs_light_down, + kwargs_light_fixed, + kwargs_light_sigma, + cleaning=False, +): + """Update the source list and kwargs with SHAPELETS light model. :param light: coolest.template.classes.profiles.light.Shapelets object - :param light_model_list: the usual lenstronomy lens_light_model_list or source_light_model_list + :param light_model_list: the usual lenstronomy lens_light_model_list or + source_light_model_list :param kwargs_light: the usual lenstronomy kwargs :param kwargs_light_init: the usual lenstronomy kwargs :param kwargs_light_up: the usual lenstronomy kwargs :param kwargs_light_down: the usual lenstronomy kwargs :param kwargs_light_fixed: the usual lenstronomy kwargs :param kwargs_light_sigma: the usual lenstronomy kwargs - :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed - + :param cleaning: bool, if True, will update the empty fields with default values + + cleans the kwargs_fixed :return: updated list and kwargs """ - light_model_list.append('SHAPELETS') + light_model_list.append("SHAPELETS") for light_name, light_param in light.parameters.items(): - if light_name == 'beta': - b = getattr(light_param.point_estimate, 'value') - b_up = getattr(light_param.definition_range, 'max_value') - b_down = getattr(light_param.definition_range, 'min_value') - b_fixed = b if getattr(light_param, 'fixed') else None - elif light_name == 'n_max': - nmax = getattr(light_param.point_estimate, 'value') - nmax_fixed = nmax if getattr(light_param, 'fixed') else None - elif light_name == 'center_x': - cx = - getattr(light_param.point_estimate, 'value') if getattr(light_param.point_estimate, - 'value') is not None else None - cx_up = getattr(light_param.definition_range, 'max_value') - cx_down = getattr(light_param.definition_range, 'min_value') - cx_fixed = cx if getattr(light_param, 'fixed') else None - elif light_name == 'center_y': - cy = getattr(light_param.point_estimate, 'value') - cy_up = getattr(light_param.definition_range, 
'max_value') - cy_down = getattr(light_param.definition_range, 'min_value') - cy_fixed = cy if getattr(light_param, 'fixed') else None - elif light_name == 'amps': - amp = shapelet_amp_coolest_to_lenstronomy(getattr(light_param.point_estimate, 'value')) - amp_up = shapelet_amp_coolest_to_lenstronomy(getattr(light_param.definition_range, 'max_value')) - amp_down = shapelet_amp_coolest_to_lenstronomy(getattr(light_param.definition_range, 'min_value')) - amp_fixed = amp if getattr(light_param, 'fixed') else None + if light_name == "beta": + b = getattr(light_param.point_estimate, "value") + b_up = getattr(light_param.definition_range, "max_value") + b_down = getattr(light_param.definition_range, "min_value") + b_fixed = b if getattr(light_param, "fixed") else None + elif light_name == "n_max": + nmax = getattr(light_param.point_estimate, "value") + nmax_fixed = nmax if getattr(light_param, "fixed") else None + elif light_name == "center_x": + cx = ( + -getattr(light_param.point_estimate, "value") + if getattr(light_param.point_estimate, "value") is not None + else None + ) + cx_up = getattr(light_param.definition_range, "max_value") + cx_down = getattr(light_param.definition_range, "min_value") + cx_fixed = cx if getattr(light_param, "fixed") else None + elif light_name == "center_y": + cy = getattr(light_param.point_estimate, "value") + cy_up = getattr(light_param.definition_range, "max_value") + cy_down = getattr(light_param.definition_range, "min_value") + cy_fixed = cy if getattr(light_param, "fixed") else None + elif light_name == "amps": + amp = shapelet_amp_coolest_to_lenstronomy( + getattr(light_param.point_estimate, "value") + ) + amp_up = shapelet_amp_coolest_to_lenstronomy( + getattr(light_param.definition_range, "max_value") + ) + amp_down = shapelet_amp_coolest_to_lenstronomy( + getattr(light_param.definition_range, "min_value") + ) + amp_fixed = amp if getattr(light_param, "fixed") else None else: - print(f'Parameter {light_name} unknown in Shapelets profile.') - - kw_1 = {'amp': amp, 'beta': b, 'center_x': cx, 'center_y': cy, 'n_max': nmax} - kw_up_1 = {'beta': b_up, 'center_x': cx_up, 'center_y': cy_up} - kw_down_1 = {'beta': b_down, 'center_x': cx_down, 'center_y': cy_down} - kw_fixed_1 = {'beta': b_fixed, 'center_x': cx_fixed, 'center_y': cy_fixed, 'n_max': nmax} + print(f"Parameter {light_name} unknown in Shapelets profile.") + + kw_1 = {"amp": amp, "beta": b, "center_x": cx, "center_y": cy, "n_max": nmax} + kw_up_1 = {"beta": b_up, "center_x": cx_up, "center_y": cy_up} + kw_down_1 = {"beta": b_down, "center_x": cx_down, "center_y": cy_down} + kw_fixed_1 = { + "beta": b_fixed, + "center_x": cx_fixed, + "center_y": cy_fixed, + "n_max": nmax, + } kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -569,9 +807,27 @@ def update_kwargs_shapelets(light, light_model_list, kwargs_light, kwargs_light_ kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'amp': 1., 'beta': 0.1, 'center_x': 0., 'center_y': 0., 'n_max': -1} - kw_up_default = {'amp': 100, 'beta': 100, 'center_x': 100, 'center_y': 100, 'n_max': 1000} - kw_down_default = {'amp': 0, 'beta': 0, 'center_x': -100, 'center_y': -100, 'n_max': -1} + kw_init_default = { + "amp": 1.0, + "beta": 0.1, + "center_x": 0.0, + "center_y": 0.0, + "n_max": -1, + } + kw_up_default = { + "amp": 100, + "beta": 100, + "center_x": 100, + "center_y": 100, + "n_max": 1000, + } + kw_down_default = { + "amp": 0, + "beta": 0, + "center_x": -100, + "center_y": -100, + "n_max": -1, + } for key, val in kw_1.items(): if val is None: 
kw_init[key] = kw_init_default[key] @@ -590,15 +846,24 @@ def update_kwargs_shapelets(light, light_model_list, kwargs_light, kwargs_light_ kwargs_light_up.append(kw_up) kwargs_light_down.append(kw_down) kwargs_light_fixed.append(kw_fixed) - kwargs_light_sigma.append({'beta': 0.5, 'center_x': 0.5, 'center_y': 0.5}) - print('\t Shapelets correctly added') + kwargs_light_sigma.append({"beta": 0.5, "center_x": 0.5, "center_y": 0.5}) + print("\t Shapelets correctly added") return -def update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, kwargs_ps_init, kwargs_ps_up, kwargs_ps_down, kwargs_ps_fixed, - kwargs_ps_sigma, cleaning=False): - """ - Update the source list and kwargs with lensed point source "LENSED_POSITION" light model +def update_kwargs_lensed_ps( + light, + ps_model_list, + kwargs_ps, + kwargs_ps_init, + kwargs_ps_up, + kwargs_ps_down, + kwargs_ps_fixed, + kwargs_ps_sigma, + cleaning=False, +): + """Update the source list and kwargs with lensed point source "LENSED_POSITION" + light model. :param light: coolest.template.classes.profiles.lightLensedPS object :param ps_model_list: the usual lenstronomy point_source_model_list @@ -608,46 +873,54 @@ def update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, kwargs_ps_init, kwa :param kwargs_ps_down: the usual lenstronomy kwargs :param kwargs_ps_fixed: the usual lenstronomy kwargs :param kwargs_ps_sigma: the usual lenstronomy kwargs - :param cleaning: bool, if True, will update the empty fields with default values + cleans the kwargs_fixed - + :param cleaning: bool, if True, will update the empty fields with default values + + cleans the kwargs_fixed :return: updated list and kwargs """ - ps_model_list.append('LENSED_POSITION') + ps_model_list.append("LENSED_POSITION") try: - num_ps = len(getattr(light.parameters['ra_list'].point_estimate, 'value')) + num_ps = len(getattr(light.parameters["ra_list"].point_estimate, "value")) except: num_ps = 4 for light_name, light_param in light.parameters.items(): - if light_name == 'ra_list': - ra = - np.array(getattr(light_param.point_estimate, 'value')) if getattr(light_param.point_estimate, - 'value') is not None else None - ra_up = getattr(light_param.definition_range, 'max_value') - ra_down = getattr(light_param.definition_range, 'min_value') - ra_fixed = ra if getattr(light_param, 'fixed') else None - elif light_name == 'dec_list': - dec = getattr(light_param.point_estimate, 'value') - dec_up = getattr(light_param.definition_range, 'max_value') - dec_down = getattr(light_param.definition_range, 'min_value') - dec_fixed = dec if getattr(light_param, 'fixed') else None - elif light_name == 'amps': - amp = getattr(light_param.point_estimate, 'value') - amp_up = getattr(light_param.definition_range, 'max_value') - amp_down = getattr(light_param.definition_range, 'min_value') - amp_fixed = amp if getattr(light_param, 'fixed') else None + if light_name == "ra_list": + ra = ( + -np.array(getattr(light_param.point_estimate, "value")) + if getattr(light_param.point_estimate, "value") is not None + else None + ) + ra_up = getattr(light_param.definition_range, "max_value") + ra_down = getattr(light_param.definition_range, "min_value") + ra_fixed = ra if getattr(light_param, "fixed") else None + elif light_name == "dec_list": + dec = getattr(light_param.point_estimate, "value") + dec_up = getattr(light_param.definition_range, "max_value") + dec_down = getattr(light_param.definition_range, "min_value") + dec_fixed = dec if getattr(light_param, "fixed") else None + elif light_name == "amps": + amp = 
getattr(light_param.point_estimate, "value") + amp_up = getattr(light_param.definition_range, "max_value") + amp_down = getattr(light_param.definition_range, "min_value") + amp_fixed = amp if getattr(light_param, "fixed") else None else: - print(f'Parameter {light_name} unknown in LensedPS profile.') - - kw_1 = {'point_amp': np.array(amp) if amp is not None else None, - 'ra_image': np.array(ra) if ra is not None else None, - 'dec_image': np.array(dec) if dec is not None else None} - kw_up_1 = {'ra_image': np.array(ra_up) if ra_up is not None else None, - 'dec_image': np.array(dec_up) if dec_up is not None else None} - kw_down_1 = {'ra_image': np.array(ra_down) if ra_down is not None else None, - 'dec_image': np.array(dec_down) if dec_down is not None else None} - kw_fixed_1 = {'ra_image': ra_fixed, - 'dec_image': dec_fixed} + print(f"Parameter {light_name} unknown in LensedPS profile.") + + kw_1 = { + "point_amp": np.array(amp) if amp is not None else None, + "ra_image": np.array(ra) if ra is not None else None, + "dec_image": np.array(dec) if dec is not None else None, + } + kw_up_1 = { + "ra_image": np.array(ra_up) if ra_up is not None else None, + "dec_image": np.array(dec_up) if dec_up is not None else None, + } + kw_down_1 = { + "ra_image": np.array(ra_down) if ra_down is not None else None, + "dec_image": np.array(dec_down) if dec_down is not None else None, + } + kw_fixed_1 = {"ra_image": ra_fixed, "dec_image": dec_fixed} kw_ = kw_1.copy() kw_init = kw_1.copy() @@ -656,9 +929,19 @@ def update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, kwargs_ps_init, kwa kw_fixed = kw_fixed_1.copy() if cleaning is True: - kw_init_default = {'point_amp': np.ones(num_ps), 'ra_image': np.ones(num_ps), 'dec_image': np.ones(num_ps)} - kw_up_default = {'ra_image': np.ones(num_ps) * 10, 'dec_image': np.ones(num_ps) * 10} - kw_down_default = {'ra_image': np.ones(num_ps) * (-10), 'dec_image': np.ones(num_ps) * (-10)} + kw_init_default = { + "point_amp": np.ones(num_ps), + "ra_image": np.ones(num_ps), + "dec_image": np.ones(num_ps), + } + kw_up_default = { + "ra_image": np.ones(num_ps) * 10, + "dec_image": np.ones(num_ps) * 10, + } + kw_down_default = { + "ra_image": np.ones(num_ps) * (-10), + "dec_image": np.ones(num_ps) * (-10), + } for key, val in kw_1.items(): if val is None: kw_init[key] = kw_init_default[key] @@ -677,7 +960,6 @@ def update_kwargs_lensed_ps(light, ps_model_list, kwargs_ps, kwargs_ps_init, kwa kwargs_ps_up.append(kw_up) kwargs_ps_down.append(kw_down) kwargs_ps_fixed.append(kw_fixed) - kwargs_ps_sigma.append({'ra_image': np.ones(num_ps), 'dec_image': np.ones(num_ps)}) - print('\t Lensed point sources correctly added') + kwargs_ps_sigma.append({"ra_image": np.ones(num_ps), "dec_image": np.ones(num_ps)}) + print("\t Lensed point sources correctly added") return - diff --git a/lenstronomy/Util/coolest_update_util.py b/lenstronomy/Util/coolest_update_util.py index 71b5161ad..3e7147b95 100644 --- a/lenstronomy/Util/coolest_update_util.py +++ b/lenstronomy/Util/coolest_update_util.py @@ -5,11 +5,11 @@ def shapelet_amp_lenstronomy_to_coolest(value): - """ - Transforms shapelets coefficients from lenstronomy conventions (x following ra, to the left) to - COOLEST conventions (x to the right) + """Transforms shapelets coefficients from lenstronomy conventions (x following ra, + to the left) to COOLEST conventions (x to the right) - :param value: amplitude of the shapelet (float or np.array) in lenstronomy conventions + :param value: amplitude of the shapelet (float or np.array) in lenstronomy 
+ conventions :return: amplitude of the shapelet (float or np.array) in COOLEST conventions """ if value is None: @@ -19,7 +19,6 @@ def shapelet_amp_lenstronomy_to_coolest(value): k = 0 # this is the index of a coefficient for a given order new_value = [] for coeff in value: - if n % 2 == 0: if k % 2 == 1: coeff = -coeff @@ -57,57 +56,56 @@ def radian_lenstronomy_to_degree(value): :return: float, angle almost in COOLEST conventions (without folding) """ - lenstro_degree = value * 180. / np.pi - coolest_oriented_degree = lenstro_degree - 90. + lenstro_degree = value * 180.0 / np.pi + coolest_oriented_degree = lenstro_degree - 90.0 coolest_oriented_degree *= -1 return coolest_oriented_degree def folding_coolest(value): - """ - Folds the angle (already in degree with COOLEST East of North convention) into COOLEST range, ]-90;90] + """Folds the angle (already in degree with COOLEST East of North convention) into + COOLEST range, ]-90;90] - :param value: float, angle almost in COOLEST conventions (without folding in ]-90;90]) + :param value: float, angle almost in COOLEST conventions (without folding in + ]-90;90]) :return: float, angle in COOLEST conventions (with folding in ]-90;90]) """ coolest_oriented_degree = value if type(coolest_oriented_degree) == type(np.array([])): for idx, val in enumerate(coolest_oriented_degree): - if val <= -90.: - coolest_oriented_degree[idx] += 180. - elif val > 90.: - coolest_oriented_degree[idx] -= 180. + if val <= -90.0: + coolest_oriented_degree[idx] += 180.0 + elif val > 90.0: + coolest_oriented_degree[idx] -= 180.0 else: if coolest_oriented_degree <= -90: - coolest_oriented_degree += 180. + coolest_oriented_degree += 180.0 elif coolest_oriented_degree > 90: - coolest_oriented_degree -= 180. + coolest_oriented_degree -= 180.0 return coolest_oriented_degree def e1e2_lenstronomy_to_qphi_coolest(e1, e2): - """ - Transform e1,e2 in lenstronomy to q and phi (axis ratio, position angle East-of-North) + """Transform e1,e2 in lenstronomy to q and phi (axis ratio, position angle East-of- + North) :param e1: lenstronomy usual ellipticity parameters :param e2: lenstronomy usual ellipticity parameters - :return: q, phi ; axis ratio and position angle in COOLEST conventions """ - angle,q = ellipticity2phi_q(e1,e2) + angle, q = ellipticity2phi_q(e1, e2) phi = radian_lenstronomy_to_degree_coolest(angle) return q, phi def g1g2_lenstronomy_to_gamma_phi_coolest(gamma1, gamma2): - """ - Transform gamma1,gamma2 in lenstronomy to gamma_ext and phi_ext (shear strength, position angle East-of-North) - with folding + """Transform gamma1,gamma2 in lenstronomy to gamma_ext and phi_ext (shear strength, + position angle East-of-North) with folding. :param gamma1: lenstronomy usual shear parameters :param gamma2: lenstronomy usual shear parameters - - :return: gamma_ext, phi_ext ; shear strenght and shear position angle in COOLEST conventions (with folding) + :return: gamma_ext, phi_ext ; shear strenght and shear position angle in COOLEST + conventions (with folding) """ angle, gamma_ext = shear_cartesian2polar(gamma1, gamma2) phi_ext = radian_lenstronomy_to_degree_coolest(angle) @@ -115,20 +113,19 @@ def g1g2_lenstronomy_to_gamma_phi_coolest(gamma1, gamma2): def g1g2_lenstronomy_to_gamma_phi(gamma1, gamma2): - """ - Transform gamma1,gamma2 in lenstronomy to gamma_ext and phi_ext (shear strength, position angle East-of-North) - without folding + """Transform gamma1,gamma2 in lenstronomy to gamma_ext and phi_ext (shear strength, + position angle East-of-North) without folding. 
:param gamma1: lenstronomy usual shear parameters :param gamma2: lenstronomy usual shear parameters - - :return: gamma_ext, phi_ext ; shear strenght and shear position angle almost in COOLEST conventions (without folding) + :return: gamma_ext, phi_ext ; shear strenght and shear position angle almost in + COOLEST conventions (without folding) """ - angle = np.arctan2(gamma2, gamma1) / 2. + angle = np.arctan2(gamma2, gamma1) / 2.0 phi_ext = radian_lenstronomy_to_degree(angle) - gamma_ext = np.sqrt(gamma1 ** 2 + gamma2 ** 2) + gamma_ext = np.sqrt(gamma1**2 + gamma2**2) return gamma_ext, phi_ext @@ -142,303 +139,427 @@ def shear_update(shear_idx, kwargs_lens, kwargs_lens_mcmc=None): :return: updated shear_idx """ - gamma_ext, phi_ext = g1g2_lenstronomy_to_gamma_phi_coolest(float(kwargs_lens['gamma1']), - float(kwargs_lens['gamma2'])) - shear_idx.parameters['gamma_ext'].set_point_estimate(PointEstimate(float(gamma_ext))) - shear_idx.parameters['phi_ext'].set_point_estimate(PointEstimate(float(phi_ext))) + gamma_ext, phi_ext = g1g2_lenstronomy_to_gamma_phi_coolest( + float(kwargs_lens["gamma1"]), float(kwargs_lens["gamma2"]) + ) + shear_idx.parameters["gamma_ext"].set_point_estimate( + PointEstimate(float(gamma_ext)) + ) + shear_idx.parameters["phi_ext"].set_point_estimate(PointEstimate(float(phi_ext))) if kwargs_lens_mcmc is not None: - g1 = [arg['gamma1'] for arg in kwargs_lens_mcmc] - g2 = [arg['gamma2'] for arg in kwargs_lens_mcmc] + g1 = [arg["gamma1"] for arg in kwargs_lens_mcmc] + g2 = [arg["gamma2"] for arg in kwargs_lens_mcmc] g_ext, p_ext = g1g2_lenstronomy_to_gamma_phi(np.array(g1), np.array(g2)) g_ext_mean = np.mean(g_ext) g_ext_16, g_ext_50, g_ext_84 = np.quantile(g_ext, [0.16, 0.5, 0.84]) - shear_idx.parameters['gamma_ext'].set_posterior(PosteriorStatistics(float(g_ext_mean), float(g_ext_50), - float(g_ext_16), float(g_ext_84))) + shear_idx.parameters["gamma_ext"].set_posterior( + PosteriorStatistics( + float(g_ext_mean), float(g_ext_50), float(g_ext_16), float(g_ext_84) + ) + ) p_ext_mean = folding_coolest(np.mean(p_ext)) - p_ext_16, p_ext_50, p_ext_84 = folding_coolest(np.quantile(p_ext, [0.16, 0.5, 0.84])) - shear_idx.parameters['phi_ext'].set_posterior(PosteriorStatistics(float(p_ext_mean), float(p_ext_50), - float(p_ext_16), float(p_ext_84))) - print('shear correctly updated') + p_ext_16, p_ext_50, p_ext_84 = folding_coolest( + np.quantile(p_ext, [0.16, 0.5, 0.84]) + ) + shear_idx.parameters["phi_ext"].set_posterior( + PosteriorStatistics( + float(p_ext_mean), float(p_ext_50), float(p_ext_16), float(p_ext_84) + ) + ) + print("shear correctly updated") return def pemd_update(mass, kwargs_lens, kwargs_lens_mcmc=None): - """ - Update the COOLEST PEMD mass model with results in kwargs_lens + """Update the COOLEST PEMD mass model with results in kwargs_lens. 
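# Illustrative sketch (not part of the patch): the polar decomposition used by
# g1g2_lenstronomy_to_gamma_phi() above; the shear values are made up.
import numpy as np

gamma1, gamma2 = 0.05, -0.02
gamma_ext = np.sqrt(gamma1**2 + gamma2**2)   # shear strength
angle = np.arctan2(gamma2, gamma1) / 2.0     # shear angle in radians, lenstronomy frame
print(gamma_ext, np.degrees(angle))          # ~0.054, ~-10.9 deg (before the COOLEST rotation and folding)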
:param mass: coolest.template.classes.profiles.mass.PEMD object :param kwargs_lens: dictionnary with the point estimate - :return: updated mass """ - q, phi = e1e2_lenstronomy_to_qphi_coolest(float(kwargs_lens['e1']), float(kwargs_lens['e2'])) - mass.parameters['theta_E'].set_point_estimate(PointEstimate(float(kwargs_lens['theta_E']))) - mass.parameters['gamma'].set_point_estimate(PointEstimate(float(kwargs_lens['gamma']))) - mass.parameters['q'].set_point_estimate(PointEstimate(float(q))) - mass.parameters['phi'].set_point_estimate(PointEstimate(float(phi))) - mass.parameters['center_x'].set_point_estimate(PointEstimate(-float(kwargs_lens['center_x']))) - mass.parameters['center_y'].set_point_estimate(PointEstimate(float(kwargs_lens['center_y']))) + q, phi = e1e2_lenstronomy_to_qphi_coolest( + float(kwargs_lens["e1"]), float(kwargs_lens["e2"]) + ) + mass.parameters["theta_E"].set_point_estimate( + PointEstimate(float(kwargs_lens["theta_E"])) + ) + mass.parameters["gamma"].set_point_estimate( + PointEstimate(float(kwargs_lens["gamma"])) + ) + mass.parameters["q"].set_point_estimate(PointEstimate(float(q))) + mass.parameters["phi"].set_point_estimate(PointEstimate(float(phi))) + mass.parameters["center_x"].set_point_estimate( + PointEstimate(-float(kwargs_lens["center_x"])) + ) + mass.parameters["center_y"].set_point_estimate( + PointEstimate(float(kwargs_lens["center_y"])) + ) if kwargs_lens_mcmc is not None: - te = [arg['theta_E'] for arg in kwargs_lens_mcmc] + te = [arg["theta_E"] for arg in kwargs_lens_mcmc] te_mean = np.mean(te) te_16, te_50, te_84 = np.quantile(te, [0.16, 0.5, 0.84]) - mass.parameters['theta_E'].set_posterior(PosteriorStatistics(float(te_mean), float(te_50), - float(te_16), float(te_84))) + mass.parameters["theta_E"].set_posterior( + PosteriorStatistics( + float(te_mean), float(te_50), float(te_16), float(te_84) + ) + ) - g = [arg['gamma'] for arg in kwargs_lens_mcmc] + g = [arg["gamma"] for arg in kwargs_lens_mcmc] g_mean = np.mean(g) g_16, g_50, g_84 = np.quantile(g, [0.16, 0.5, 0.84]) - mass.parameters['gamma'].set_posterior(PosteriorStatistics(float(g_mean), float(g_50), - float(g_16), float(g_84))) + mass.parameters["gamma"].set_posterior( + PosteriorStatistics(float(g_mean), float(g_50), float(g_16), float(g_84)) + ) - e1 = [arg['e1'] for arg in kwargs_lens_mcmc] - e2 = [arg['e2'] for arg in kwargs_lens_mcmc] + e1 = [arg["e1"] for arg in kwargs_lens_mcmc] + e2 = [arg["e2"] for arg in kwargs_lens_mcmc] ql, phil = e1e2_lenstronomy_to_qphi_coolest(np.array(e1), np.array(e2)) ql_mean = np.mean(ql) ql_16, ql_50, ql_84 = np.quantile(ql, [0.16, 0.5, 0.84]) - mass.parameters['q'].set_posterior(PosteriorStatistics(float(ql_mean), float(ql_50), - float(ql_16), float(ql_84))) + mass.parameters["q"].set_posterior( + PosteriorStatistics( + float(ql_mean), float(ql_50), float(ql_16), float(ql_84) + ) + ) phil_mean = np.mean(phil) phil_16, phil_50, phil_84 = np.quantile(phil, [0.16, 0.5, 0.84]) - mass.parameters['phi'].set_posterior(PosteriorStatistics(float(phil_mean), float(phil_50), - float(phil_16), float(phil_84))) + mass.parameters["phi"].set_posterior( + PosteriorStatistics( + float(phil_mean), float(phil_50), float(phil_16), float(phil_84) + ) + ) - cx = [arg['center_x'] for arg in kwargs_lens_mcmc] + cx = [arg["center_x"] for arg in kwargs_lens_mcmc] cx_mean = np.mean(cx) cx_16, cx_50, cx_84 = np.quantile(cx, [0.16, 0.5, 0.84]) - mass.parameters['center_x'].set_posterior(PosteriorStatistics(-float(cx_mean), -float(cx_50), - -float(cx_16), -float(cx_84))) + 
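# Illustrative sketch (not part of the patch): the summary-statistics pattern used by
# all the *_update() functions above, i.e. mean and 16/50/84 percentiles of the MCMC
# samples; the samples below are synthetic.
import numpy as np

kwargs_lens_mcmc = [{"theta_E": t} for t in np.random.normal(1.2, 0.05, size=1000)]
te = [arg["theta_E"] for arg in kwargs_lens_mcmc]
te_mean = np.mean(te)
te_16, te_50, te_84 = np.quantile(te, [0.16, 0.5, 0.84])
# these four numbers are what gets stored as PosteriorStatistics(mean, median, 16th, 84th)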
mass.parameters["center_x"].set_posterior( + PosteriorStatistics( + -float(cx_mean), -float(cx_50), -float(cx_16), -float(cx_84) + ) + ) - cy = [arg['center_y'] for arg in kwargs_lens_mcmc] + cy = [arg["center_y"] for arg in kwargs_lens_mcmc] cy_mean = np.mean(cy) cy_16, cy_50, cy_84 = np.quantile(cy, [0.16, 0.5, 0.84]) - mass.parameters['center_y'].set_posterior(PosteriorStatistics(float(cy_mean), float(cy_50), - float(cy_16), float(cy_84))) + mass.parameters["center_y"].set_posterior( + PosteriorStatistics( + float(cy_mean), float(cy_50), float(cy_16), float(cy_84) + ) + ) - print('PEMD correctly updated') + print("PEMD correctly updated") return def sie_update(mass, kwargs_lens, kwargs_lens_mcmc=None): - """ - Update the COOLEST SIE mass model with results in kwargs_lens + """Update the COOLEST SIE mass model with results in kwargs_lens. :param mass: coolest.template.classes.profiles.mass.SIE object :param kwargs_lens : dictionnary with the point estimate - :return: updated mass """ - q, phi = e1e2_lenstronomy_to_qphi_coolest(float(kwargs_lens['e1']), float(kwargs_lens['e2'])) - mass.parameters['theta_E'].set_point_estimate(PointEstimate(float(kwargs_lens['theta_E']))) - mass.parameters['q'].set_point_estimate(PointEstimate(float(q))) - mass.parameters['phi'].set_point_estimate(PointEstimate(float(phi))) - mass.parameters['center_x'].set_point_estimate(PointEstimate(-float(kwargs_lens['center_x']))) - mass.parameters['center_y'].set_point_estimate(PointEstimate(float(kwargs_lens['center_y']))) + q, phi = e1e2_lenstronomy_to_qphi_coolest( + float(kwargs_lens["e1"]), float(kwargs_lens["e2"]) + ) + mass.parameters["theta_E"].set_point_estimate( + PointEstimate(float(kwargs_lens["theta_E"])) + ) + mass.parameters["q"].set_point_estimate(PointEstimate(float(q))) + mass.parameters["phi"].set_point_estimate(PointEstimate(float(phi))) + mass.parameters["center_x"].set_point_estimate( + PointEstimate(-float(kwargs_lens["center_x"])) + ) + mass.parameters["center_y"].set_point_estimate( + PointEstimate(float(kwargs_lens["center_y"])) + ) if kwargs_lens_mcmc is not None: - te = [arg['theta_E'] for arg in kwargs_lens_mcmc] + te = [arg["theta_E"] for arg in kwargs_lens_mcmc] te_mean = np.mean(te) te_16, te_50, te_84 = np.quantile(te, [0.16, 0.5, 0.84]) - mass.parameters['theta_E'].set_posterior(PosteriorStatistics(float(te_mean), float(te_50), - float(te_16), float(te_84))) - - e1 = [arg['e1'] for arg in kwargs_lens_mcmc] - e2 = [arg['e2'] for arg in kwargs_lens_mcmc] + mass.parameters["theta_E"].set_posterior( + PosteriorStatistics( + float(te_mean), float(te_50), float(te_16), float(te_84) + ) + ) + + e1 = [arg["e1"] for arg in kwargs_lens_mcmc] + e2 = [arg["e2"] for arg in kwargs_lens_mcmc] ql, phil = e1e2_lenstronomy_to_qphi_coolest(np.array(e1), np.array(e2)) ql_mean = np.mean(ql) ql_16, ql_50, ql_84 = np.quantile(ql, [0.16, 0.5, 0.84]) - mass.parameters['q'].set_posterior(PosteriorStatistics(float(ql_mean), float(ql_50), - float(ql_16), float(ql_84))) + mass.parameters["q"].set_posterior( + PosteriorStatistics( + float(ql_mean), float(ql_50), float(ql_16), float(ql_84) + ) + ) phil_mean = np.mean(phil) phil_16, phil_50, phil_84 = np.quantile(phil, [0.16, 0.5, 0.84]) - mass.parameters['phi'].set_posterior(PosteriorStatistics(float(phil_mean), float(phil_50), - float(phil_16), float(phil_84))) + mass.parameters["phi"].set_posterior( + PosteriorStatistics( + float(phil_mean), float(phil_50), float(phil_16), float(phil_84) + ) + ) - cx = [arg['center_x'] for arg in kwargs_lens_mcmc] + cx = 
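# Illustrative sketch (not part of the patch): how e1/e2 translate into the axis ratio
# q and position angle phi used above, assuming lenstronomy's ellipticity2phi_q helper
# (imported at the top of this module).
import numpy as np
from lenstronomy.Util.param_util import ellipticity2phi_q

e1, e2 = 0.1, 0.05
phi_rad, q = ellipticity2phi_q(e1, e2)       # phi in radians, lenstronomy frame
print(q, np.degrees(phi_rad))                # q ~ 0.80, phi ~ 13.3 degrees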
[arg["center_x"] for arg in kwargs_lens_mcmc] cx_mean = np.mean(cx) cx_16, cx_50, cx_84 = np.quantile(cx, [0.16, 0.5, 0.84]) - mass.parameters['center_x'].set_posterior(PosteriorStatistics(-float(cx_mean), -float(cx_50), - -float(cx_16), -float(cx_84))) + mass.parameters["center_x"].set_posterior( + PosteriorStatistics( + -float(cx_mean), -float(cx_50), -float(cx_16), -float(cx_84) + ) + ) - cy = [arg['center_y'] for arg in kwargs_lens_mcmc] + cy = [arg["center_y"] for arg in kwargs_lens_mcmc] cy_mean = np.mean(cy) cy_16, cy_50, cy_84 = np.quantile(cy, [0.16, 0.5, 0.84]) - mass.parameters['center_y'].set_posterior(PosteriorStatistics(float(cy_mean), float(cy_50), - float(cy_16), float(cy_84))) + mass.parameters["center_y"].set_posterior( + PosteriorStatistics( + float(cy_mean), float(cy_50), float(cy_16), float(cy_84) + ) + ) - print('SIE correctly updated') + print("SIE correctly updated") return def sersic_update(light, kwargs_light, kwargs_light_mcmc=None): - """ - Update the COOLEST Sersic (ellipse) light model with results in kwargs_light + """Update the COOLEST Sersic (ellipse) light model with results in kwargs_light. :param light: coolest.template.classes.profiles.light.Sersic object :param kwargs_light: dictionnary with the point estimate - :return: updated light """ - q, phi = e1e2_lenstronomy_to_qphi_coolest(float(kwargs_light['e1']), float(kwargs_light['e2'])) - light.parameters['I_eff'].set_point_estimate(PointEstimate(float(kwargs_light['amp']))) - light.parameters['theta_eff'].set_point_estimate(PointEstimate(float(kwargs_light['R_sersic']))) - light.parameters['n'].set_point_estimate(PointEstimate(float(kwargs_light['n_sersic']))) - light.parameters['q'].set_point_estimate(PointEstimate(float(q))) - light.parameters['phi'].set_point_estimate(PointEstimate(float(phi))) - light.parameters['center_x'].set_point_estimate(PointEstimate(-float(kwargs_light['center_x']))) - light.parameters['center_y'].set_point_estimate(PointEstimate(float(kwargs_light['center_y']))) + q, phi = e1e2_lenstronomy_to_qphi_coolest( + float(kwargs_light["e1"]), float(kwargs_light["e2"]) + ) + light.parameters["I_eff"].set_point_estimate( + PointEstimate(float(kwargs_light["amp"])) + ) + light.parameters["theta_eff"].set_point_estimate( + PointEstimate(float(kwargs_light["R_sersic"])) + ) + light.parameters["n"].set_point_estimate( + PointEstimate(float(kwargs_light["n_sersic"])) + ) + light.parameters["q"].set_point_estimate(PointEstimate(float(q))) + light.parameters["phi"].set_point_estimate(PointEstimate(float(phi))) + light.parameters["center_x"].set_point_estimate( + PointEstimate(-float(kwargs_light["center_x"])) + ) + light.parameters["center_y"].set_point_estimate( + PointEstimate(float(kwargs_light["center_y"])) + ) if kwargs_light_mcmc is not None: - a = [arg['amp'] for arg in kwargs_light_mcmc] + a = [arg["amp"] for arg in kwargs_light_mcmc] a_mean = np.mean(a) a_16, a_50, a_84 = np.quantile(a, [0.16, 0.5, 0.84]) - light.parameters['I_eff'].set_posterior(PosteriorStatistics(float(a_mean), float(a_50), - float(a_16), float(a_84))) + light.parameters["I_eff"].set_posterior( + PosteriorStatistics(float(a_mean), float(a_50), float(a_16), float(a_84)) + ) - rs = [arg['R_sersic'] for arg in kwargs_light_mcmc] + rs = [arg["R_sersic"] for arg in kwargs_light_mcmc] rs_mean = np.mean(rs) rs_16, rs_50, rs_84 = np.quantile(rs, [0.16, 0.5, 0.84]) - light.parameters['theta_eff'].set_posterior(PosteriorStatistics(float(rs_mean), float(rs_50), - float(rs_16), float(rs_84))) + 
light.parameters["theta_eff"].set_posterior( + PosteriorStatistics( + float(rs_mean), float(rs_50), float(rs_16), float(rs_84) + ) + ) - ns = [arg['n_sersic'] for arg in kwargs_light_mcmc] + ns = [arg["n_sersic"] for arg in kwargs_light_mcmc] ns_mean = np.mean(ns) ns_16, ns_50, ns_84 = np.quantile(ns, [0.16, 0.5, 0.84]) - light.parameters['n'].set_posterior(PosteriorStatistics(float(ns_mean), float(ns_50), - float(ns_16), float(ns_84))) - - e1 = [arg['e1'] for arg in kwargs_light_mcmc] - e2 = [arg['e2'] for arg in kwargs_light_mcmc] + light.parameters["n"].set_posterior( + PosteriorStatistics( + float(ns_mean), float(ns_50), float(ns_16), float(ns_84) + ) + ) + + e1 = [arg["e1"] for arg in kwargs_light_mcmc] + e2 = [arg["e2"] for arg in kwargs_light_mcmc] ql, phil = e1e2_lenstronomy_to_qphi_coolest(np.array(e1), np.array(e2)) ql_mean = np.mean(ql) ql_16, ql_50, ql_84 = np.quantile(ql, [0.16, 0.5, 0.84]) - light.parameters['q'].set_posterior(PosteriorStatistics(float(ql_mean), float(ql_50), - float(ql_16), float(ql_84))) + light.parameters["q"].set_posterior( + PosteriorStatistics( + float(ql_mean), float(ql_50), float(ql_16), float(ql_84) + ) + ) phil_mean = np.mean(phil) phil_16, phil_50, phil_84 = np.quantile(phil, [0.16, 0.5, 0.84]) - light.parameters['phi'].set_posterior(PosteriorStatistics(float(phil_mean), float(phil_50), - float(phil_16), float(phil_84))) + light.parameters["phi"].set_posterior( + PosteriorStatistics( + float(phil_mean), float(phil_50), float(phil_16), float(phil_84) + ) + ) - cx = [arg['center_x'] for arg in kwargs_light_mcmc] + cx = [arg["center_x"] for arg in kwargs_light_mcmc] cx_mean = np.mean(cx) cx_16, cx_50, cx_84 = np.quantile(cx, [0.16, 0.5, 0.84]) - light.parameters['center_x'].set_posterior(PosteriorStatistics(-float(cx_mean), -float(cx_50), - -float(cx_16), -float(cx_84))) + light.parameters["center_x"].set_posterior( + PosteriorStatistics( + -float(cx_mean), -float(cx_50), -float(cx_16), -float(cx_84) + ) + ) - cy = [arg['center_y'] for arg in kwargs_light_mcmc] + cy = [arg["center_y"] for arg in kwargs_light_mcmc] cy_mean = np.mean(cy) cy_16, cy_50, cy_84 = np.quantile(cy, [0.16, 0.5, 0.84]) - light.parameters['center_y'].set_posterior(PosteriorStatistics(float(cy_mean), float(cy_50), - float(cy_16), float(cy_84))) + light.parameters["center_y"].set_posterior( + PosteriorStatistics( + float(cy_mean), float(cy_50), float(cy_16), float(cy_84) + ) + ) - print('Sersic (Ellipse) correctly updated') + print("Sersic (Ellipse) correctly updated") return def shapelets_update(light, kwargs_light, kwargs_light_mcmc=None): - """ - Update the COOLEST Shapelets light model with results in kwargs_light + """Update the COOLEST Shapelets light model with results in kwargs_light. 
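# Illustrative sketch (not part of the patch): the lenstronomy -> COOLEST parameter
# names written by sersic_update() above (e1/e2 are first converted to q/phi, and
# center_x changes sign, exactly as in the code).
SERSIC_NAME_MAP = {
    "amp": "I_eff",
    "R_sersic": "theta_eff",
    "n_sersic": "n",
    "center_x": "center_x",   # value negated when stored
    "center_y": "center_y",
}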
:param light: coolest.template.classes.profiles.light.Shapelets object :param kwargs_light: dictionnary with the point estimate - :return: updated light """ - light.parameters['amps'].set_point_estimate( - PointEstimate(shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(kwargs_light['amp'])))) - light.parameters['beta'].set_point_estimate(PointEstimate(float(kwargs_light['beta']))) - light.parameters['n_max'].set_point_estimate(PointEstimate(int(kwargs_light['n_max']))) - light.parameters['center_x'].set_point_estimate(PointEstimate(-float(kwargs_light['center_x']))) - light.parameters['center_y'].set_point_estimate(PointEstimate(float(kwargs_light['center_y']))) + light.parameters["amps"].set_point_estimate( + PointEstimate( + shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(kwargs_light["amp"])) + ) + ) + light.parameters["beta"].set_point_estimate( + PointEstimate(float(kwargs_light["beta"])) + ) + light.parameters["n_max"].set_point_estimate( + PointEstimate(int(kwargs_light["n_max"])) + ) + light.parameters["center_x"].set_point_estimate( + PointEstimate(-float(kwargs_light["center_x"])) + ) + light.parameters["center_y"].set_point_estimate( + PointEstimate(float(kwargs_light["center_y"])) + ) if kwargs_light_mcmc is not None: - a = [arg['amp'] for arg in kwargs_light_mcmc] + a = [arg["amp"] for arg in kwargs_light_mcmc] a_mean = np.mean(a, axis=0) a_16, a_50, a_84 = np.quantile(a, [0.16, 0.5, 0.84], axis=0) - light.parameters['amps'].set_posterior(PosteriorStatistics(shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_mean)), - shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_50)), - shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_16)), - shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_84)))) - - b = [arg['beta'] for arg in kwargs_light_mcmc] + light.parameters["amps"].set_posterior( + PosteriorStatistics( + shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_mean)), + shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_50)), + shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_16)), + shapelet_amp_lenstronomy_to_coolest(np.ndarray.tolist(a_84)), + ) + ) + + b = [arg["beta"] for arg in kwargs_light_mcmc] b_mean = np.mean(b) b_16, b_50, b_84 = np.quantile(b, [0.16, 0.5, 0.84]) - light.parameters['beta'].set_posterior(PosteriorStatistics(float(b_mean), float(b_50), - float(b_16), float(b_84))) + light.parameters["beta"].set_posterior( + PosteriorStatistics(float(b_mean), float(b_50), float(b_16), float(b_84)) + ) - nmax = [arg['n_max'] for arg in kwargs_light_mcmc] + nmax = [arg["n_max"] for arg in kwargs_light_mcmc] nmax_mean = np.mean(nmax) nmax_16, nmax_50, nmax_84 = np.quantile(nmax, [0.16, 0.5, 0.84]) - light.parameters['n_max'].set_posterior(PosteriorStatistics(float(nmax_mean), float(nmax_50), - float(nmax_16), float(nmax_84))) + light.parameters["n_max"].set_posterior( + PosteriorStatistics( + float(nmax_mean), float(nmax_50), float(nmax_16), float(nmax_84) + ) + ) - cx = [arg['center_x'] for arg in kwargs_light_mcmc] + cx = [arg["center_x"] for arg in kwargs_light_mcmc] cx_mean = np.mean(cx) cx_16, cx_50, cx_84 = np.quantile(cx, [0.16, 0.5, 0.84]) - light.parameters['center_x'].set_posterior(PosteriorStatistics(-float(cx_mean), -float(cx_50), - -float(cx_16), -float(cx_84))) + light.parameters["center_x"].set_posterior( + PosteriorStatistics( + -float(cx_mean), -float(cx_50), -float(cx_16), -float(cx_84) + ) + ) - cy = [arg['center_y'] for arg in kwargs_light_mcmc] + cy = [arg["center_y"] for arg in kwargs_light_mcmc] cy_mean = 
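# Illustrative sketch (not part of the patch): for array-valued parameters such as the
# shapelet amplitude vectors above, the posterior summaries are computed per
# coefficient, i.e. along the sample axis (axis=0); synthetic samples below.
import numpy as np

a = np.random.normal(size=(500, 6))               # 500 MCMC samples of 6 coefficients
a_mean = np.mean(a, axis=0)                       # shape (6,)
a_16, a_50, a_84 = np.quantile(a, [0.16, 0.5, 0.84], axis=0)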
np.mean(cy) cy_16, cy_50, cy_84 = np.quantile(cy, [0.16, 0.5, 0.84]) - light.parameters['center_y'].set_posterior(PosteriorStatistics(float(cy_mean), float(cy_50), - float(cy_16), float(cy_84))) + light.parameters["center_y"].set_posterior( + PosteriorStatistics( + float(cy_mean), float(cy_50), float(cy_16), float(cy_84) + ) + ) - print('Shapelets correctly updated') + print("Shapelets correctly updated") return def lensed_point_source_update(light, kwargs_ps, kwargs_ps_mcmc=None): - """ - Update the COOLEST LensedPS light model with results in kwargs_ps + """Update the COOLEST LensedPS light model with results in kwargs_ps. :param light: coolest.template.classes.profiles.light.LensedPS object :param kwargs_ps: dictionnary with the point estimate - :return: updated light """ - light.parameters['amps'].set_point_estimate(PointEstimate(np.ndarray.tolist(kwargs_ps['point_amp']))) - light.parameters['ra_list'].set_point_estimate(PointEstimate(np.ndarray.tolist(-kwargs_ps['ra_image']))) - light.parameters['dec_list'].set_point_estimate(PointEstimate(np.ndarray.tolist(kwargs_ps['dec_image']))) + light.parameters["amps"].set_point_estimate( + PointEstimate(np.ndarray.tolist(kwargs_ps["point_amp"])) + ) + light.parameters["ra_list"].set_point_estimate( + PointEstimate(np.ndarray.tolist(-kwargs_ps["ra_image"])) + ) + light.parameters["dec_list"].set_point_estimate( + PointEstimate(np.ndarray.tolist(kwargs_ps["dec_image"])) + ) if kwargs_ps_mcmc is not None: - a = [arg['point_amp'] for arg in kwargs_ps_mcmc] + a = [arg["point_amp"] for arg in kwargs_ps_mcmc] a_mean = np.mean(a, axis=0) a_16, a_50, a_84 = np.quantile(a, [0.16, 0.5, 0.84], axis=0) - light.parameters['amps'].set_posterior(PosteriorStatistics(np.ndarray.tolist(a_mean), - np.ndarray.tolist(a_50), - np.ndarray.tolist(a_16), - np.ndarray.tolist(a_84))) - - ra = [arg['ra_image'] for arg in kwargs_ps_mcmc] + light.parameters["amps"].set_posterior( + PosteriorStatistics( + np.ndarray.tolist(a_mean), + np.ndarray.tolist(a_50), + np.ndarray.tolist(a_16), + np.ndarray.tolist(a_84), + ) + ) + + ra = [arg["ra_image"] for arg in kwargs_ps_mcmc] ra_mean = np.mean(ra, axis=0) ra_16, ra_50, ra_84 = np.quantile(ra, [0.16, 0.5, 0.84], axis=0) - light.parameters['ra_list'].set_posterior(PosteriorStatistics(np.ndarray.tolist(-ra_mean), - np.ndarray.tolist(-ra_50), - np.ndarray.tolist(-ra_16), - np.ndarray.tolist(-ra_84))) - dec = [arg['dec_image'] for arg in kwargs_ps_mcmc] + light.parameters["ra_list"].set_posterior( + PosteriorStatistics( + np.ndarray.tolist(-ra_mean), + np.ndarray.tolist(-ra_50), + np.ndarray.tolist(-ra_16), + np.ndarray.tolist(-ra_84), + ) + ) + dec = [arg["dec_image"] for arg in kwargs_ps_mcmc] dec_mean = np.mean(dec, axis=0) dec_16, dec_50, dec_84 = np.quantile(dec, [0.16, 0.5, 0.84], axis=0) - light.parameters['dec_list'].set_posterior(PosteriorStatistics(np.ndarray.tolist(dec_mean), - np.ndarray.tolist(dec_50), - np.ndarray.tolist(dec_16), - np.ndarray.tolist(dec_84))) - - print('Lensed point source correctly updated') + light.parameters["dec_list"].set_posterior( + PosteriorStatistics( + np.ndarray.tolist(dec_mean), + np.ndarray.tolist(dec_50), + np.ndarray.tolist(dec_16), + np.ndarray.tolist(dec_84), + ) + ) + + print("Lensed point source correctly updated") return diff --git a/lenstronomy/Util/correlation.py b/lenstronomy/Util/correlation.py index f68059080..0a00f8466 100644 --- a/lenstronomy/Util/correlation.py +++ b/lenstronomy/Util/correlation.py @@ -1,17 +1,17 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from scipy 
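# Illustrative sketch (not part of the patch): the sign flip applied above to x/RA-like
# quantities (center_x, ra_image) before they are written into the COOLEST template,
# consistent with the opposite x-axis orientation noted in
# shapelet_amp_lenstronomy_to_coolest(); the coordinates are made up.
import numpy as np

ra_image_lenstronomy = np.array([0.8, -1.1, 0.3])
ra_list_coolest = (-ra_image_lenstronomy).tolist()   # negate, then store as a plain list
print(ra_list_coolest)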
import fftpack import numpy as np import lenstronomy.Util.analysis_util as analysis_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def correlation_2D(image): - """ - #TODO document normalization output in units + """#TODO document normalization output in units :param image: 2d image :return: 2d fourier transform diff --git a/lenstronomy/Util/data_util.py b/lenstronomy/Util/data_util.py index 9164574d2..41c08fd1e 100644 --- a/lenstronomy/Util/data_util.py +++ b/lenstronomy/Util/data_util.py @@ -2,13 +2,16 @@ import copy from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export -def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scale, num_exposures=1): - """ - computes the expected Gaussian background noise of a pixel in units of counts/second +def bkg_noise( + readout_noise, exposure_time, sky_brightness, pixel_scale, num_exposures=1 +): + """Computes the expected Gaussian background noise of a pixel in units of + counts/second. :param readout_noise: noise added per readout :param exposure_time: exposure time per exposure (in seconds) @@ -18,8 +21,8 @@ def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scale, num_exp :return: estimated Gaussian noise sqrt(variance) """ exposure_time_tot = num_exposures * exposure_time - readout_noise_tot = num_exposures * readout_noise ** 2 # square of readout noise - sky_per_pixel = sky_brightness * pixel_scale ** 2 + readout_noise_tot = num_exposures * readout_noise**2 # square of readout noise + sky_per_pixel = sky_brightness * pixel_scale**2 sky_brightness_tot = exposure_time_tot * sky_per_pixel sigma_bkg = np.sqrt(readout_noise_tot + sky_brightness_tot) / exposure_time_tot return sigma_bkg @@ -27,11 +30,12 @@ def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scale, num_exp @export def flux_noise(cps_pixel, exposure_time): - """ - computes the variance of the shot noise Gaussian approximation of Poisson noise term + """Computes the variance of the shot noise Gaussian approximation of Poisson noise + term. :param cps_pixel: counts per second of the intensity per pixel unit - :param exposure_time: total exposure time (in units seconds or equivalent unit as cps_pixel) + :param exposure_time: total exposure time (in units seconds or equivalent unit as + cps_pixel) :return: sqrt(variance) of pixel value """ return cps_pixel / np.sqrt(exposure_time) @@ -39,8 +43,7 @@ def flux_noise(cps_pixel, exposure_time): @export def magnitude2cps(magnitude, magnitude_zero_point): - """ - converts an apparent magnitude to counts per second + """Converts an apparent magnitude to counts per second. The zero point of an instrument, by definition, is the magnitude of an object that produces one count (or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of @@ -52,7 +55,7 @@ def magnitude2cps(magnitude, magnitude_zero_point): :return: counts per second of astronomical object """ delta_m = magnitude - magnitude_zero_point - counts = 10**(-delta_m / 2.5) + counts = 10 ** (-delta_m / 2.5) return counts @@ -71,8 +74,7 @@ def cps2magnitude(cps, magnitude_zero_point): @export def absolute2apparent_magnitude(absolute_magnitude, d_parsec): - """ - converts absolute to apparent magnitudes + """Converts absolute to apparent magnitudes. 
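# Illustrative sketch (not part of the patch): the background-noise and zero-point
# formulas documented above, evaluated for made-up instrument values.
import numpy as np

readout_noise, exposure_time, sky_brightness, pixel_scale, num_exposures = 4.0, 600.0, 0.1, 0.05, 2
exposure_time_tot = num_exposures * exposure_time
sigma_bkg = (
    np.sqrt(num_exposures * readout_noise**2 + exposure_time_tot * sky_brightness * pixel_scale**2)
    / exposure_time_tot
)

magnitude, magnitude_zero_point = 22.0, 25.0
cps = 10 ** (-(magnitude - magnitude_zero_point) / 2.5)   # magnitude2cps()
print(sigma_bkg, cps)                                     # ~0.0047 counts/s, ~15.8 counts/s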
:param absolute_magnitude: absolute magnitude of object :param d_parsec: distance to object in units parsec @@ -84,8 +86,7 @@ def absolute2apparent_magnitude(absolute_magnitude, d_parsec): @export def adu2electrons(adu, ccd_gain): - """ - converts analog-to-digital units into electron counts + """Converts analog-to-digital units into electron counts. :param adu: counts in analog-to-digital unit :param ccd_gain: CCD gain, meaning how many electrons are counted per unit ADU @@ -96,8 +97,7 @@ def adu2electrons(adu, ccd_gain): @export def electrons2adu(electrons, ccd_gain): - """ - converts electron counts into analog-to-digital unit + """Converts electron counts into analog-to-digital unit. :param electrons: number of electrons received on detector :param ccd_gain: CCD gain, meaning how many electrons are counted per unit ADU @@ -107,11 +107,12 @@ def electrons2adu(electrons, ccd_gain): def magnitude2amplitude(light_model_class, kwargs_light_mag, magnitude_zero_point): - """ - translates astronomical magnitudes to lenstronomy linear 'amp' parameters for LightModel objects + """Translates astronomical magnitudes to lenstronomy linear 'amp' parameters for + LightModel objects. :param light_model_class: LightModel() class instance - :param kwargs_light_mag: list of light model parameter dictionary with 'magnitude' instead of 'amp' + :param kwargs_light_mag: list of light model parameter dictionary with 'magnitude' + instead of 'amp' :param magnitude_zero_point: magnitude zero point :return: list of light model parameter dictionary with 'amp' """ @@ -119,11 +120,12 @@ def magnitude2amplitude(light_model_class, kwargs_light_mag, magnitude_zero_poin if kwargs_light_mag is not None: for i, kwargs_mag in enumerate(kwargs_light_mag): kwargs_new = kwargs_light_amp[i] - del kwargs_new['magnitude'] - cps_norm = light_model_class.total_flux(kwargs_list=kwargs_light_amp, norm=True, k=i)[0] - magnitude = kwargs_mag['magnitude'] + del kwargs_new["magnitude"] + cps_norm = light_model_class.total_flux( + kwargs_list=kwargs_light_amp, norm=True, k=i + )[0] + magnitude = kwargs_mag["magnitude"] cps = magnitude2cps(magnitude, magnitude_zero_point=magnitude_zero_point) amp = cps / cps_norm - kwargs_new['amp'] = amp + kwargs_new["amp"] = amp return kwargs_light_amp - diff --git a/lenstronomy/Util/derivative_util.py b/lenstronomy/Util/derivative_util.py index d75a3f9c2..1b648d1a3 100644 --- a/lenstronomy/Util/derivative_util.py +++ b/lenstronomy/Util/derivative_util.py @@ -1,17 +1,15 @@ -""" -routines to compute derivatives of spherical functions -""" +"""Routines to compute derivatives of spherical functions.""" import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def d_r_dx(x, y): - """ - derivative of r with respect to x - :param x: + """Derivative of r with respect to x :param x: + :param y: :return: """ @@ -20,8 +18,7 @@ def d_r_dx(x, y): @export def d_r_dy(x, y): - """ - differential dr/dy + """Differential dr/dy. 
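# Illustrative sketch (not part of the patch): the core arithmetic inside
# magnitude2amplitude() above; cps_norm stands in for the total flux of the profile
# evaluated with amp=1, which the real function obtains from LightModel.total_flux().
magnitude, magnitude_zero_point = 20.0, 25.0
cps_norm = 3.2                                            # hypothetical normalised flux
cps = 10 ** (-(magnitude - magnitude_zero_point) / 2.5)
amp = cps / cps_norm                                      # the linear 'amp' parameter
print(amp)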
:param x: :param y: @@ -32,41 +29,37 @@ def d_r_dy(x, y): @export def d_r_dxx(x, y): - """ - second derivative dr/dxdx - :param x: + """Second derivative dr/dxdx :param x: + :param y: :return: """ - return y**2 / (x**2 + y**2)**(3./2) + return y**2 / (x**2 + y**2) ** (3.0 / 2) @export def d_r_dyy(x, y): - """ - second derivative dr/dxdx - :param x: + """Second derivative dr/dxdx :param x: + :param y: :return: """ - return x**2 / (x**2 + y**2)**(3./2) + return x**2 / (x**2 + y**2) ** (3.0 / 2) @export def d_r_dxy(x, y): - """ - second derivative dr/dxdx - :param x: + """Second derivative dr/dxdx :param x: + :param y: :return: """ - return -x * y / (x ** 2 + y ** 2) ** (3 / 2.) + return -x * y / (x**2 + y**2) ** (3 / 2.0) @export def d_phi_dx(x, y): - """ - angular derivative in respect to x when phi = arctan2(y, x) + """Angular derivative in respect to x when phi = arctan2(y, x) :param x: :param y: @@ -77,8 +70,7 @@ def d_phi_dx(x, y): @export def d_phi_dy(x, y): - """ - angular derivative in respect to y when phi = arctan2(y, x) + """Angular derivative in respect to y when phi = arctan2(y, x) :param x: :param y: @@ -89,87 +81,76 @@ def d_phi_dy(x, y): @export def d_phi_dxx(x, y): - """ - second derivative of the orientation angle + """Second derivative of the orientation angle. :param x: :param y: :return: """ - return 2 * x * y / (x**2 + y**2)**2 + return 2 * x * y / (x**2 + y**2) ** 2 @export def d_phi_dyy(x, y): - """ - second derivative of the orientation angle in dydy + """Second derivative of the orientation angle in dydy. :param x: :param y: :return: """ - return -2 * x * y / (x ** 2 + y ** 2) ** 2 + return -2 * x * y / (x**2 + y**2) ** 2 @export def d_phi_dxy(x, y): - """ - second derivative of the orientation angle in dxdy + """Second derivative of the orientation angle in dxdy. :param x: :param y: :return: """ - return (-x**2 + y**2) / (x ** 2 + y ** 2) ** 2 + return (-(x**2) + y**2) / (x**2 + y**2) ** 2 @export def d_x_diffr_dx(x, y): - """ - derivative of d(x/r)/dx - equivalent to second order derivatives dr_dxx + """Derivative of d(x/r)/dx equivalent to second order derivatives dr_dxx. :param x: :param y: :return: """ - return y**2 / (x**2 + y**2)**(3/2.) + return y**2 / (x**2 + y**2) ** (3 / 2.0) @export def d_y_diffr_dy(x, y): - """ - derivative of d(y/r)/dy - equivalent to second order derivatives dr_dyy + """Derivative of d(y/r)/dy equivalent to second order derivatives dr_dyy. :param x: :param y: :return: """ - return x**2 / (x**2 + y**2)**(3/2.) + return x**2 / (x**2 + y**2) ** (3 / 2.0) @export def d_y_diffr_dx(x, y): - """ - derivative of d(y/r)/dx - equivalent to second order derivatives dr_dxy + """Derivative of d(y/r)/dx equivalent to second order derivatives dr_dxy. :param x: :param y: :return: """ - return -x*y / (x**2 + y**2)**(3/2.) + return -x * y / (x**2 + y**2) ** (3 / 2.0) @export def d_x_diffr_dy(x, y): - """ - derivative of d(x/r)/dy - equivalent to second order derivatives dr_dyx + """Derivative of d(x/r)/dy equivalent to second order derivatives dr_dyx. :param x: :param y: :return: """ - return -x*y / (x**2 + y**2)**(3/2.) 
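# Illustrative sketch (not part of the patch): a finite-difference check of the analytic
# helpers above, here for d_r_dx = x / r with r = sqrt(x**2 + y**2).
import numpy as np

def d_r_dx(x, y):
    return x / np.sqrt(x**2 + y**2)

x, y, eps = 1.3, -0.7, 1e-6
numerical = (np.sqrt((x + eps)**2 + y**2) - np.sqrt(x**2 + y**2)) / eps
print(d_r_dx(x, y), numerical)   # the two values agree to several decimal places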
+ return -x * y / (x**2 + y**2) ** (3 / 2.0) diff --git a/lenstronomy/Util/image_util.py b/lenstronomy/Util/image_util.py index 77d6d4cfd..a3f0de306 100644 --- a/lenstronomy/Util/image_util.py +++ b/lenstronomy/Util/image_util.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np from scipy import ndimage @@ -7,13 +7,14 @@ import lenstronomy.Util.util as util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def add_layer2image(grid2d, x_pos, y_pos, kernel, order=1): - """ - adds a kernel on the grid2d image at position x_pos, y_pos with an interpolated subgrid pixel shift of order=order + """Adds a kernel on the grid2d image at position x_pos, y_pos with an interpolated + subgrid pixel shift of order=order. :param grid2d: 2d pixel grid (i.e. image) :param x_pos: x-position center (pixel coordinate) of the layer to be added @@ -33,8 +34,8 @@ def add_layer2image(grid2d, x_pos, y_pos, kernel, order=1): @export def add_layer2image_int(grid2d, x_pos, y_pos, kernel): - """ - adds a kernel on the grid2d image at position x_pos, y_pos at integer positions of pixel + """Adds a kernel on the grid2d image at position x_pos, y_pos at integer positions + of pixel. :param grid2d: 2d pixel grid (i.e. image) :param x_pos: x-position center (pixel coordinate) of the layer to be added @@ -54,16 +55,23 @@ def add_layer2image_int(grid2d, x_pos, y_pos, kernel): k_l2_x = int((k_x - 1) / 2) k_l2_y = int((k_y - 1) / 2) - min_x = np.maximum(0, x_int-k_l2_x) - min_y = np.maximum(0, y_int-k_l2_y) - max_x = np.minimum(num_x, x_int+k_l2_x + 1) - max_y = np.minimum(num_y, y_int+k_l2_y + 1) + min_x = np.maximum(0, x_int - k_l2_x) + min_y = np.maximum(0, y_int - k_l2_y) + max_x = np.minimum(num_x, x_int + k_l2_x + 1) + max_y = np.minimum(num_y, y_int + k_l2_y + 1) min_xk = np.maximum(0, -x_int + k_l2_x) min_yk = np.maximum(0, -y_int + k_l2_y) max_xk = np.minimum(k_x, -x_int + k_l2_x + num_x) max_yk = np.minimum(k_y, -y_int + k_l2_y + num_y) - if min_x >= max_x or min_y >= max_y or min_xk >= max_xk or min_yk >= max_yk or (max_x-min_x != max_xk-min_xk) or (max_y-min_y != max_yk-min_yk): + if ( + min_x >= max_x + or min_y >= max_y + or min_xk >= max_xk + or min_yk >= max_yk + or (max_x - min_x != max_xk - min_xk) + or (max_y - min_y != max_yk - min_yk) + ): return grid2d kernel_re_sized = kernel[min_yk:max_yk, min_xk:max_xk] new = grid2d.copy() @@ -73,10 +81,8 @@ def add_layer2image_int(grid2d, x_pos, y_pos, kernel): @export def add_background(image, sigma_bkd): - """ - Generates background noise to image. - To generate a noisy image with background noise, generate - image_noisy = image + add_background(image, sigma_bkd) + """Generates background noise to image. To generate a noisy image with background + noise, generate image_noisy = image + add_background(image, sigma_bkd) :param image: pixel values of image :param sigma_bkd: background noise (sigma) @@ -89,9 +95,9 @@ def add_background(image, sigma_bkd): @export def add_poisson(image, exp_time): - """ - Generates a poison (or Gaussian) distributed noise with mean given by surface brightness. - To generate a noisy image with Poisson noise, perform image_noisy = image + add_poisson(image, exp_time) + """Generates a poison (or Gaussian) distributed noise with mean given by surface + brightness. 
To generate a noisy image with Poisson noise, perform image_noisy = + image + add_poisson(image, exp_time) :param image: pixel values (photon counts per unit exposure time) :param exp_time: exposure time @@ -99,7 +105,7 @@ def add_poisson(image, exp_time): """ # Gaussian approximation for Poisson distribution, normalized to exposure time - sigma = np.sqrt(np.abs(image)/exp_time) + sigma = np.sqrt(np.abs(image) / exp_time) nx, ny = np.shape(image) poisson = np.random.randn(nx, ny) * sigma return poisson @@ -107,21 +113,16 @@ def add_poisson(image, exp_time): @export def rotateImage(img, angle): - """ - - querries scipy.ndimage.rotate routine - :param img: image to be rotated - :param angle: angle to be rotated (radian) - :return: rotated image - """ + """Querries scipy.ndimage.rotate routine :param img: image to be rotated :param + angle: angle to be rotated (radian) :return: rotated image.""" imgR = ndimage.rotate(img, angle, reshape=False) return imgR @export def re_size_array(x_in, y_in, input_values, x_out, y_out): - """ - resizes 2d array (i.e. image) to new coordinates. So far only works with square output aligned with coordinate axis. + """Resizes 2d array (i.e. image) to new coordinates. So far only works with square + output aligned with coordinate axis. :param x_in: :param y_in: @@ -132,7 +133,7 @@ def re_size_array(x_in, y_in, input_values, x_out, y_out): """ # from skimage.transform import resize # resize(input_values) - interp_2d = interpolate.interp2d(x_in, y_in, input_values, kind='linear') + interp_2d = interpolate.interp2d(x_in, y_in, input_values, kind="linear") # interp_2d = scipy.interpolate.RectBivariateSpline(x_in, y_in, input_values, kx=1, ky=1) out_values = interp_2d.__call__(x_out, y_out) return out_values @@ -140,27 +141,24 @@ def re_size_array(x_in, y_in, input_values, x_out, y_out): @export def symmetry_average(image, symmetry): - """ - symmetry averaged image + """Symmetry averaged image. 
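# Illustrative sketch (not part of the patch): combining the two noise terms above into
# a mock noisy image, following the usage spelled out in the add_background() and
# add_poisson() docstrings; the image and noise levels are made up.
import numpy as np
from lenstronomy.Util import image_util

image = np.ones((20, 20)) * 5.0          # counts per second
sigma_bkd, exp_time = 0.1, 900.0
image_noisy = (
    image
    + image_util.add_background(image, sigma_bkd)
    + image_util.add_poisson(image, exp_time)
)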
:param image: :param symmetry: :return: """ img_sym = np.zeros_like(image) - angle = 360./symmetry + angle = 360.0 / symmetry for i in range(symmetry): - img_sym += rotateImage(image, angle*i) + img_sym += rotateImage(image, angle * i) img_sym /= symmetry return img_sym @export def findOverlap(x_mins, y_mins, min_distance): - """ - finds overlapping solutions, deletes multiples and deletes non-solutions and if it is not a solution, - deleted as well - """ + """Finds overlapping solutions, deletes multiples and deletes non-solutions and if + it is not a solution, deleted as well.""" n = len(x_mins) idex = [] for i in range(n): @@ -168,7 +166,10 @@ def findOverlap(x_mins, y_mins, min_distance): pass else: for j in range(0, i): - if abs(x_mins[i] - x_mins[j] < min_distance and abs(y_mins[i] - y_mins[j]) < min_distance): + if abs( + x_mins[i] - x_mins[j] < min_distance + and abs(y_mins[i] - y_mins[j]) < min_distance + ): idex.append(i) break x_mins = np.delete(x_mins, idex, axis=0) @@ -188,7 +189,12 @@ def coordInImage(x_coord, y_coord, num_pix, deltapix): min_ = -deltapix * num_pix / 2 max_ = deltapix * num_pix / 2 for i in range(len(x_coord)): # sum over image positions - if x_coord[i] < min_ or x_coord[i] > max_ or y_coord[i] < min_ or y_coord[i] > max_: + if ( + x_coord[i] < min_ + or x_coord[i] > max_ + or y_coord[i] < min_ + or y_coord[i] > max_ + ): idex.append(i) x_coord = np.delete(x_coord, idex, axis=0) y_coord = np.delete(y_coord, idex, axis=0) @@ -197,37 +203,37 @@ def coordInImage(x_coord, y_coord, num_pix, deltapix): @export def re_size(image, factor=1): - """ - re-sizes image with nx x ny to nx/factor x ny/factor + """Re-sizes image with nx x ny to nx/factor x ny/factor. :param image: 2d image with shape (nx,ny) :param factor: integer >=1 :return: """ if factor < 1: - raise ValueError('scaling factor in re-sizing %s < 1' % factor) + raise ValueError("scaling factor in re-sizing %s < 1" % factor) elif factor == 1: return image f = int(factor) nx, ny = np.shape(image) - if int(nx/f) == nx/f and int(ny/f) == ny/f: - small = image.reshape([int(nx/f), f, int(ny/f), f]).mean(3).mean(1) + if int(nx / f) == nx / f and int(ny / f) == ny / f: + small = image.reshape([int(nx / f), f, int(ny / f), f]).mean(3).mean(1) return small else: - raise ValueError("scaling with factor %s is not possible with grid size %s, %s" % (f, nx, ny)) + raise ValueError( + "scaling with factor %s is not possible with grid size %s, %s" % (f, nx, ny) + ) @export def rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask): - """ - re-bins pixels, updates cutout image, wht_map, sigma_bkg, coordinates, PSF + """Re-bins pixels, updates cutout image, wht_map, sigma_bkg, coordinates, PSF. 
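# Illustrative sketch (not part of the patch): re_size() above averages factor x factor
# pixel blocks, so the grid size must be divisible by the (integer) factor.
import numpy as np
from lenstronomy.Util import image_util

image = np.arange(36.0).reshape(6, 6)
small = image_util.re_size(image, factor=2)   # (6, 6) -> (3, 3) block means
print(small.shape)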
:param bin_size: number of pixels (per axis) to merge :return: """ - numPix = int(len(image)/bin_size) + numPix = int(len(image) / bin_size) numPix_precut = numPix * bin_size - factor = int(len(image)/numPix) + factor = int(len(image) / numPix) if not numPix * bin_size == len(image): image_precut = image[0:numPix_precut, 0:numPix_precut] else: @@ -235,43 +241,55 @@ def rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex image_resized = re_size(image_precut, factor) image_resized *= bin_size**2 wht_map_resized = re_size(wht_map[0:numPix_precut, 0:numPix_precut], factor) - sigma_bkg_resized = bin_size*sigma_bkg + sigma_bkg_resized = bin_size * sigma_bkg ra_coords_resized = re_size(ra_coords[0:numPix_precut, 0:numPix_precut], factor) dec_coords_resized = re_size(dec_coords[0:numPix_precut, 0:numPix_precut], factor) idex_mask_resized = re_size(idex_mask[0:numPix_precut, 0:numPix_precut], factor) idex_mask_resized[idex_mask_resized > 0] = 1 - return image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized + return ( + image_resized, + wht_map_resized, + sigma_bkg_resized, + ra_coords_resized, + dec_coords_resized, + idex_mask_resized, + ) @export def rebin_coord_transform(factor, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix): - """ - adopt coordinate system and transformation between angular and pixel coordinates of a re-binned image - - """ + """Adopt coordinate system and transformation between angular and pixel coordinates + of a re-binned image.""" factor = int(factor) Mcoord2pix_resized = Mcoord2pix / factor Mpix2coord_resized = Mpix2coord * factor x_at_radec_0_resized = (x_at_radec_0 + 0.5) / factor - 0.5 y_at_radec_0_resized = (y_at_radec_0 + 0.5) / factor - 0.5 - ra_at_xy_0_resized, dec_at_xy_0_resized = util.map_coord2pix(-x_at_radec_0_resized, -y_at_radec_0_resized, 0, 0, - Mpix2coord_resized) - return ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized + ra_at_xy_0_resized, dec_at_xy_0_resized = util.map_coord2pix( + -x_at_radec_0_resized, -y_at_radec_0_resized, 0, 0, Mpix2coord_resized + ) + return ( + ra_at_xy_0_resized, + dec_at_xy_0_resized, + x_at_radec_0_resized, + y_at_radec_0_resized, + Mpix2coord_resized, + Mcoord2pix_resized, + ) @export def stack_images(image_list, wht_list, sigma_list): - """ - stacks images and saves new image as a fits file + """Stacks images and saves new image as a fits file. :return: """ image_stacked = np.zeros_like(image_list[0]) wht_stacked = np.zeros_like(image_stacked) - sigma_stacked = 0. + sigma_stacked = 0.0 for i in range(len(image_list)): - image_stacked += image_list[i]*wht_list[i] - sigma_stacked += sigma_list[i]**2 * np.median(wht_list[i]) + image_stacked += image_list[i] * wht_list[i] + sigma_stacked += sigma_list[i] ** 2 * np.median(wht_list[i]) wht_stacked += wht_list[i] image_stacked /= wht_stacked sigma_stacked /= np.median(wht_stacked) @@ -281,9 +299,8 @@ def stack_images(image_list, wht_list, sigma_list): @export def cut_edges(image, num_pix): - """ - cuts out the edges of a 2d image and returns re-sized image to numPix - center is well defined for odd pixel sizes. + """Cuts out the edges of a 2d image and returns re-sized image to numPix center is + well defined for odd pixel sizes. 
:param image: 2d numpy array :param num_pix: square size of cut out image @@ -291,12 +308,19 @@ def cut_edges(image, num_pix): """ nx, ny = image.shape if nx < num_pix or ny < num_pix: - raise ValueError('image can not be resized, in routine cut_edges with image shape (%s %s) ' - 'and desired new shape (%s %s)' % (nx, ny, num_pix, num_pix)) + raise ValueError( + "image can not be resized, in routine cut_edges with image shape (%s %s) " + "and desired new shape (%s %s)" % (nx, ny, num_pix, num_pix) + ) if (nx % 2 == 0 and ny % 2 == 1) or (nx % 2 == 1 and ny % 2 == 0): - raise ValueError('image with odd and even axis (%s %s) not supported for re-sizing' % (nx, ny)) + raise ValueError( + "image with odd and even axis (%s %s) not supported for re-sizing" + % (nx, ny) + ) if (nx % 2 == 0 and num_pix % 2 == 1) or (nx % 2 == 1 and num_pix % 2 == 0): - raise ValueError('image can only be re-sized from even to even or odd to odd number.') + raise ValueError( + "image can only be re-sized from even to even or odd to odd number." + ) x_min = int((nx - num_pix) / 2) y_min = int((ny - num_pix) / 2) @@ -308,15 +332,14 @@ def cut_edges(image, num_pix): @export def radial_profile(data, center): - """ - computes radial profile + """Computes radial profile. :param data: 2d numpy array :param center: center [x, y] from which pixel to compute the radial profile :return: radial profile (in units pixel) """ y, x = np.indices(data.shape) - r = np.sqrt((x - center[0])**2 + (y - center[1])**2) + r = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2) r = r.astype(int) tbin = np.bincount(r.ravel(), data.ravel()) @@ -327,11 +350,11 @@ def radial_profile(data, center): @export def gradient_map(image): - """ - computes gradients of images with the sobel transform + """Computes gradients of images with the sobel transform. :param image: 2d numpy array :return: array of same size as input, with gradients between neighboring pixels """ from skimage import filters + return filters.sobel(image) diff --git a/lenstronomy/Util/kernel_util.py b/lenstronomy/Util/kernel_util.py index d1ce925db..4d3b5419d 100644 --- a/lenstronomy/Util/kernel_util.py +++ b/lenstronomy/Util/kernel_util.py @@ -1,6 +1,4 @@ -""" -routines that manipulate convolution kernels -""" +"""Routines that manipulate convolution kernels.""" import numpy as np import copy from scipy import ndimage @@ -12,38 +10,51 @@ from lenstronomy.GalKin import velocity_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def de_shift_kernel(kernel, shift_x, shift_y, iterations=20, fractional_step_size=1): - """ - de-shifts a shifted kernel to the center of a pixel. This is performed iteratively. + """De-shifts a shifted kernel to the center of a pixel. This is performed + iteratively. - The input kernel is the solution of a linear interpolated shift of a sharper kernel centered in the middle of the - pixel. To find the de-shifted kernel, we perform an iterative correction of proposed de-shifted kernels and compare - its shifted version with the input kernel. + The input kernel is the solution of a linear interpolated shift of a sharper kernel + centered in the middle of the pixel. To find the de-shifted kernel, we perform an + iterative correction of proposed de-shifted kernels and compare its shifted version + with the input kernel. - :param kernel: (shifted) kernel, e.g. a star in an image that is not centered in the pixel grid + :param kernel: (shifted) kernel, e.g. 
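# Illustrative sketch (not part of the patch): radial_profile() above bins pixel values
# by integer radius around a chosen centre; the data here are random.
import numpy as np
from lenstronomy.Util import image_util

data = np.random.rand(21, 21)
profile = image_util.radial_profile(data, center=[10, 10])
print(profile[:5])   # mean pixel value in the first five radial bins (pixel units)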
a star in an image that is not centered in the + pixel grid :param shift_x: x-offset relative to the center of the pixel (sub-pixel shift) :param shift_y: y-offset relative to the center of the pixel (sub-pixel shift) - :param iterations: number of repeated iterations of shifting a new de-shifted kernel and apply corrections - :param fractional_step_size: float (0, 1] correction factor relative to previous proposal (can be used for stability - :return: de-shifted kernel such that the interpolated shift boy (shift_x, shift_y) results in the input kernel + :param iterations: number of repeated iterations of shifting a new de-shifted kernel + and apply corrections + :param fractional_step_size: float (0, 1] correction factor relative to previous + proposal (can be used for stability + :return: de-shifted kernel such that the interpolated shift boy (shift_x, shift_y) + results in the input kernel """ nx, ny = np.shape(kernel) - kernel_new = np.zeros((nx+2, ny+2)) + (kernel[0, 0] + kernel[0, -1] + kernel[-1, 0] + kernel[-1, -1]) / 4. + kernel_new = ( + np.zeros((nx + 2, ny + 2)) + + (kernel[0, 0] + kernel[0, -1] + kernel[-1, 0] + kernel[-1, -1]) / 4.0 + ) kernel_new[1:-1, 1:-1] = kernel int_shift_x = int(round(shift_x)) frac_x_shift = shift_x - int_shift_x int_shift_y = int(round(shift_y)) frac_y_shift = shift_y - int_shift_y kernel_init = copy.deepcopy(kernel_new) - kernel_init_shifted = copy.deepcopy(ndimage.shift(kernel_init, shift=[int_shift_y, int_shift_x], order=1)) + kernel_init_shifted = copy.deepcopy( + ndimage.shift(kernel_init, shift=[int_shift_y, int_shift_x], order=1) + ) kernel_new = ndimage.shift(kernel_new, shift=[int_shift_y, int_shift_x], order=1) norm = np.sum(kernel_init_shifted) for i in range(iterations): - kernel_shifted_inv = ndimage.shift(kernel_new, shift=[-frac_y_shift, -frac_x_shift], order=1) + kernel_shifted_inv = ndimage.shift( + kernel_new, shift=[-frac_y_shift, -frac_x_shift], order=1 + ) delta = kernel_init_shifted - kernel_norm(kernel_shifted_inv) * norm kernel_new += delta * fractional_step_size kernel_new = kernel_norm(kernel_new) * norm @@ -52,9 +63,9 @@ def de_shift_kernel(kernel, shift_x, shift_y, iterations=20, fractional_step_siz @export def center_kernel(kernel, iterations=20): - """ - given a kernel that might not be perfectly centered, this routine computes its light weighted center and then - moves the center in an iterative process such that it is centered + """Given a kernel that might not be perfectly centered, this routine computes its + light weighted center and then moves the center in an iterative process such that it + is centered. :param kernel: 2d array (odd numbers) :param iterations: int, number of iterations @@ -70,7 +81,9 @@ def center_kernel(kernel, iterations=20): x_w = np.sum(kernel * util.array2image(x_grid)) y_w = np.sum(kernel * util.array2image(y_grid)) # de-shift kernel - kernel_centered = de_shift_kernel(kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations) + kernel_centered = de_shift_kernel( + kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations + ) return kernel_norm(kernel_centered) @@ -88,9 +101,8 @@ def kernel_norm(kernel): @export def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100): - """ - creates a higher resolution kernel with subgrid resolution as an interpolation of the original kernel in an - iterative approach + """Creates a higher resolution kernel with subgrid resolution as an interpolation of + the original kernel in an iterative approach. 
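# Illustrative sketch (not part of the patch): re-centring a toy kernel whose peak sits
# one pixel off-centre, using center_kernel() documented above.
import numpy as np
from lenstronomy.Util import kernel_util

kernel = np.zeros((11, 11))
kernel[5, 6] = 1.0                                        # peak offset by one pixel in x
kernel_centered = kernel_util.center_kernel(kernel, iterations=10)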
:param kernel: initial kernel :type kernel: 2d numpy array with square odd size @@ -107,10 +119,10 @@ def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100): if subgrid_res == 1: return kernel nx, ny = np.shape(kernel) - d_x = 1. / nx - x_in = np.linspace(d_x/2, 1-d_x/2, nx) - d_y = 1. / nx - y_in = np.linspace(d_y/2, 1-d_y/2, ny) + d_x = 1.0 / nx + x_in = np.linspace(d_x / 2, 1 - d_x / 2, nx) + d_y = 1.0 / nx + y_in = np.linspace(d_y / 2, 1 - d_y / 2, ny) nx_new = nx * subgrid_res ny_new = ny * subgrid_res if odd is True: @@ -119,10 +131,10 @@ def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100): if ny_new % 2 == 0: ny_new -= 1 - d_x_new = 1. / nx_new - d_y_new = 1. / ny_new - x_out = np.linspace(d_x_new/2., 1-d_x_new/2., nx_new) - y_out = np.linspace(d_y_new/2., 1-d_y_new/2., ny_new) + d_x_new = 1.0 / nx_new + d_y_new = 1.0 / ny_new + x_out = np.linspace(d_x_new / 2.0, 1 - d_x_new / 2.0, nx_new) + y_out = np.linspace(d_y_new / 2.0, 1 - d_y_new / 2.0, ny_new) kernel_input = copy.deepcopy(kernel) kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out) kernel_subgrid = kernel_norm(kernel_subgrid) @@ -134,7 +146,9 @@ def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100): kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx) delta = kernel - kernel_pixel temp_kernel = kernel_input + delta - kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)#/norm_subgrid + kernel_subgrid = image_util.re_size_array( + x_in, y_in, temp_kernel, x_out, y_out + ) # /norm_subgrid kernel_subgrid = kernel_norm(kernel_subgrid) kernel_input = temp_kernel @@ -149,14 +163,13 @@ def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100): kernel_pixel = kernel_norm(kernel_pixel) delta_kernel = kernel_pixel - kernel_norm(kernel) id = np.ones((subgrid_res, subgrid_res)) - delta_kernel_sub = np.kron(delta_kernel, id)/subgrid_res**2 + delta_kernel_sub = np.kron(delta_kernel, id) / subgrid_res**2 return kernel_norm(kernel_subgrid - delta_kernel_sub) @export def kernel_pixelsize_change(kernel, deltaPix_in, deltaPix_out): - """ - change the pixel size of a given kernel + """Change the pixel size of a given kernel. :param kernel: :param deltaPix_in: @@ -164,11 +177,17 @@ def kernel_pixelsize_change(kernel, deltaPix_in, deltaPix_out): :return: """ numPix = len(kernel) - numPix_new = int(round(numPix * deltaPix_in/deltaPix_out)) + numPix_new = int(round(numPix * deltaPix_in / deltaPix_out)) if numPix_new % 2 == 0: numPix_new -= 1 - x_in = np.linspace(-(numPix-1)/2*deltaPix_in, (numPix-1)/2*deltaPix_in, numPix) - x_out = np.linspace(-(numPix_new-1)/2*deltaPix_out, (numPix_new-1)/2*deltaPix_out, numPix_new) + x_in = np.linspace( + -(numPix - 1) / 2 * deltaPix_in, (numPix - 1) / 2 * deltaPix_in, numPix + ) + x_out = np.linspace( + -(numPix_new - 1) / 2 * deltaPix_out, + (numPix_new - 1) / 2 * deltaPix_out, + numPix_new, + ) kernel_out = image_util.re_size_array(x_in, x_in, kernel, x_out, x_out) kernel_out = kernel_norm(kernel_out) return kernel_out @@ -176,8 +195,7 @@ def kernel_pixelsize_change(kernel, deltaPix_in, deltaPix_out): @export def cut_psf(psf_data, psf_size, normalisation=True): - """ - cut the psf properly + """Cut the psf properly. 
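# Illustrative sketch (not part of the patch): supersampling a Gaussian test kernel with
# subgrid_kernel() and changing its pixel scale with kernel_pixelsize_change(), both
# documented above; kernel_gaussian() from the same module builds the test kernel.
from lenstronomy.Util import kernel_util

kernel = kernel_util.kernel_gaussian(num_pix=11, delta_pix=0.1, fwhm=0.3)
kernel_super = kernel_util.subgrid_kernel(kernel, subgrid_res=3, odd=True, num_iter=5)
kernel_coarse = kernel_util.kernel_pixelsize_change(kernel, deltaPix_in=0.1, deltaPix_out=0.2)
print(kernel_super.shape, kernel_coarse.shape)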
:param psf_data: image of PSF :param psf_size: size of psf @@ -191,8 +209,8 @@ def cut_psf(psf_data, psf_size, normalisation=True): @export def pixel_kernel(point_source_kernel, subgrid_res=7): - """ - converts a pixelised kernel of a point source to a kernel representing a uniform extended pixel + """Converts a pixelised kernel of a point source to a kernel representing a uniform + extended pixel. :param point_source_kernel: :param subgrid_res: @@ -200,30 +218,34 @@ def pixel_kernel(point_source_kernel, subgrid_res=7): """ kernel_subgrid = subgrid_kernel(point_source_kernel, subgrid_res, num_iter=10) kernel_size = len(point_source_kernel) - kernel_pixel = np.zeros((kernel_size*subgrid_res, kernel_size*subgrid_res)) + kernel_pixel = np.zeros((kernel_size * subgrid_res, kernel_size * subgrid_res)) for i in range(subgrid_res): - k_x = int((kernel_size-1) / 2 * subgrid_res + i) + k_x = int((kernel_size - 1) / 2 * subgrid_res + i) for j in range(subgrid_res): - k_y = int((kernel_size-1) / 2 * subgrid_res + j) - kernel_pixel = image_util.add_layer2image(kernel_pixel, k_x, k_y, kernel_subgrid) - kernel_pixel = util.averaging(kernel_pixel, numGrid=kernel_size*subgrid_res, numPix=kernel_size) + k_y = int((kernel_size - 1) / 2 * subgrid_res + j) + kernel_pixel = image_util.add_layer2image( + kernel_pixel, k_x, k_y, kernel_subgrid + ) + kernel_pixel = util.averaging( + kernel_pixel, numGrid=kernel_size * subgrid_res, numPix=kernel_size + ) return kernel_norm(kernel_pixel) @export def kernel_average_pixel(kernel_super, supersampling_factor): - """ - computes the effective convolution kernel assuming a uniform surface brightness on the scale of a pixel + """Computes the effective convolution kernel assuming a uniform surface brightness + on the scale of a pixel. :param kernel_super: supersampled PSF of a point source (odd number per axis :param supersampling_factor: supersampling factor (int) :return: """ kernel_sum = np.sum(kernel_super) - kernel_size = int(round(len(kernel_super)/float(supersampling_factor) + 0.5)) + kernel_size = int(round(len(kernel_super) / float(supersampling_factor) + 0.5)) if kernel_size % 2 == 0: kernel_size += 1 - n_high = kernel_size*supersampling_factor + n_high = kernel_size * supersampling_factor if n_high % 2 == 0: n_high += 1 kernel_pixel = np.zeros((n_high, n_high)) @@ -231,7 +253,9 @@ def kernel_average_pixel(kernel_super, supersampling_factor): k_x = int((kernel_size - 1) / 2 * supersampling_factor + i) for j in range(supersampling_factor): k_y = int((kernel_size - 1) / 2 * supersampling_factor + j) - kernel_pixel = image_util.add_layer2image(kernel_pixel, k_x, k_y, kernel_super) + kernel_pixel = image_util.add_layer2image( + kernel_pixel, k_x, k_y, kernel_super + ) if supersampling_factor % 2 == 0: kernel_pixel = averaging_even_kernel(kernel_pixel, supersampling_factor) @@ -243,8 +267,7 @@ def kernel_average_pixel(kernel_super, supersampling_factor): @export def kernel_gaussian(num_pix, delta_pix, fwhm): - """ - Gaussian kernel + """Gaussian kernel. 
:param num_pix: number of pixels :param delta_pix: pixel scale @@ -256,15 +279,16 @@ def kernel_gaussian(num_pix, delta_pix, fwhm): # kernel_numPix += 1 x_grid, y_grid = util.make_grid(num_pix, delta_pix) gaussian = Gaussian() - kernel = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel /= np.sum(kernel) kernel = util.array2image(kernel) return kernel def kernel_moffat(num_pix, delta_pix, fwhm, moffat_beta): - """ - Moffat kernel + """Moffat kernel. :param delta_pix: pixel scale of kernel :param num_pix: number of pixels per axis of the kernel @@ -280,22 +304,28 @@ def kernel_moffat(num_pix, delta_pix, fwhm, moffat_beta): kernel = util.array2image(kernel) return kernel + @export -def split_kernel(kernel_super, supersampling_kernel_size, supersampling_factor, normalized=True): - """ - pixel kernel and subsampling kernel such that the convolution of both applied on an image can be - performed, i.e. smaller subsampling PSF and hole in larger PSF +def split_kernel( + kernel_super, supersampling_kernel_size, supersampling_factor, normalized=True +): + """Pixel kernel and subsampling kernel such that the convolution of both applied on + an image can be performed, i.e. smaller subsampling PSF and hole in larger PSF. :param kernel_super: super-sampled kernel - :param supersampling_kernel_size: size of super-sampled PSF in units of degraded pixels - :param normalized: boolean, if True returns a split kernel that is area normalized=1 representing a convolution - kernel + :param supersampling_kernel_size: size of super-sampled PSF in units of degraded + pixels + :param normalized: boolean, if True returns a split kernel that is area normalized=1 + representing a convolution kernel :return: degraded kernel with hole and super-sampled kernel """ if supersampling_factor <= 1: - raise ValueError('To split a kernel, the supersampling_factor needs to be > 1, given %s' % supersampling_factor) + raise ValueError( + "To split a kernel, the supersampling_factor needs to be > 1, given %s" + % supersampling_factor + ) if supersampling_kernel_size % 2 == 0: - raise ValueError('supersampling_kernel_size needs to be an odd number!') + raise ValueError("supersampling_kernel_size needs to be an odd number!") n_super = len(kernel_super) n_sub = supersampling_kernel_size * supersampling_factor if n_sub % 2 == 0: @@ -304,20 +334,22 @@ def split_kernel(kernel_super, supersampling_kernel_size, supersampling_factor, n_sub = n_super kernel_hole = copy.deepcopy(kernel_super) - n_min = int((n_super-1) / 2 - (n_sub - 1) / 2) - n_max = int((n_super-1) / 2 + (n_sub - 1) / 2 + 1) + n_min = int((n_super - 1) / 2 - (n_sub - 1) / 2) + n_max = int((n_super - 1) / 2 + (n_sub - 1) / 2 + 1) kernel_hole[n_min:n_max, n_min:n_max] = 0 - kernel_hole_resized = degrade_kernel(kernel_hole, degrading_factor=supersampling_factor) + kernel_hole_resized = degrade_kernel( + kernel_hole, degrading_factor=supersampling_factor + ) kernel_subgrid_cut = kernel_super[n_min:n_max, n_min:n_max] if normalized is True: flux_subsampled = np.sum(kernel_subgrid_cut) flux_hole = np.sum(kernel_hole_resized) if flux_hole > 0: - kernel_hole_resized *= (1. 
- flux_subsampled) / np.sum(kernel_hole_resized) + kernel_hole_resized *= (1.0 - flux_subsampled) / np.sum(kernel_hole_resized) else: kernel_subgrid_cut /= np.sum(kernel_subgrid_cut) else: - kernel_hole_resized /= supersampling_factor ** 2 + kernel_hole_resized /= supersampling_factor**2 return kernel_hole_resized, kernel_subgrid_cut @@ -336,14 +368,12 @@ def degrade_kernel(kernel_super, degrading_factor): else: kernel_low_res = averaging_odd_kernel(kernel_super, degrading_factor) # degrading_factor**2 # multiplicative factor added when providing flux conservation - kernel_low_res *= degrading_factor ** 2 + kernel_low_res *= degrading_factor**2 return kernel_low_res def averaging_odd_kernel(kernel_super, degrading_factor): - """ - - """ + """""" n_kernel = len(kernel_super) numPix = int(round(n_kernel / degrading_factor + 0.5)) if numPix % 2 == 0: @@ -352,18 +382,20 @@ def averaging_odd_kernel(kernel_super, degrading_factor): kernel_super_ = np.zeros((n_high, n_high)) i_start = int((n_high - n_kernel) / 2) - kernel_super_[i_start:i_start + n_kernel, i_start:i_start + n_kernel] = kernel_super + kernel_super_[ + i_start : i_start + n_kernel, i_start : i_start + n_kernel + ] = kernel_super kernel_low_res = util.averaging(kernel_super_, numGrid=n_high, numPix=numPix) return kernel_low_res @export def averaging_even_kernel(kernel_high_res, subgrid_res): - """ - makes a lower resolution kernel based on the kernel_high_res (odd numbers) and the subgrid_res (even number), both - meant to be centered. + """Makes a lower resolution kernel based on the kernel_high_res (odd numbers) and + the subgrid_res (even number), both meant to be centered. - :param kernel_high_res: high resolution kernel with even subsampling resolution, centered + :param kernel_high_res: high resolution kernel with even subsampling resolution, + centered :param subgrid_res: subsampling resolution (even number) :return: averaged undersampling kernel """ @@ -381,18 +413,26 @@ def averaging_even_kernel(kernel_high_res, subgrid_res): kernel_high_res_edges[i_start:-i_start, i_start:-i_start] = kernel_high_res kernel_low_res = np.zeros((n_low, n_low)) # adding pixels that are fully within a single re-binned pixel - for i in range(subgrid_res-1): - for j in range(subgrid_res-1): + for i in range(subgrid_res - 1): + for j in range(subgrid_res - 1): kernel_low_res += kernel_high_res_edges[i::subgrid_res, j::subgrid_res] # adding half of a pixel that has over-lap with two pixels i = subgrid_res - 1 for j in range(subgrid_res - 1): - kernel_low_res[1:, :] += kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 - kernel_low_res[:-1, :] += kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + kernel_low_res[1:, :] += ( + kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + ) + kernel_low_res[:-1, :] += ( + kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + ) j = subgrid_res - 1 for i in range(subgrid_res - 1): - kernel_low_res[:, 1:] += kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 - kernel_low_res[:, :-1] += kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + kernel_low_res[:, 1:] += ( + kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + ) + kernel_low_res[:, :-1] += ( + kernel_high_res_edges[i::subgrid_res, j::subgrid_res] / 2 + ) # adding a quarter of a pixel value that is at the boarder of four pixels i = subgrid_res - 1 j = subgrid_res - 1 @@ -401,13 +441,13 @@ def averaging_even_kernel(kernel_high_res, subgrid_res): kernel_low_res[:-1, 1:] += kernel_edge / 4 kernel_low_res[1:, 
:-1] += kernel_edge / 4 kernel_low_res[:-1, :-1] += kernel_edge / 4 - return kernel_low_res / subgrid_res ** 2 + return kernel_low_res / subgrid_res**2 @export def cutout_source(x_pos, y_pos, image, kernelsize, shift=True): - """ - cuts out point source (e.g. PSF estimate) out of image and shift it to the center of a pixel + """Cuts out point source (e.g. PSF estimate) out of image and shift it to the center + of a pixel. :param x_pos: :param y_pos: @@ -420,7 +460,7 @@ def cutout_source(x_pos, y_pos, image, kernelsize, shift=True): x_int = int(round(x_pos)) y_int = int(round(y_pos)) n = len(image) - d = (kernelsize - 1)/2 + d = (kernelsize - 1) / 2 x_max = int(np.minimum(x_int + d + 1, n)) x_min = int(np.maximum(x_int - d, 0)) y_max = int(np.minimum(y_int + d + 1, n)) @@ -454,24 +494,25 @@ def fwhm_kernel(kernel): :return: """ n = len(kernel) - center = (n - 1) / 2. + center = (n - 1) / 2.0 I_r = image_util.radial_profile(kernel, center=[center, center]) if n % 2 == 0: - raise ValueError('only works with odd number of pixels in kernel!') - max_flux = kernel[int((n-1)/2), int((n-1)/2)] - I_2 = max_flux / 2. + raise ValueError("only works with odd number of pixels in kernel!") + max_flux = kernel[int((n - 1) / 2), int((n - 1) / 2)] + I_2 = max_flux / 2.0 r = np.linspace(0, (n - 1) / 2, int((n + 1) / 2)) + 0.33 for i in range(1, len(I_r)): if I_r[i] < I_2: fwhm_2 = (I_2 - I_r[i - 1]) / (I_r[i] - I_r[i - 1]) + r[i - 1] return fwhm_2 * 2 - raise ValueError('The kernel did not drop to half the max value - fwhm not determined!') + raise ValueError( + "The kernel did not drop to half the max value - fwhm not determined!" + ) @export def estimate_amp(data, x_pos, y_pos, psf_kernel): - """ - estimates the amplitude of a point source located at x_pos, y_pos + """Estimates the amplitude of a point source located at x_pos, y_pos. :param data: :param x_pos: @@ -480,15 +521,17 @@ def estimate_amp(data, x_pos, y_pos, psf_kernel): :return: """ numPix_x, numPix_y = np.shape(data) - x_int = int(round(x_pos-0.49999)) - y_int = int(round(y_pos-0.49999)) + x_int = int(round(x_pos - 0.49999)) + y_int = int(round(y_pos - 0.49999)) # TODO: make amplitude estimate not sucebtible to rounding effects on which pixels to chose to estimate the amplitude - if x_int > 2 and x_int < numPix_x-2 and y_int > 2 and y_int < numPix_y-2: - mean_image = max(np.sum(data[y_int - 2:y_int+3, x_int-2:x_int+3]), 0) + if x_int > 2 and x_int < numPix_x - 2 and y_int > 2 and y_int < numPix_y - 2: + mean_image = max(np.sum(data[y_int - 2 : y_int + 3, x_int - 2 : x_int + 3]), 0) num = len(psf_kernel) - center = int((num-0.5)/2) - mean_kernel = np.sum(psf_kernel[center-2:center+3, center-2:center+3]) - amp_estimated = mean_image/mean_kernel + center = int((num - 0.5) / 2) + mean_kernel = np.sum( + psf_kernel[center - 2 : center + 3, center - 2 : center + 3] + ) + amp_estimated = mean_image / mean_kernel else: amp_estimated = 0 return amp_estimated @@ -496,40 +539,45 @@ def estimate_amp(data, x_pos, y_pos, psf_kernel): @export def mge_kernel(kernel, order=5): - """ - azimutal Multi-Gaussian expansion of a pixelized kernel + """Azimutal Multi-Gaussian expansion of a pixelized kernel. :param kernel: 2d numpy array :return: """ # radial average n = len(kernel) - center = (n - 1) / 2. 
+ center = (n - 1) / 2.0 psf_r = image_util.radial_profile(kernel, center=[center, center]) # MGE of radial average n_r = len(psf_r) - r_array = np.linspace(start=0., stop=n_r - 1, num=n_r) + r_array = np.linspace(start=0.0, stop=n_r - 1, num=n_r) amps, sigmas, norm = mge.mge_1d(r_array, psf_r, N=order, linspace=True) return amps, sigmas, norm @export def match_kernel_size(image, size): - """ - matching kernel/image to a dedicated size by either expanding the image with zeros at the edges or chopping of the - edges. + """Matching kernel/image to a dedicated size by either expanding the image with + zeros at the edges or chopping of the edges. :param image: 2d array (square with odd number of pixels) :param size: integer (odd number) - :return: image with matched size, either by cutting or by adding zeros in the outskirts + :return: image with matched size, either by cutting or by adding zeros in the + outskirts """ n = len(image) if n == size: return image image_copy = copy.deepcopy(image) if n > size: - return image_copy[int((n-size)/2): int(n - (n-size)/2), int((n-size)/2): int(n - (n-size)/2)] + return image_copy[ + int((n - size) / 2) : int(n - (n - size) / 2), + int((n - size) / 2) : int(n - (n - size) / 2), + ] else: image_add = np.zeros((size, size)) - image_add[int((size - n)/2): int(size - (size - n)/2), int((size - n)/2): int(size - (size - n)/2)] = image_copy + image_add[ + int((size - n) / 2) : int(size - (size - n) / 2), + int((size - n) / 2) : int(size - (size - n) / 2), + ] = image_copy return image_add diff --git a/lenstronomy/Util/magnification_finite_util.py b/lenstronomy/Util/magnification_finite_util.py index 77cc1b697..9125758a9 100644 --- a/lenstronomy/Util/magnification_finite_util.py +++ b/lenstronomy/Util/magnification_finite_util.py @@ -3,62 +3,79 @@ import numpy as np -def auto_raytracing_grid_size(source_fwhm_parcsec, grid_size_scale=0.005, power=1.): - - """ - This function returns the size of a ray tracing grid in units of arcsec appropriate for magnification computations - with finite-size background sources. This fit is calibrated for source sizes (interpreted as the FWHM of a Gaussian) in - the range 0.1 -100 pc. - - :param source_fwhm_parcsec: the full width at half max of a Gaussian background source - :return: an appropriate grid size for finite-size background magnification computation +def auto_raytracing_grid_size(source_fwhm_parcsec, grid_size_scale=0.005, power=1.0): + """This function returns the size of a ray tracing grid in units of arcsec + appropriate for magnification computations with finite-size background sources. This + fit is calibrated for source sizes (interpreted as the FWHM of a Gaussian) in the + range 0.1 -100 pc. + + :param source_fwhm_parcsec: the full width at half max of a Gaussian background + source + :return: an appropriate grid size for finite-size background magnification + computation """ - grid_radius_arcsec = grid_size_scale * source_fwhm_parcsec ** power + grid_radius_arcsec = grid_size_scale * source_fwhm_parcsec**power return grid_radius_arcsec -def auto_raytracing_grid_resolution(source_fwhm_parcsec, grid_resolution_scale=0.0002, ref=10., power=1.): - - """ - This function returns a resolution factor in units arcsec/pixel appropriate for magnification computations with - finite-size background sources. This fit is calibrated for source sizes (interpreted as the FWHM of a Gaussian) in - the range 0.1 -100 pc. 
+def auto_raytracing_grid_resolution( + source_fwhm_parcsec, grid_resolution_scale=0.0002, ref=10.0, power=1.0 +): + """This function returns a resolution factor in units arcsec/pixel appropriate for + magnification computations with finite-size background sources. This fit is + calibrated for source sizes (interpreted as the FWHM of a Gaussian) in the range 0.1 + -100 pc. - :param source_fwhm_parcsec: the full width at half max of a Gaussian background source - :return: an appropriate grid resolution for finite-size background magnification computation + :param source_fwhm_parcsec: the full width at half max of a Gaussian background + source + :return: an appropriate grid resolution for finite-size background magnification + computation """ grid_resolution = grid_resolution_scale * (source_fwhm_parcsec / ref) ** power return grid_resolution -def setup_mag_finite(cosmo, lens_model, grid_radius_arcsec, grid_resolution, source_fwhm_parsec, source_light_model, z_source, - source_x, source_y, dx, dy, amp_scale, size_scale): - """ - Sets up the ray tracing grid and source light model for magnification_finite_adaptive and - plot_quasar_images routines - :param cosmo: (optional) an instance of astropy.cosmology; if not specified, a default cosmology will be used - :param lens_model: an instance of LensModel - :param grid_radius_arcsec: (optional) the size of the ray tracing region in arcsec; if not specified, an appropriate value - will be estimated from the source size - :param grid_resolution: the grid resolution in units arcsec/pixel; if not specified, an appropriate value will - be estimated from the source size - :param source_fwhm_parsec: the size of the background source [units parsec] - :param source_light_model: the model for background source light; currently implemented are 'SINGLE_GAUSSIAN' and - 'DOUBLE_GAUSSIAN'. +def setup_mag_finite( + cosmo, + lens_model, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, +): + """Sets up the ray tracing grid and source light model for + magnification_finite_adaptive and plot_quasar_images routines :param cosmo: + (optional) an instance of astropy.cosmology; if not specified, a default cosmology + will be used :param lens_model: an instance of LensModel :param grid_radius_arcsec: + (optional) the size of the ray tracing region in arcsec; if not specified, an + appropriate value will be estimated from the source size :param grid_resolution: the + grid resolution in units arcsec/pixel; if not specified, an appropriate value will + be estimated from the source size :param source_fwhm_parsec: the size of the + background source [units parsec] :param source_light_model: the model for background + source light; currently implemented are 'SINGLE_GAUSSIAN' and 'DOUBLE_GAUSSIAN'. 
+ :param z_source: source redshift :param source_x: source x position [arcsec] :param source_y: source y position [arcsec] - :param dx: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source light profile from the first - [arcsec] - :param dy: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source light profile from the first - [arcsec] - :param amp_scale: used with source model 'DOUBLE_GAUSSIAN', the peak brightness of the second source light profile - relative to the first - :param size_scale: used with source model 'DOUBLE_GAUSSIAN', the size of the second source light profile relative - to the first - :return: x coordinate grid, y coordinate grid, source light model, and keywords for the source light model + :param dx: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source + light profile from the first [arcsec] + :param dy: used with source model 'DOUBLE_GAUSSIAN', the offset of the second source + light profile from the first [arcsec] + :param amp_scale: used with source model 'DOUBLE_GAUSSIAN', the peak brightness of + the second source light profile relative to the first + :param size_scale: used with source model 'DOUBLE_GAUSSIAN', the size of the second + source light profile relative to the first + :return: x coordinate grid, y coordinate grid, source light model, and keywords for + the source light model """ if cosmo is None: cosmo = lens_model.cosmo @@ -72,27 +89,54 @@ def setup_mag_finite(cosmo, lens_model, grid_radius_arcsec, grid_resolution, sou source_fwhm_arcsec = source_fwhm_parsec / pc_per_arcsec source_sigma_arcsec = fwhm2sigma(source_fwhm_arcsec) - if source_light_model == 'SINGLE_GAUSSIAN': - kwargs_source = [{'amp': 1., 'center_x': source_x, 'center_y': source_y, 'sigma': source_sigma_arcsec}] - source_model = LightModel(['GAUSSIAN']) - elif source_light_model == 'DOUBLE_GAUSSIAN': - amp_1 = 1. 
- kwargs_source_1 = [{'amp': amp_1, 'center_x': source_x, 'center_y': source_y, 'sigma': source_sigma_arcsec}] + if source_light_model == "SINGLE_GAUSSIAN": + kwargs_source = [ + { + "amp": 1.0, + "center_x": source_x, + "center_y": source_y, + "sigma": source_sigma_arcsec, + } + ] + source_model = LightModel(["GAUSSIAN"]) + elif source_light_model == "DOUBLE_GAUSSIAN": + amp_1 = 1.0 + kwargs_source_1 = [ + { + "amp": amp_1, + "center_x": source_x, + "center_y": source_y, + "sigma": source_sigma_arcsec, + } + ] # c = amp / (2 * np.pi * sigma**2) - amp_2 = amp_1 * amp_scale * size_scale ** 2 - kwargs_source_2 = [{'amp': amp_2, 'center_x': source_x + dx, 'center_y': source_y + dy, - 'sigma': source_sigma_arcsec * size_scale}] + amp_2 = amp_1 * amp_scale * size_scale**2 + kwargs_source_2 = [ + { + "amp": amp_2, + "center_x": source_x + dx, + "center_y": source_y + dy, + "sigma": source_sigma_arcsec * size_scale, + } + ] kwargs_source = kwargs_source_1 + kwargs_source_2 - source_model = LightModel(['GAUSSIAN'] * 2) + source_model = LightModel(["GAUSSIAN"] * 2) else: - raise Exception('source light model must be specified, currently implemented models are SINGLE_GAUSSIAN ' - 'and DOUBLE_GAUSSIAN') + raise Exception( + "source light model must be specified, currently implemented models are SINGLE_GAUSSIAN " + "and DOUBLE_GAUSSIAN" + ) npix = int(2 * grid_radius_arcsec / grid_resolution) _grid_x = np.linspace(-grid_radius_arcsec, grid_radius_arcsec, npix) _grid_y = np.linspace(-grid_radius_arcsec, grid_radius_arcsec, npix) grid_x_0, grid_y_0 = np.meshgrid(_grid_x, _grid_y) - return grid_x_0, grid_y_0, source_model, kwargs_source, grid_resolution, grid_radius_arcsec - - + return ( + grid_x_0, + grid_y_0, + source_model, + kwargs_source, + grid_resolution, + grid_radius_arcsec, + ) diff --git a/lenstronomy/Util/mask_util.py b/lenstronomy/Util/mask_util.py index 60fabcda6..efe57e9cd 100644 --- a/lenstronomy/Util/mask_util.py +++ b/lenstronomy/Util/mask_util.py @@ -3,6 +3,7 @@ from lenstronomy.Util import param_util from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @@ -20,8 +21,8 @@ def mask_center_2d(center_x, center_y, r, x_grid, y_grid): """ x_shift = x_grid - center_x y_shift = y_grid - center_y - R = np.sqrt(x_shift*x_shift + y_shift*y_shift) - mask = np.empty_like(R, dtype='int') + R = np.sqrt(x_shift * x_shift + y_shift * y_shift) + mask = np.empty_like(R, dtype="int") mask[R > r] = 1 mask[R <= r] = 0 return mask @@ -29,8 +30,7 @@ def mask_center_2d(center_x, center_y, r, x_grid, y_grid): @export def mask_azimuthal(x, y, center_x, center_y, r): - """ - azimuthal mask with =1 inside radius and =0 outside + """Azimuthal mask with =1 inside radius and =0 outside. 
:param x: x-coordinates (1d or 2d array numpy array) :param y: y-coordinates (1d or 2d array numpy array) @@ -42,8 +42,8 @@ def mask_azimuthal(x, y, center_x, center_y, r): """ x_shift = x - center_x y_shift = y - center_y - R = np.sqrt(x_shift*x_shift + y_shift*y_shift) - mask = np.empty_like(R, dtype='int') + R = np.sqrt(x_shift * x_shift + y_shift * y_shift) + mask = np.empty_like(R, dtype="int") mask[R > r] = 0 mask[R <= r] = 1 return mask @@ -67,7 +67,7 @@ def mask_ellipse(x, y, center_x, center_y, a, b, angle): y_shift = y - center_y x_rot, y_rot = util.rotate(x_shift, y_shift, angle) r_ab = x_rot**2 / a**2 + y_rot**2 / b**2 - mask = np.empty_like(r_ab, dtype='int') + mask = np.empty_like(r_ab, dtype="int") mask[r_ab > 1] = 0 mask[r_ab <= 1] = 1 return mask @@ -75,8 +75,7 @@ def mask_ellipse(x, y, center_x, center_y, a, b, angle): @export def mask_eccentric(x, y, center_x, center_y, e1, e2, r): - """ - elliptical mask with eccentricities as input + """Elliptical mask with eccentricities as input. :param x: x-coordinate array :param y: y-coordinate array @@ -89,15 +88,15 @@ def mask_eccentric(x, y, center_x, center_y, e1, e2, r): :rtype: array of size of input grid with integers 0 or 1 """ x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x, center_y) - r_ab = (x_**2 + y_**2) / r ** 2 - mask = np.empty_like(r_ab, dtype='int') + r_ab = (x_**2 + y_**2) / r**2 + mask = np.empty_like(r_ab, dtype="int") mask[r_ab > 1] = 0 mask[r_ab <= 1] = 1 return mask @export -def mask_half_moon(x, y, center_x, center_y, r_in, r_out, phi0=0, delta_phi=2*np.pi): +def mask_half_moon(x, y, center_x, center_y, r_in, r_out, phi0=0, delta_phi=2 * np.pi): """ :param x: @@ -113,11 +112,11 @@ def mask_half_moon(x, y, center_x, center_y, r_in, r_out, phi0=0, delta_phi=2*np """ x_shift = x - center_x y_shift = y - center_y - R = np.sqrt(x_shift*x_shift + y_shift*y_shift) + R = np.sqrt(x_shift * x_shift + y_shift * y_shift) phi = np.arctan2(x_shift, y_shift) - phi_min = phi0 - delta_phi/2. - phi_max = phi0 + delta_phi/2. - mask = np.zeros_like(x, dtype='int') + phi_min = phi0 - delta_phi / 2.0 + phi_max = phi0 + delta_phi / 2.0 + mask = np.zeros_like(x, dtype="int") if phi_max > phi_min: mask[(R < r_out) & (R > r_in) & (phi > phi_min) & (phi < phi_max)] = 1 else: @@ -127,8 +126,7 @@ def mask_half_moon(x, y, center_x, center_y, r_in, r_out, phi0=0, delta_phi=2*np def mask_shell(x, y, center_x, center_y, r_in, r_out): - """ - ring mask + """Ring mask. :param x: x-coordinate grid :param y: y-coordinate grid @@ -141,6 +139,6 @@ def mask_shell(x, y, center_x, center_y, r_in, r_out): x_shift = x - center_x y_shift = y - center_y r = np.sqrt(x_shift * x_shift + y_shift * y_shift) - mask = np.zeros_like(x, dtype='int') - mask[(r >= r_in) & (r< r_out)] = 1 - return mask \ No newline at end of file + mask = np.zeros_like(x, dtype="int") + mask[(r >= r_in) & (r < r_out)] = 1 + return mask diff --git a/lenstronomy/Util/multi_gauss_expansion.py b/lenstronomy/Util/multi_gauss_expansion.py index c5038445c..d7298c22a 100644 --- a/lenstronomy/Util/multi_gauss_expansion.py +++ b/lenstronomy/Util/multi_gauss_expansion.py @@ -8,6 +8,7 @@ import warnings from lenstronomy.Util.package_util import exporter from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian_func = Gaussian() export, __all__ = exporter() @@ -23,7 +24,7 @@ def gaussian(R, sigma, amp): :return: Gaussian function """ c = amp / (2 * np.pi * sigma**2) - return c * np.exp(-(R/float(sigma))**2/2.) 
+ return c * np.exp(-((R / float(sigma)) ** 2) / 2.0) @export @@ -36,7 +37,9 @@ def mge_1d(r_array, flux_r, N=20, linspace=False): :return: amplitudes and Gaussian sigmas for the best 1d flux profile """ if N == 0: - warnings.warn('Number of MGE went down to zero! This should not happen!', Warning) + warnings.warn( + "Number of MGE went down to zero! This should not happen!", Warning + ) amplitudes = [0] sigmas = [1] norm = 0 @@ -61,23 +64,25 @@ def _mge_1d(r_array, flux_r, N=20, linspace=False): if linspace is True: sigmas = np.linspace(r_array[0], r_array[-1] / 2, N + 2)[1:-1] else: - sigmas = np.logspace(np.log10(r_array[0]), np.log10((r_array[-1] + 0.0000001) / 2.), N + 2)[1:-1] + sigmas = np.logspace( + np.log10(r_array[0]), np.log10((r_array[-1] + 0.0000001) / 2.0), N + 2 + )[1:-1] # sigmas = np.linspace(r_array[0], r_array[-1]/2, N + 2)[1:-1] A = np.zeros((len(flux_r), N)) for j in np.arange(A.shape[1]): - A[:, j] = gaussian(r_array, sigmas[j], 1.) + A[:, j] = gaussian(r_array, sigmas[j], 1.0) amplitudes, norm = nnls(A, flux_r) return amplitudes, sigmas, norm @export def de_projection_3d(amplitudes, sigmas): - """ - de-projects a gaussian (or list of multiple Gaussians from a 2d projected to a 3d profile) - :param amplitudes: + """De-projects a gaussian (or list of multiple Gaussians from a 2d projected to a 3d + profile) :param amplitudes: + :param sigmas: :return: """ - amplitudes_3d = amplitudes / sigmas / np.sqrt(2*np.pi) + amplitudes_3d = amplitudes / sigmas / np.sqrt(2 * np.pi) return amplitudes_3d, sigmas diff --git a/lenstronomy/Util/numba_util.py b/lenstronomy/Util/numba_util.py index b9726ef70..86ba82c8b 100644 --- a/lenstronomy/Util/numba_util.py +++ b/lenstronomy/Util/numba_util.py @@ -12,12 +12,12 @@ """ numba_conf = config_loader.numba_conf() -nopython = numba_conf['nopython'] -cache = numba_conf['cache'] -parallel = numba_conf['parallel'] -numba_enabled = numba_conf['enable'] and not environ.get("NUMBA_DISABLE_JIT", False) -fastmath = numba_conf['fastmath'] -error_model = numba_conf['error_model'] +nopython = numba_conf["nopython"] +cache = numba_conf["cache"] +parallel = numba_conf["parallel"] +numba_enabled = numba_conf["enable"] and not environ.get("NUMBA_DISABLE_JIT", False) +fastmath = numba_conf["fastmath"] +error_model = numba_conf["error_model"] if numba_enabled: try: @@ -28,35 +28,68 @@ numba = None extending = None -__all__ = ['jit', 'overload', 'nan_to_num', 'nan_to_num_arr', 'nan_to_num_single'] +__all__ = ["jit", "overload", "nan_to_num", "nan_to_num_arr", "nan_to_num_single"] -def jit(nopython=nopython, cache=cache, parallel=parallel, fastmath=fastmath, error_model=error_model, inline='never'): +def jit( + nopython=nopython, + cache=cache, + parallel=parallel, + fastmath=fastmath, + error_model=error_model, + inline="never", +): if numba_enabled: + def wrapper(func): - return numba.jit(func, nopython=nopython, cache=cache, parallel=parallel, fastmath=fastmath, - error_model=error_model, inline=inline) + return numba.jit( + func, + nopython=nopython, + cache=cache, + parallel=parallel, + fastmath=fastmath, + error_model=error_model, + inline=inline, + ) + else: + def wrapper(func): return func + return wrapper -def overload(nopython=nopython, cache=cache, parallel=parallel, fastmath=fastmath, error_model=error_model): - """ - Wrapper around numba.generated_jit. 
Allows you to redirect a function to another based on its type - - see the Numba docs for more info +def overload( + nopython=nopython, + cache=cache, + parallel=parallel, + fastmath=fastmath, + error_model=error_model, +): + """Wrapper around numba.generated_jit. + + Allows you to redirect a function to another based on its type + - see the Numba docs for more info """ if numba_enabled: def wrapper(func): # TODO change to overload, but currently breaks tests with nopython - return numba.generated_jit(func, nopython=nopython, cache=cache, parallel=parallel, fastmath=fastmath, - error_model=error_model) + return numba.generated_jit( + func, + nopython=nopython, + cache=cache, + parallel=parallel, + fastmath=fastmath, + error_model=error_model, + ) # return extending.overload(func, jit_options={'nopython': nopython, 'cache': cache, # 'parallel': parallel, # 'fastmath': fastmath, 'error_model': error_model}) + else: + def wrapper(func): return func @@ -64,22 +97,32 @@ def wrapper(func): @overload() -def nan_to_num(x, posinf=1e10, neginf=-1e10, nan=0.): - """ - Implements a Numba equivalent to np.nan_to_num (with copy=False!) array or scalar in Numba. - Behaviour is the same as np.nan_to_num with copy=False, although it only supports 1-dimensional arrays and - scalar inputs. +def nan_to_num(x, posinf=1e10, neginf=-1e10, nan=0.0): + """Implements a Numba equivalent to np.nan_to_num (with copy=False!) array or scalar + in Numba. + + Behaviour is the same as np.nan_to_num with copy=False, although it only supports + 1-dimensional arrays and scalar inputs. """ # The generated_jit part is necessary because of the need to support both arrays and scalars for all input # functions. - if ((numba_enabled and isinstance(x, numba.types.Array)) or isinstance(x, np.ndarray)) and x.ndim > 0: - return nan_to_num_arr if numba_enabled else nan_to_num_arr(x, posinf, neginf, nan) + if ( + (numba_enabled and isinstance(x, numba.types.Array)) + or isinstance(x, np.ndarray) + ) and x.ndim > 0: + return ( + nan_to_num_arr if numba_enabled else nan_to_num_arr(x, posinf, neginf, nan) + ) else: - return nan_to_num_single if numba_enabled else nan_to_num_single(x, posinf, neginf, nan) + return ( + nan_to_num_single + if numba_enabled + else nan_to_num_single(x, posinf, neginf, nan) + ) @jit() -def nan_to_num_arr(x, posinf=1e10, neginf=-1e10, nan=0.): +def nan_to_num_arr(x, posinf=1e10, neginf=-1e10, nan=0.0): """Part of the Numba implementation of np.nan_to_num - see nan_to_num""" for i in range(len(x)): if np.isnan(x[i]): @@ -93,7 +136,7 @@ def nan_to_num_arr(x, posinf=1e10, neginf=-1e10, nan=0.): @jit() -def nan_to_num_single(x, posinf=1e10, neginf=-1e10, nan=0.): +def nan_to_num_single(x, posinf=1e10, neginf=-1e10, nan=0.0): """Part of the Numba implementation of np.nan_to_num - see nan_to_num""" if np.isnan(x): return nan diff --git a/lenstronomy/Util/package_util.py b/lenstronomy/Util/package_util.py index 91cd1da00..582261656 100644 --- a/lenstronomy/Util/package_util.py +++ b/lenstronomy/Util/package_util.py @@ -7,7 +7,7 @@ def exporter(export_self=False): """ all_ = [] if export_self: - all_.append('exporter') + all_.append("exporter") def decorator(obj): all_.append(obj.__name__) @@ -55,18 +55,19 @@ def short(_laconic=False): for loader, module_name, is_pkg in pkgutil.walk_packages(lenstronomy.__path__): # This deep internal module relies heavily on 'multiprocessing', # which may not be installed - if module_name == 'Sampling.Pool.multiprocessing': + if module_name == "Sampling.Pool.multiprocessing": continue # 
Load the module - module = all_modules[module_name] = \ - loader.find_module(module_name).load_module(module_name) + module = all_modules[module_name] = loader.find_module(module_name).load_module( + module_name + ) - if '.' in module_name: + if "." in module_name: # Submodule, e.g. Data.psf # Monkeypatch the parent module to make it accessible - fragments = module_name.split('.') - parent_module_name, child_name = '.'.join(fragments[:-1]), fragments[-1] + fragments = module_name.split(".") + parent_module_name, child_name = ".".join(fragments[:-1]), fragments[-1] setattr(all_modules[parent_module_name], child_name, module) else: # Top-level module, e.g. Data: add as lenstronomy attribute @@ -75,14 +76,14 @@ def short(_laconic=False): if _laconic: # If the module defines an __all__, load its symbols as well. # (unlike import *, we do not just load everything if __all__ is missing) - if hasattr(module, '__all__'): + if hasattr(module, "__all__"): for symbol in module.__all__: symbol_name = symbol if isinstance(to_add.get(symbol), types.ModuleType): # Key class clashing with module name # (Cosmo, LensModel, LightModel, PointSource) # Try to add the symbol as LensModel_: - symbol_name = symbol + '_' + symbol_name = symbol + "_" if symbol_name in to_add: # Name clash! Do not add the symbol # or the one it clashed with. diff --git a/lenstronomy/Util/param_util.py b/lenstronomy/Util/param_util.py index 026245b8a..319a6b176 100644 --- a/lenstronomy/Util/param_util.py +++ b/lenstronomy/Util/param_util.py @@ -2,13 +2,14 @@ from lenstronomy.Util.numba_util import jit from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export def cart2polar(x, y, center_x=0, center_y=0): - """ - transforms cartesian coords [x,y] into polar coords [r,phi] in the frame of the lens center + """Transforms cartesian coords [x,y] into polar coords [r,phi] in the frame of the + lens center. :param x: set of x-coordinates :type x: array of size (n) @@ -18,19 +19,19 @@ def cart2polar(x, y, center_x=0, center_y=0): :type center_x: float :param center_y: rotation point :type center_y: float - :returns: array of same size with coords [r,phi] + :returns: array of same size with coords [r,phi] """ coord_shift_x = x - center_x coord_shift_y = y - center_y - r = np.sqrt(coord_shift_x**2+coord_shift_y**2) + r = np.sqrt(coord_shift_x**2 + coord_shift_y**2) phi = np.arctan2(coord_shift_y, coord_shift_x) return r, phi @export def polar2cart(r, phi, center): - """ - transforms polar coords [r,phi] into cartesian coords [x,y] in the frame of the lense center + """Transforms polar coords [r,phi] into cartesian coords [x,y] in the frame of the + lense center. 
:param r: radial coordinate (distance) to the center :type r: array of size n or float @@ -38,11 +39,11 @@ def polar2cart(r, phi, center): :type phi: array of size n or float :param center: rotation point :type center: array of size (2) - :returns: array of same size with coords [x,y] + :returns: array of same size with coords [x,y] :raises: AttributeError, KeyError """ - x = r*np.cos(phi) - y = r*np.sin(phi) + x = r * np.cos(phi) + y = r * np.sin(phi) return x - center[0], y - center[1] @@ -54,8 +55,8 @@ def shear_polar2cartesian(phi, gamma): :param gamma: shear strength :return: shear components gamma1, gamma2 """ - gamma1 = gamma*np.cos(2*phi) - gamma2 = gamma*np.sin(2*phi) + gamma1 = gamma * np.cos(2 * phi) + gamma2 = gamma * np.sin(2 * phi) return gamma1, gamma2 @@ -67,47 +68,45 @@ def shear_cartesian2polar(gamma1, gamma2): :return: shear angle, shear strength """ phi = np.arctan2(gamma2, gamma1) / 2 - gamma = np.sqrt(gamma1 ** 2 + gamma2 ** 2) + gamma = np.sqrt(gamma1**2 + gamma2**2) return phi, gamma @export @jit() def phi_q2_ellipticity(phi, q): - """ - transforms orientation angle and axis ratio into complex ellipticity moduli e1, e2 + """Transforms orientation angle and axis ratio into complex ellipticity moduli e1, + e2. :param phi: angle of orientation (in radian) :param q: axis ratio minor axis / major axis :return: eccentricities e1 and e2 in complex ellipticity moduli """ - e1 = (1. - q) / (1. + q) * np.cos(2 * phi) - e2 = (1. - q) / (1. + q) * np.sin(2 * phi) + e1 = (1.0 - q) / (1.0 + q) * np.cos(2 * phi) + e2 = (1.0 - q) / (1.0 + q) * np.sin(2 * phi) return e1, e2 @export @jit() def ellipticity2phi_q(e1, e2): - """ - transforms complex ellipticity moduli in orientation angle and axis ratio + """Transforms complex ellipticity moduli in orientation angle and axis ratio. :param e1: eccentricity in x-direction :param e2: eccentricity in xy-direction :return: angle in radian, axis ratio (minor/major) """ - phi = np.arctan2(e2, e1)/2 - c = np.sqrt(e1**2+e2**2) + phi = np.arctan2(e2, e1) / 2 + c = np.sqrt(e1**2 + e2**2) c = np.minimum(c, 0.9999) - q = (1-c)/(1+c) + q = (1 - c) / (1 + c) return phi, q @export def transform_e1e2_product_average(x, y, e1, e2, center_x, center_y): - """ - maps the coordinates x, y with eccentricities e1 e2 into a new elliptical coordinate system - such that R = sqrt(R_major * R_minor) + """Maps the coordinates x, y with eccentricities e1 e2 into a new elliptical + coordinate system such that R = sqrt(R_major * R_minor) :param x: x-coordinate :param y: y-coordinate @@ -131,9 +130,8 @@ def transform_e1e2_product_average(x, y, e1, e2, center_x, center_y): @export def transform_e1e2_square_average(x, y, e1, e2, center_x, center_y): - """ - maps the coordinates x, y with eccentricities e1 e2 into a new elliptical coordinate system - such that R = sqrt(R_major**2 + R_minor**2) + """Maps the coordinates x, y with eccentricities e1 e2 into a new elliptical + coordinate system such that R = sqrt(R_major**2 + R_minor**2) :param x: x-coordinate :param y: y-coordinate @@ -155,8 +153,7 @@ def transform_e1e2_square_average(x, y, e1, e2, center_x, center_y): def q2e(q): - """ - computes + """computes. .. 
math:: e = \\equic \\frac{1 - q^2}{1 + q^2} @@ -164,5 +161,5 @@ def q2e(q): :param q: axis ratio of minor to major axis :return: ellipticity e """ - e = abs(1 - q ** 2) / (1 + q ** 2) + e = abs(1 - q**2) / (1 + q**2) return e diff --git a/lenstronomy/Util/prob_density.py b/lenstronomy/Util/prob_density.py index ad662c46f..dc4704561 100644 --- a/lenstronomy/Util/prob_density.py +++ b/lenstronomy/Util/prob_density.py @@ -1,18 +1,18 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from scipy import stats import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export class SkewGaussian(object): - """ - class for the Skew Gaussian distribution - """ - def pdf(self, x, e=0., w=1., a=0.): + """Class for the Skew Gaussian distribution.""" + + def pdf(self, x, e=0.0, w=1.0, a=0.0): """ probability density function see: https://en.wikipedia.org/wiki/Skew_normal_distribution @@ -23,12 +23,11 @@ def pdf(self, x, e=0., w=1., a=0.): :param a: :return: """ - t = (x-e) / w - return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a*t) + t = (x - e) / w + return 2.0 / w * stats.norm.pdf(t) * stats.norm.cdf(a * t) def pdf_skew(self, x, mu, sigma, skw): - """ - function with different parameterisation + """Function with different parameterisation. :param x: :param mu: mean @@ -48,9 +47,9 @@ def _delta_skw(self, skw): :param skw: skewness parameter :return: delta """ - skw_23 = np.abs(skw)**(2./3) - delta2 = skw_23*np.pi/2 / (skw_23 + ((4-np.pi)/2)**(2./3)) - return np.sqrt(delta2)*skw/np.abs(skw) + skw_23 = np.abs(skw) ** (2.0 / 3) + delta2 = skw_23 * np.pi / 2 / (skw_23 + ((4 - np.pi) / 2) ** (2.0 / 3)) + return np.sqrt(delta2) * skw / np.abs(skw) def _alpha_delta(self, delta): """ @@ -58,18 +57,17 @@ def _alpha_delta(self, delta): :param delta: delta parameter :return: alpha (a) """ - return delta/np.sqrt(1-delta**2) + return delta / np.sqrt(1 - delta**2) def _w_sigma_delta(self, sigma, delta): - """ - invert variance + """Invert variance. :param sigma: :param delta: :return: w parameter """ - sigma2=sigma**2 - w2 = sigma2/(1-2*delta**2/np.pi) + sigma2 = sigma**2 + w2 = sigma2 / (1 - 2 * delta**2 / np.pi) w = np.sqrt(w2) return w @@ -81,12 +79,11 @@ def _e_mu_w_delta(self, mu, w, delta): :param delta: :return: epsilon (e) """ - e = mu - w*delta*np.sqrt(2/np.pi) + e = mu - w * delta * np.sqrt(2 / np.pi) return e def map_mu_sigma_skw(self, mu, sigma, skw): - """ - map to parameters e, w, a + """Map to parameters e, w, a. :param mu: mean :param sigma: standard deviation @@ -102,9 +99,8 @@ def map_mu_sigma_skw(self, mu, sigma, skw): @export class KDE1D(object): - """ - class that allows to compute likelihoods based on a 1-d posterior sample - """ + """Class that allows to compute likelihoods based on a 1-d posterior sample.""" + def __init__(self, values): """ @@ -126,37 +122,44 @@ def likelihood(self, x): @export def compute_lower_upper_errors(sample, num_sigma=1): - """ - computes the upper and lower sigma from the median value. - This functions gives good error estimates for skewed pdf's + """Computes the upper and lower sigma from the median value. This functions gives + good error estimates for skewed pdf's. :param sample: 1-D sample :param num_sigma: integer, number of sigmas to be returned :return: median, lower_sigma, upper_sigma """ if num_sigma > 3: - raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma) + raise ValueError( + "Number of sigma-constraints restricted to three. 
%s not valid" % num_sigma + ) num = len(sample) - num_threshold1 = int(round((num-1)*0.841345)) - num_threshold2 = int(round((num-1)*0.977249868)) - num_threshold3 = int(round((num-1)*0.998650102)) + num_threshold1 = int(round((num - 1) * 0.841345)) + num_threshold2 = int(round((num - 1) * 0.977249868)) + num_threshold3 = int(round((num - 1) * 0.998650102)) median = np.median(sample) sorted_sample = np.sort(sample) if num_sigma > 0: - upper_sigma1 = sorted_sample[num_threshold1-1] - lower_sigma1 = sorted_sample[num-num_threshold1-1] + upper_sigma1 = sorted_sample[num_threshold1 - 1] + lower_sigma1 = sorted_sample[num - num_threshold1 - 1] else: return median, [[]] if num_sigma > 1: - upper_sigma2 = sorted_sample[num_threshold2-1] - lower_sigma2 = sorted_sample[num-num_threshold2-1] + upper_sigma2 = sorted_sample[num_threshold2 - 1] + lower_sigma2 = sorted_sample[num - num_threshold2 - 1] else: - return median, [[median-lower_sigma1, upper_sigma1-median]] + return median, [[median - lower_sigma1, upper_sigma1 - median]] if num_sigma > 2: - upper_sigma3 = sorted_sample[num_threshold3-1] - lower_sigma3 = sorted_sample[num-num_threshold3-1] - return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median], - [median-lower_sigma3, upper_sigma3-median]] + upper_sigma3 = sorted_sample[num_threshold3 - 1] + lower_sigma3 = sorted_sample[num - num_threshold3 - 1] + return median, [ + [median - lower_sigma1, upper_sigma1 - median], + [median - lower_sigma2, upper_sigma2 - median], + [median - lower_sigma3, upper_sigma3 - median], + ] else: - return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median]] + return median, [ + [median - lower_sigma1, upper_sigma1 - median], + [median - lower_sigma2, upper_sigma2 - median], + ] diff --git a/lenstronomy/Util/sampling_util.py b/lenstronomy/Util/sampling_util.py index f0df2eb25..eec396db1 100644 --- a/lenstronomy/Util/sampling_util.py +++ b/lenstronomy/Util/sampling_util.py @@ -1,37 +1,34 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import numpy as np from scipy import stats from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() # transform the unit hypercube to pysical parameters for (nested) sampling + @export def unit2uniform(x, vmin, vmax): - """ - mapping from uniform distribution on parameter space - to uniform distribution on unit hypercube - """ + """Mapping from uniform distribution on parameter space to uniform distribution on + unit hypercube.""" return vmin + (vmax - vmin) * x @export def uniform2unit(theta, vmin, vmax): - """ - mapping from uniform distribution on unit hypercube - to uniform distribution on parameter space - """ + """Mapping from uniform distribution on unit hypercube to uniform distribution on + parameter space.""" return (theta - vmin) / (vmax - vmin) @export def cube2args_uniform(cube, lowers, uppers, num_dims, copy=False): - """ - mapping from uniform distribution on unit hypercube 'cube' - to uniform distribution on parameter space + """Mapping from uniform distribution on unit hypercube 'cube' to uniform + distribution on parameter space. 
:param cube: list or 1D-array of parameter values on unit hypercube :param lowers: lower bounds for each parameter @@ -52,10 +49,8 @@ def cube2args_uniform(cube, lowers, uppers, num_dims, copy=False): @export def cube2args_gaussian(cube, lowers, uppers, means, sigmas, num_dims, copy=False): - """ - mapping from uniform distribution on unit hypercube 'cube' - to truncated gaussian distribution on parameter space, - with mean 'mu' and std dev 'sigma' + """Mapping from uniform distribution on unit hypercube 'cube' to truncated gaussian + distribution on parameter space, with mean 'mu' and std dev 'sigma'. :param cube: list or 1D-array of parameter values on unit hypercube :param lowers: lower bounds for each parameter @@ -69,8 +64,10 @@ def cube2args_gaussian(cube, lowers, uppers, means, sigmas, num_dims, copy=False if copy: cube_ = cube cube = np.zeros_like(cube_) - a, b = (np.array(lowers)-means)/sigmas, (np.array(uppers)-means)/sigmas - cube[:] = stats.truncnorm.ppf(cube_ if copy else cube, a=a, b=b, loc=means, scale=sigmas) + a, b = (np.array(lowers) - means) / sigmas, (np.array(uppers) - means) / sigmas + cube[:] = stats.truncnorm.ppf( + cube_ if copy else cube, a=a, b=b, loc=means, scale=sigmas + ) return cube @@ -79,48 +76,60 @@ def scale_limits(lowers, uppers, scale): if not isinstance(lowers, np.ndarray): lowers = np.asarray(lowers) uppers = np.asarray(uppers) - mid_points = (lowers + uppers) / 2. + mid_points = (lowers + uppers) / 2.0 widths_scaled = (uppers - lowers) * scale - lowers_scaled = mid_points - widths_scaled / 2. - uppers_scaled = mid_points + widths_scaled / 2. + lowers_scaled = mid_points - widths_scaled / 2.0 + uppers_scaled = mid_points + widths_scaled / 2.0 return lowers_scaled, uppers_scaled @export -def sample_ball(p0, std, size=1, dist='uniform'): - """ - Produce a ball of walkers around an initial parameter value. - this routine is from the emcee package as it became deprecated there +def sample_ball(p0, std, size=1, dist="uniform"): + """Produce a ball of walkers around an initial parameter value. this routine is from + the emcee package as it became deprecated there. :param p0: The initial parameter values (array). :param std: The axis-aligned standard deviation (array). :param size: The number of samples to produce. - :param dist: string, specifies the distribution being sampled, supports 'uniform' and 'normal' - + :param dist: string, specifies the distribution being sampled, supports 'uniform' + and 'normal' """ - assert(len(p0) == len(std)) - if dist == 'uniform': - return np.vstack([p0 + std * np.random.uniform(low=-1, high=1, size=len(p0)) - for i in range(size)]) - elif dist == 'normal': - return np.vstack([p0 + std * np.random.normal(loc=0, scale=1, size=len(p0)) - for i in range(size)]) + assert len(p0) == len(std) + if dist == "uniform": + return np.vstack( + [ + p0 + std * np.random.uniform(low=-1, high=1, size=len(p0)) + for i in range(size) + ] + ) + elif dist == "normal": + return np.vstack( + [ + p0 + std * np.random.normal(loc=0, scale=1, size=len(p0)) + for i in range(size) + ] + ) else: - raise ValueError('distribution %s not supported. Chose among "uniform" or "normal".' % dist) + raise ValueError( + 'distribution %s not supported. Chose among "uniform" or "normal".' % dist + ) @export def sample_ball_truncated(mean, sigma, lower_limit, upper_limit, size): - """ - samples gaussian ball with truncation at lower and upper limit of the distribution + """Samples gaussian ball with truncation at lower and upper limit of the + distribution. 
:param mean: numpy array, mean of the distribution to be sampled :param sigma: numpy array, sigma of the distribution to be sampled :param lower_limit: numpy array, lower bound of to be sampled distribution :param upper_limit: numpy array, upper bound of to be sampled distribution :param size: number of tuples to be sampled - :return: realization of truncated normal distribution with shape (size, dim(parameters)) + :return: realization of truncated normal distribution with shape (size, + dim(parameters)) """ a, b = (lower_limit - mean) / sigma, (upper_limit - mean) / sigma - draws = np.vstack([mean + sigma * stats.truncnorm.rvs(a, b, size=len(a)) for i in range(size)]) + draws = np.vstack( + [mean + sigma * stats.truncnorm.rvs(a, b, size=len(a)) for i in range(size)] + ) return draws diff --git a/lenstronomy/Util/simulation_util.py b/lenstronomy/Util/simulation_util.py index e14cb6f38..792a44fe9 100644 --- a/lenstronomy/Util/simulation_util.py +++ b/lenstronomy/Util/simulation_util.py @@ -4,14 +4,21 @@ import numpy as np from lenstronomy.Util.package_util import exporter + export, __all__ = exporter() @export -def data_configure_simple(numPix, deltaPix, exposure_time=None, background_rms=None, center_ra=0, center_dec=0, - inverse=False): - """ - configures the data keyword arguments with a coordinate grid centered at zero. +def data_configure_simple( + numPix, + deltaPix, + exposure_time=None, + background_rms=None, + center_ra=0, + center_dec=0, + inverse=False, +): + """Configures the data keyword arguments with a coordinate grid centered at zero. :param numPix: number of pixel (numPix x numPix) :param deltaPix: pixel size (in angular units) @@ -20,26 +27,54 @@ def data_configure_simple(numPix, deltaPix, exposure_time=None, background_rms=N :param center_ra: RA at the center of the image :param center_dec: DEC at the center of the image :param inverse: if True, coordinate system is ra to the left, if False, to the right - :return: keyword arguments that can be used to construct a Data() class instance of lenstronomy + :return: keyword arguments that can be used to construct a Data() class instance of + lenstronomy """ - + # 1d list of coordinates (x,y) of a numPix x numPix square grid, centered to zero - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=numPix, deltapix=deltaPix, center_ra=center_ra, center_dec=center_dec, subgrid_res=1, inverse=inverse) + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix=numPix, + deltapix=deltaPix, + center_ra=center_ra, + center_dec=center_dec, + subgrid_res=1, + inverse=inverse, + ) # mask (1= model this pixel, 0= leave blanck) # exposure_map = np.ones((numPix, numPix)) * exposure_time # individual exposure time/weight per pixel kwargs_data = { - 'background_rms': background_rms, - 'exposure_time': exposure_time - , 'ra_at_xy_0': ra_at_xy_0, 'dec_at_xy_0': dec_at_xy_0, 'transform_pix2angle': Mpix2coord - , 'image_data': np.zeros((numPix, numPix)) - } + "background_rms": background_rms, + "exposure_time": exposure_time, + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + "transform_pix2angle": Mpix2coord, + "image_data": np.zeros((numPix, numPix)), + } return kwargs_data @export -def simulate_simple(image_model_class, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - no_noise=False, source_add=True, lens_light_add=True, 
point_source_add=True): +def simulate_simple( + image_model_class, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + no_noise=False, + source_add=True, + lens_light_add=True, + point_source_add=True, +): """ :param image_model_class: @@ -54,11 +89,23 @@ def simulate_simple(image_model_class, kwargs_lens=None, kwargs_source=None, kwa :return: """ - image = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_add=source_add, lens_light_add=lens_light_add, point_source_add=point_source_add) + image = image_model_class.image( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + source_add=source_add, + lens_light_add=lens_light_add, + point_source_add=point_source_add, + ) # add noise if no_noise: return image else: - poisson = image_util.add_poisson(image, exp_time=image_model_class.Data.exposure_map) - bkg = image_util.add_background(image, sigma_bkd=image_model_class.Data.background_rms) + poisson = image_util.add_poisson( + image, exp_time=image_model_class.Data.exposure_map + ) + bkg = image_util.add_background( + image, sigma_bkd=image_model_class.Data.background_rms + ) return image + bkg + poisson diff --git a/lenstronomy/Util/util.py b/lenstronomy/Util/util.py index 9c3e97c8e..90bc3cba0 100644 --- a/lenstronomy/Util/util.py +++ b/lenstronomy/Util/util.py @@ -1,5 +1,4 @@ -__author__ = 'Simon Birrer' - +__author__ = "Simon Birrer" """ this file contains standard routines """ @@ -14,10 +13,8 @@ @export def merge_dicts(*dict_args): - """ - Given any number of dicts, shallow copy and merge into a new dict, - precedence goes to key value pairs in latter dicts. - """ + """Given any number of dicts, shallow copy and merge into a new dict, precedence + goes to key value pairs in latter dicts.""" result = {} for dictionary in dict_args: result.update(dictionary) @@ -29,9 +26,9 @@ def approx_theta_E(ximg, yimg): dis = [] xinds, yinds = [0, 0, 0, 1, 1, 2], [1, 2, 3, 2, 3, 3] - for (i, j) in zip(xinds, yinds): + for i, j in zip(xinds, yinds): dx, dy = ximg[i] - ximg[j], yimg[i] - yimg[j] - dr = (dx ** 2 + dy ** 2) ** 0.5 + dr = (dx**2 + dy**2) ** 0.5 dis.append(dr) dis = np.array(dis) @@ -70,7 +67,7 @@ def sort_image_index(ximg, yimg, xref, yref): for j in range(0, int(len(x_self[0]))): dr += (x_self[i][j] - xref[j]) ** 2 + (y_self[i][j] - yref[j]) ** 2 - delta_r.append(dr ** .5) + delta_r.append(dr**0.5) min_indexes = np.array(index_iterations[np.argmin(delta_r)]) @@ -87,14 +84,15 @@ def rotate(xcoords, ycoords, angle): :param angle: angle in radians :return: x points and y points rotated ccw by angle theta """ - return xcoords * np.cos(angle) + ycoords * np.sin(angle), -xcoords * np.sin(angle) + ycoords * np.cos(angle) + return xcoords * np.cos(angle) + ycoords * np.sin(angle), -xcoords * np.sin( + angle + ) + ycoords * np.cos(angle) @export def map_coord2pix(ra, dec, x_0, y_0, M): - """ - this routines performs a linear transformation between two coordinate systems. Mainly used to transform angular - into pixel coordinates in an image + """This routines performs a linear transformation between two coordinate systems. + Mainly used to transform angular into pixel coordinates in an image. 
:param ra: ra coordinates :param dec: dec coordinates @@ -109,19 +107,21 @@ def map_coord2pix(ra, dec, x_0, y_0, M): @export def array2image(array, nx=0, ny=0): - """ - returns the information contained in a 1d array into an n*n 2d array - (only works when length of array is n**2, or nx and ny are provided) + """Returns the information contained in a 1d array into an n*n 2d array (only works + when length of array is n**2, or nx and ny are provided) :param array: image values :type array: array of size n**2 - :returns: 2d array + :returns: 2d array :raises: AttributeError, KeyError """ if nx == 0 or ny == 0: n = int(np.sqrt(len(array))) - if n ** 2 != len(array): - raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(array))) + if n**2 != len(array): + raise ValueError( + "lenght of input array given as %s is not square of integer number!" + % (len(array)) + ) nx, ny = n, n image = array.reshape(int(nx), int(ny)) return image @@ -129,12 +129,11 @@ def array2image(array, nx=0, ny=0): @export def image2array(image): - """ - returns the information contained in a 2d array into an n*n 1d array + """Returns the information contained in a 2d array into an n*n 1d array. :param image: image values :type image: array of size (n,n) - :returns: 1d array + :returns: 1d array :raises: AttributeError, KeyError """ nx, ny = image.shape # find the size of the array @@ -144,8 +143,8 @@ def image2array(image): @export def array2cube(array, n_1, n_23): - """ - returns the information contained in a 1d array of shape (n_1*n_23*n_23) into 3d array with shape (n_1, sqrt(n_23), sqrt(n_23)) + """Returns the information contained in a 1d array of shape (n_1*n_23*n_23) into 3d + array with shape (n_1, sqrt(n_23), sqrt(n_23)) :param array: image values :type array: 1d array @@ -157,8 +156,10 @@ def array2cube(array, n_1, n_23): :raises ValueError: when n_23 is not a perfect square """ n = int(np.sqrt(n_23)) - if n ** 2 != n_23: - raise ValueError("2nd and 3rd dims (%s) are not square of integer number!" % n_23) + if n**2 != n_23: + raise ValueError( + "2nd and 3rd dims (%s) are not square of integer number!" % n_23 + ) n_2, n_3 = n, n cube = array.reshape(n_1, n_2, n_3) return cube @@ -166,8 +167,8 @@ def array2cube(array, n_1, n_23): @export def cube2array(cube): - """ - returns the information contained in a 3d array of shape (n_1, n_2, n_3) into 1d array with shape (n_1*n_2*n_3) + """Returns the information contained in a 3d array of shape (n_1, n_2, n_3) into 1d + array with shape (n_1*n_2*n_3) :param cube: image values :type cube: 3d array @@ -180,13 +181,11 @@ def cube2array(cube): @export def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False): - """ - creates pixel grid (in 1d arrays of x- and y- positions) - default coordinate frame is such that (0,0) is in the center of the coordinate grid + """Creates pixel grid (in 1d arrays of x- and y- positions) default coordinate frame + is such that (0,0) is in the center of the coordinate grid. - :param numPix: number of pixels per axis - Give an integers for a square grid, or a 2-length sequence - (first, second axis length) for a non-square grid. + :param numPix: number of pixels per axis Give an integers for a square grid, or a + 2-length sequence (first, second axis length) for a non-square grid. 
:param deltapix: pixel size :param subgrid_res: sub-pixel resolution (default=1) :return: x, y position information in two 1d arrays @@ -215,7 +214,7 @@ def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False): if left_lower is True: # Shift so (0, 0) is in the "lower left" # Note this does not shift when subgrid_res = 1 - shift = -1. / 2 + 1. / (2 * subgrid_res) * np.array([1, 1]) + shift = -1.0 / 2 + 1.0 / (2 * subgrid_res) * np.array([1, 1]) else: # Shift so (0, 0) is centered shift = deltapix_eff * (numPix_eff - 1) / 2 @@ -225,8 +224,7 @@ def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False): @export def make_grid_transformed(numPix, Mpix2Angle): - """ - returns grid with linear transformation (deltaPix and rotation) + """Returns grid with linear transformation (deltaPix and rotation) :param numPix: number of Pixels :param Mpix2Angle: 2-by-2 matrix to mat a pixel to a coordinate @@ -238,10 +236,17 @@ def make_grid_transformed(numPix, Mpix2Angle): @export -def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, center_dec=0, left_lower=False, - inverse=True): - """ - same as make_grid routine, but returns the transformation matrix and shift between coordinates and pixel +def make_grid_with_coordtransform( + numPix, + deltapix, + subgrid_res=1, + center_ra=0, + center_dec=0, + left_lower=False, + inverse=True, +): + """Same as make_grid routine, but returns the transformation matrix and shift + between coordinates and pixel. :param numPix: number of pixels per axis :param deltapix: pixel scale per axis @@ -250,7 +255,8 @@ def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, :param center_dec: center of the grid :param left_lower: sets the zero point at the lower left corner of the pixels :param inverse: bool, if true sets East as left, otherwise East is righrt - :return: ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix + :return: ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, + Mpix2coord, Mcoord2pix """ numPix_eff = numPix * subgrid_res deltapix_eff = deltapix / float(subgrid_res) @@ -264,9 +270,9 @@ def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, ra_grid = matrix[:, 0] * delta_x dec_grid = matrix[:, 1] * deltapix_eff else: - ra_grid = (matrix[:, 0] - (numPix_eff - 1) / 2.) * delta_x - dec_grid = (matrix[:, 1] - (numPix_eff - 1) / 2.) * deltapix_eff - shift = (subgrid_res - 1) / (2. 
* subgrid_res) * deltapix + ra_grid = (matrix[:, 0] - (numPix_eff - 1) / 2.0) * delta_x + dec_grid = (matrix[:, 1] - (numPix_eff - 1) / 2.0) * deltapix_eff + shift = (subgrid_res - 1) / (2.0 * subgrid_res) * deltapix ra_grid += -shift + center_ra dec_grid += -shift + center_dec ra_at_xy_0 = ra_grid[0] @@ -274,18 +280,29 @@ def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, Mpix2coord = np.array([[delta_x, 0], [0, deltapix_eff]]) Mcoord2pix = np.linalg.inv(Mpix2coord) - x_at_radec_0, y_at_radec_0 = map_coord2pix(-ra_at_xy_0, -dec_at_xy_0, x_0=0, y_0=0, M=Mcoord2pix) - return ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix + x_at_radec_0, y_at_radec_0 = map_coord2pix( + -ra_at_xy_0, -dec_at_xy_0, x_0=0, y_0=0, M=Mcoord2pix + ) + return ( + ra_grid, + dec_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) @export def grid_from_coordinate_transform(nx, ny, Mpix2coord, ra_at_xy_0, dec_at_xy_0): - """ - return a grid in x and y coordinates that satisfy the coordinate system + """Return a grid in x and y coordinates that satisfy the coordinate system. :param nx: number of pixels in x-axis :param ny: number of pixels in y-axis - :param Mpix2coord: transformation matrix (2x2) of pixels into coordinate displacements + :param Mpix2coord: transformation matrix (2x2) of pixels into coordinate + displacements :param ra_at_xy_0: RA coordinate at (x,y) = (0,0) :param dec_at_xy_0: DEC coordinate at (x,y) = (0,0) :return: RA coordinate grid, DEC coordinate grid @@ -302,16 +319,18 @@ def grid_from_coordinate_transform(nx, ny, Mpix2coord, ra_at_xy_0, dec_at_xy_0): @export def get_axes(x, y): - """ - computes the axis x and y of a given 2d grid + """Computes the axis x and y of a given 2d grid. :param x: :param y: :return: """ n = int(np.sqrt(len(x))) - if n ** 2 != len(x): - raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(x))) + if n**2 != len(x): + raise ValueError( + "lenght of input array given as %s is not square of integer number!" + % (len(x)) + ) x_image = x.reshape(n, n) y_image = y.reshape(n, n) x_axes = x_image[0, :] @@ -321,25 +340,28 @@ def get_axes(x, y): @export def averaging(grid, numGrid, numPix): - """ - resize 2d pixel grid with numGrid to numPix and averages over the pixels + """Resize 2d pixel grid with numGrid to numPix and averages over the pixels. :param grid: higher resolution pixel grid :param numGrid: number of pixels per axis in the high resolution input image - :param numPix: lower number of pixels per axis in the output image (numGrid/numPix is integer number) + :param numPix: lower number of pixels per axis in the output image (numGrid/numPix + is integer number) :return: averaged pixel grid """ Nbig = numGrid Nsmall = numPix - small = grid.reshape([int(Nsmall), int(Nbig / Nsmall), int(Nsmall), int(Nbig / Nsmall)]).mean(3).mean(1) + small = ( + grid.reshape([int(Nsmall), int(Nbig / Nsmall), int(Nsmall), int(Nbig / Nsmall)]) + .mean(3) + .mean(1) + ) return small @export def displaceAbs(x, y, sourcePos_x, sourcePos_y): - """ - calculates a grid of distances to the observer in angel + """Calculates a grid of distances to the observer in angel. 
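Tying together the grid helpers reformatted above, a short sketch assuming the signatures shown in this diff (pixel numbers are illustrative):

    from lenstronomy.Util import util

    # 1d x/y coordinate arrays for a 100x100 grid, 0.05 units per pixel,
    # centered on (0, 0) by default
    x, y = util.make_grid(numPix=100, deltapix=0.05)
    image = util.array2image(x)   # reshape the 1d coordinate array to (100, 100)
    small = util.averaging(image, numGrid=100, numPix=50)   # block-average to (50, 50)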
:param x: cartesian coordinates :type x: numpy array @@ -349,12 +371,12 @@ def displaceAbs(x, y, sourcePos_x, sourcePos_y): :type sourcePos_x: float :param sourcePos_y: source position :type sourcePos_y: float - :returns: array of displacement + :returns: array of displacement :raises: AttributeError, KeyError """ x_mapped = x - sourcePos_x y_mapped = y - sourcePos_y - absmapped = np.sqrt(x_mapped ** 2 + y_mapped ** 2) + absmapped = np.sqrt(x_mapped**2 + y_mapped**2) return absmapped @@ -369,7 +391,7 @@ def get_distance(x_mins, y_mins, x_true, y_true): :return: """ if len(x_mins) != len(x_true): - return 10 ** 10 + return 10**10 dist = 0 x_true_list = np.array(x_true) y_true_list = np.array(y_true) @@ -398,14 +420,13 @@ def compare_distance(x_mapped, y_mapped): for j in range(i + 1, len(x_mapped)): dx = x_mapped[i] - x_mapped[j] dy = y_mapped[i] - y_mapped[j] - X2 += dx ** 2 + dy ** 2 + X2 += dx**2 + dy**2 return X2 @export def min_square_dist(x_1, y_1, x_2, y_2): - """ - return minimum of quadratic distance of pairs (x1, y1) to pairs (x2, y2) + """Return minimum of quadratic distance of pairs (x1, y1) to pairs (x2, y2) :param x_1: :param y_1: @@ -432,12 +453,14 @@ def selectBest(array, criteria, numSelect, highest=True): n = len(array) m = len(criteria) if n != m: - raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, m)) + raise ValueError( + "Elements in array (%s) not equal to elements in criteria (%s)" % (n, m) + ) if n < numSelect: return array array_sorted = array[criteria.argsort()] if highest: - result = array_sorted[n - numSelect:] + result = array_sorted[n - numSelect :] else: result = array_sorted[0:numSelect] return result[::-1] @@ -456,21 +479,22 @@ def select_best(array, criteria, num_select, highest=True): n = len(array) m = len(criteria) if n != m: - raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, m)) + raise ValueError( + "Elements in array (%s) not equal to elements in criteria (%s)" % (n, m) + ) if n < num_select: return array array = np.array(array) if highest is True: indexes = criteria.argsort()[::-1][:num_select] else: - indexes = criteria.argsort()[::-1][n - num_select:] + indexes = criteria.argsort()[::-1][n - num_select :] return array[indexes] @export def points_on_circle(radius, num_points, connect_ends=True): - """ - returns a set of uniform points around a circle + """Returns a set of uniform points around a circle. :param radius: radius of the circle :param num_points: number of points on the circle @@ -480,17 +504,17 @@ def points_on_circle(radius, num_points, connect_ends=True): if connect_ends: angle = np.linspace(0, 2 * np.pi, num_points) else: - angle = np.linspace(0, 2 * np.pi * (1 - 1./num_points), num_points) + angle = np.linspace(0, 2 * np.pi * (1 - 1.0 / num_points), num_points) x_coord = np.cos(angle) * radius y_coord = np.sin(angle) * radius return x_coord, y_coord + @export @jit() def local_minima_2d(a, x, y): - """ - finds (local) minima in a 2d grid - applies less rigid criteria for maximum without second-order tangential minima criteria + """Finds (local) minima in a 2d grid applies less rigid criteria for maximum without + second-order tangential minima criteria. 
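Two of the small helpers above in use (points_on_circle and displaceAbs, with the signatures shown in this diff; the positions are made up):

    from lenstronomy.Util import util

    # 8 points uniformly spaced on a circle of radius 1.5; with the default
    # connect_ends=True the first and last point coincide
    x_circ, y_circ = util.points_on_circle(radius=1.5, num_points=8)

    # distance of every grid point to a hypothetical source position
    x, y = util.make_grid(numPix=50, deltapix=0.1)
    dist = util.displaceAbs(x, y, sourcePos_x=0.2, sourcePos_y=-0.1)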
:param a: 1d array of displacements from the source positions :type a: numpy array with length numPix**2 in float @@ -498,7 +522,7 @@ def local_minima_2d(a, x, y): :type x: numpy array with length numPix**2 in float :param y: 1d coordinate grid in x-direction :type y: numpy array with length numPix**2 in float - :returns: array of indices of local minima, values of those minima + :returns: array of indices of local minima, values of those minima :raises: AttributeError, KeyError """ dim = int(np.sqrt(len(a))) @@ -506,14 +530,16 @@ def local_minima_2d(a, x, y): x_mins = [] y_mins = [] for i in range(dim + 1, len(a) - dim - 1): - if (a[i] < a[i - 1] - and a[i] < a[i + 1] - and a[i] < a[i - dim] - and a[i] < a[i + dim] - and a[i] < a[i - (dim - 1)] - and a[i] < a[i - (dim + 1)] - and a[i] < a[i + (dim - 1)] - and a[i] < a[i + (dim + 1)]): + if ( + a[i] < a[i - 1] + and a[i] < a[i + 1] + and a[i] < a[i - dim] + and a[i] < a[i + dim] + and a[i] < a[i - (dim - 1)] + and a[i] < a[i - (dim + 1)] + and a[i] < a[i + (dim - 1)] + and a[i] < a[i + (dim + 1)] + ): x_mins.append(x[i]) y_mins.append(y[i]) values.append(a[i]) @@ -523,25 +549,17 @@ def local_minima_2d(a, x, y): @export @jit() def neighborSelect(a, x, y): - """ - #TODO replace by from scipy.signal import argrelextrema for speed up - >>> from scipy.signal import argrelextrema - >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) - >>> argrelextrema(x, np.greater) - (array([3, 6]),) - >>> y = np.array([[1, 2, 1, 2], - ... [2, 2, 0, 0], - ... [5, 3, 4, 4]]) - ... - >>> argrelextrema(y, np.less, axis=1) - (array([0, 2]), array([2, 1])) - + """#TODO replace by from scipy.signal import argrelextrema for speed up >>> from + scipy.signal import argrelextrema >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> + argrelextrema(x, np.greater) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... + [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... 
>>> argrelextrema(y, np.less, + axis=1) (array([0, 2]), array([2, 1])) finds (local) minima in a 2d grid :param a: 1d array of displacements from the source positions :type a: numpy array with length numPix**2 in float - :returns: array of indices of local minima, values of those minima + :returns: array of indices of local minima, values of those minima :raises: AttributeError, KeyError """ dim = int(np.sqrt(len(a))) @@ -549,26 +567,30 @@ def neighborSelect(a, x, y): x_mins = [] y_mins = [] for i in range(dim + 1, len(a) - dim - 1): - if (a[i] < a[i - 1] - and a[i] < a[i + 1] - and a[i] < a[i - dim] - and a[i] < a[i + dim] - and a[i] < a[i - (dim - 1)] - and a[i] < a[i - (dim + 1)] - and a[i] < a[i + (dim - 1)] - and a[i] < a[i + (dim + 1)]): - if (a[i] < a[(i - 2 * dim - 1) % dim ** 2] - and a[i] < a[(i - 2 * dim + 1) % dim ** 2] - and a[i] < a[(i - dim - 2) % dim ** 2] - and a[i] < a[(i - dim + 2) % dim ** 2] - and a[i] < a[(i + dim - 2) % dim ** 2] - and a[i] < a[(i + dim + 2) % dim ** 2] - and a[i] < a[(i + 2 * dim - 1) % dim ** 2] - and a[i] < a[(i + 2 * dim + 1) % dim ** 2] - and a[i] < a[(i + 2 * dim) % dim ** 2] - and a[i] < a[(i - 2 * dim) % dim ** 2] - and a[i] < a[(i - 2) % dim ** 2] - and a[i] < a[(i + 2) % dim ** 2]): + if ( + a[i] < a[i - 1] + and a[i] < a[i + 1] + and a[i] < a[i - dim] + and a[i] < a[i + dim] + and a[i] < a[i - (dim - 1)] + and a[i] < a[i - (dim + 1)] + and a[i] < a[i + (dim - 1)] + and a[i] < a[i + (dim + 1)] + ): + if ( + a[i] < a[(i - 2 * dim - 1) % dim**2] + and a[i] < a[(i - 2 * dim + 1) % dim**2] + and a[i] < a[(i - dim - 2) % dim**2] + and a[i] < a[(i - dim + 2) % dim**2] + and a[i] < a[(i + dim - 2) % dim**2] + and a[i] < a[(i + dim + 2) % dim**2] + and a[i] < a[(i + 2 * dim - 1) % dim**2] + and a[i] < a[(i + 2 * dim + 1) % dim**2] + and a[i] < a[(i + 2 * dim) % dim**2] + and a[i] < a[(i - 2 * dim) % dim**2] + and a[i] < a[(i - 2) % dim**2] + and a[i] < a[(i + 2) % dim**2] + ): x_mins.append(x[i]) y_mins.append(y[i]) values.append(a[i]) @@ -622,8 +644,7 @@ def hyper2F2_array(a, b, c, d, x): @export def make_subgrid(ra_coord, dec_coord, subgrid_res=2): - """ - return a grid with subgrid resolution + """Return a grid with subgrid resolution. :param ra_coord: :param dec_coord: @@ -642,14 +663,18 @@ def make_subgrid(ra_coord, dec_coord, subgrid_res=2): dec_array_new = np.zeros((n * subgrid_res, n * subgrid_res)) for i in range(0, subgrid_res): for j in range(0, subgrid_res): - ra_array_new[i::subgrid_res, j::subgrid_res] = ra_array + d_ra_x * ( - -1 / 2. + 1 / (2. * subgrid_res) + j / float(subgrid_res)) + d_ra_y * ( - -1 / 2. + 1 / (2. * subgrid_res) + i / float( - subgrid_res)) - dec_array_new[i::subgrid_res, j::subgrid_res] = dec_array + d_dec_x * ( - -1 / 2. + 1 / (2. * subgrid_res) + j / float(subgrid_res)) + d_dec_y * ( - -1 / 2. + 1 / (2. 
* subgrid_res) + i / float( - subgrid_res)) + ra_array_new[i::subgrid_res, j::subgrid_res] = ( + ra_array + + d_ra_x * (-1 / 2.0 + 1 / (2.0 * subgrid_res) + j / float(subgrid_res)) + + d_ra_y * (-1 / 2.0 + 1 / (2.0 * subgrid_res) + i / float(subgrid_res)) + ) + dec_array_new[i::subgrid_res, j::subgrid_res] = ( + dec_array + + d_dec_x + * (-1 / 2.0 + 1 / (2.0 * subgrid_res) + j / float(subgrid_res)) + + d_dec_y + * (-1 / 2.0 + 1 / (2.0 * subgrid_res) + i / float(subgrid_res)) + ) ra_coords_sub = image2array(ra_array_new) dec_coords_sub = image2array(dec_array_new) @@ -658,13 +683,12 @@ def make_subgrid(ra_coord, dec_coord, subgrid_res=2): @export def convert_bool_list(n, k=None): - """ - returns a bool list of the length of the lens models + """Returns a bool list of the length of the lens models. - if k = None: returns bool list with True's - if k is int, returns bool list with False's but k'th is True - if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in the integers listed and False elsewhere - if k is a boolean list, checks for size to match the numbers of models and returns it + if k = None: returns bool list with True's if k is int, returns bool list with + False's but k'th is True if k is a list of int, e.g. [0, 3, 5], returns a bool list + with True's in the integers listed and False elsewhere if k is a boolean list, + checks for size to match the numbers of models and returns it :param n: integer, total length of output boolean list :param k: None, int, or list of ints @@ -679,8 +703,10 @@ def convert_bool_list(n, k=None): bool_list = [False] * n elif isinstance(k[0], bool): if n != len(k): - raise ValueError('length of selected lens models in format of boolean list is %s ' - 'and does not match the models of this class instance %s.' % (len(k), n)) + raise ValueError( + "length of selected lens models in format of boolean list is %s " + "and does not match the models of this class instance %s." % (len(k), n) + ) bool_list = k elif isinstance(k[0], (int, np.integer)): # list of integers bool_list = [False] * n @@ -691,15 +717,17 @@ def convert_bool_list(n, k=None): if k_i < n: bool_list[k_i] = True else: - raise ValueError("k as set by %s is not convertable in a bool string of length %s !" % (k, n)) + raise ValueError( + "k as set by %s is not convertable in a bool string of length %s !" + % (k, n) + ) else: - raise ValueError('input list k as %s not compatible' % k) + raise ValueError("input list k as %s not compatible" % k) return bool_list def area(vs): - """ - Use Green's theorem to compute the area enclosed by the given contour. + """Use Green's theorem to compute the area enclosed by the given contour. 
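For reference, Green's theorem for a closed polygonal contour reduces to the shoelace sum sketched below; this is a generic illustration of the identity behind area(), not necessarily its exact implementation, and polygon_area is a hypothetical name:

    import numpy as np

    def polygon_area(vs):
        # A = 1/2 * |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)| over the closed contour
        x, y = vs[:, 0], vs[:, 1]
        return 0.5 * np.abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    print(polygon_area(square))  # 1.0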
param vs: 2d array of vertices of a contour line return: area within contour line diff --git a/lenstronomy/Workflow/alignment_matching.py b/lenstronomy/Workflow/alignment_matching.py index 9138d3a7e..b13434580 100644 --- a/lenstronomy/Workflow/alignment_matching.py +++ b/lenstronomy/Workflow/alignment_matching.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import time import copy @@ -6,30 +6,51 @@ from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel from lenstronomy.Sampling.Samplers.pso import ParticleSwarmOptimizer -__all__ = ['AlignmentFitting', 'AlignmentLikelihood'] +__all__ = ["AlignmentFitting", "AlignmentLikelihood"] class AlignmentFitting(object): - """ - class which executes the different sampling methods - """ - def __init__(self, multi_band_list, kwargs_model, kwargs_params, band_index=0, likelihood_mask_list=None, - align_offset=True, align_rotation=False): - """ - initialise the classes of the chain and for parameter options + """Class which executes the different sampling methods.""" + + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + band_index=0, + likelihood_mask_list=None, + align_offset=True, + align_rotation=False, + ): + """Initialise the classes of the chain and for parameter options. :param align_offset: aligns shift in Ra and Dec :type align_offset: boolean :param align_rotation: aligns coordinate rotation :type align_rotation: boolean """ - self.chain = AlignmentLikelihood(multi_band_list, kwargs_model, kwargs_params, band_index, likelihood_mask_list, - align_rotation=align_rotation, align_offset=align_offset) - - def pso(self, n_particles=10, n_iterations=10, delta_shift=0.2, delta_rot=0.1, threadCount=1, mpi=False, - print_key='default'): - """ - returns the best fit for the lens model on catalogue basis with particle swarm optimizer + self.chain = AlignmentLikelihood( + multi_band_list, + kwargs_model, + kwargs_params, + band_index, + likelihood_mask_list, + align_rotation=align_rotation, + align_offset=align_offset, + ) + + def pso( + self, + n_particles=10, + n_iterations=10, + delta_shift=0.2, + delta_rot=0.1, + threadCount=1, + mpi=False, + print_key="default", + ): + """Returns the best fit for the lens model on catalogue basis with particle + swarm optimizer. :param n_particles: :param n_iterations: @@ -44,14 +65,16 @@ def pso(self, n_particles=10, n_iterations=10, delta_shift=0.2, delta_rot=0.1, t lower_limit, upper_limit = self.chain.lower_upper_limit(delta_shift, delta_rot) pool = choose_pool(mpi=mpi, processes=threadCount, use_dill=True) - pso = ParticleSwarmOptimizer(self.chain, lower_limit, upper_limit, - n_particles, pool=pool) + pso = ParticleSwarmOptimizer( + self.chain, lower_limit, upper_limit, n_particles, pool=pool + ) if init_pos is not None: - pso.set_global_best(init_pos, [0]*len(init_pos), - self.chain.likelihood(init_pos)) + pso.set_global_best( + init_pos, [0] * len(init_pos), self.chain.likelihood(init_pos) + ) if pool.is_master(): - print('Computing the %s ...' % print_key) + print("Computing the %s ..." 
% print_key) time_start = time.time() @@ -62,16 +85,22 @@ def pso(self, n_particles=10, n_iterations=10, delta_shift=0.2, delta_rot=0.1, t if pool.is_master(): time_end = time.time() print("Shifts found: ", result) - print(time_end - time_start, 'time used for ', print_key) + print(time_end - time_start, "time used for ", print_key) return kwargs_data, [chi2_list, pos_list, vel_list] class AlignmentLikelihood(object): - - def __init__(self, multi_band_list, kwargs_model, kwargs_params, band_index=0, likelihood_mask_list=None, - align_offset=True, align_rotation=False): - """ - initializes all the classes needed for the chain + def __init__( + self, + multi_band_list, + kwargs_model, + kwargs_params, + band_index=0, + likelihood_mask_list=None, + align_offset=True, + align_rotation=False, + ): + """Initializes all the classes needed for the chain. :param align_offset: aligns shift in Ra and Dec :type align_offset: boolean @@ -92,14 +121,18 @@ def __init__(self, multi_band_list, kwargs_model, kwargs_params, band_index=0, l self._kwargs_params = kwargs_params def _likelihood(self, args): - """ - routine to compute X2 given variable parameters for a MCMC/PSO chainF - """ + """Routine to compute X2 given variable parameters for a MCMC/PSO chainF.""" # generate image and computes likelihood multi_band_list = self.update_multi_band(args) - image_model = SingleBandMultiModel(multi_band_list, self._kwargs_model, - likelihood_mask_list=self._likelihood_mask_list, band_index=self._band_index) - log_likelihood = image_model.likelihood_data_given_model(source_marg=self._source_marg, **self._kwargs_params) + image_model = SingleBandMultiModel( + multi_band_list, + self._kwargs_model, + likelihood_mask_list=self._likelihood_mask_list, + band_index=self._band_index, + ) + log_likelihood = image_model.likelihood_data_given_model( + source_marg=self._source_marg, **self._kwargs_params + ) return log_likelihood def __call__(self, a): @@ -129,11 +162,11 @@ def update_data(self, args): k = 0 kwargs_data = self._kwargs_data_shifted if self._align_offset: - kwargs_data['ra_shift'] = args[k] - kwargs_data['dec_shift'] = args[k + 1] + kwargs_data["ra_shift"] = args[k] + kwargs_data["dec_shift"] = args[k + 1] k += 2 if self._align_rotation: - kwargs_data['phi_rot'] = args[k] + kwargs_data["phi_rot"] = args[k] k += 1 return kwargs_data @@ -144,10 +177,10 @@ def get_args(self, kwargs_data): """ args = [] if self._align_offset: - args.append(kwargs_data.get('ra_shift', 0)) - args.append(kwargs_data.get('dec_shift', 0)) + args.append(kwargs_data.get("ra_shift", 0)) + args.append(kwargs_data.get("dec_shift", 0)) if self._align_rotation: - args.append(kwargs_data.get('phi_rot', 0)) + args.append(kwargs_data.get("phi_rot", 0)) return args @property diff --git a/lenstronomy/Workflow/fitting_sequence.py b/lenstronomy/Workflow/fitting_sequence.py index 9ea05ccf2..58bb42fb6 100644 --- a/lenstronomy/Workflow/fitting_sequence.py +++ b/lenstronomy/Workflow/fitting_sequence.py @@ -13,18 +13,25 @@ import numpy as np import lenstronomy.Util.analysis_util as analysis_util -__all__ = ['FittingSequence'] +__all__ = ["FittingSequence"] class FittingSequence(object): - """ - class to define a sequence of fitting applied, inherit the Fitting class - this is a Workflow manager that allows to update model configurations before executing another step in the modelling - The user can take this module as an example of how to create their own workflows or build their own around the - FittingSequence - """ - def __init__(self, kwargs_data_joint, 
kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, mpi=False, - verbose=True): + """Class to define a sequence of fitting applied, inherit the Fitting class this is + a Workflow manager that allows to update model configurations before executing + another step in the modelling The user can take this module as an example of how to + create their own workflows or build their own around the FittingSequence.""" + + def __init__( + self, + kwargs_data_joint, + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + mpi=False, + verbose=True, + ): """ :param kwargs_data_joint: keyword argument specifying the data according to LikelihoodModule @@ -47,18 +54,22 @@ def __init__(self, kwargs_data_joint, kwargs_model, kwargs_constraints, kwargs_l :param verbose: bool, if True prints temporary results and indicators of the fitting process """ self.kwargs_data_joint = kwargs_data_joint - self.multi_band_list = kwargs_data_joint.get('multi_band_list', []) - self.multi_band_type = kwargs_data_joint.get('multi_band_type', 'single-band') + self.multi_band_list = kwargs_data_joint.get("multi_band_list", []) + self.multi_band_type = kwargs_data_joint.get("multi_band_type", "single-band") self._verbose = verbose self._mpi = mpi - self._updateManager = MultiBandUpdateManager(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, - num_bands=len(self.multi_band_list)) + self._updateManager = MultiBandUpdateManager( + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + num_bands=len(self.multi_band_list), + ) self._mcmc_init_samples = None @property def kwargs_fixed(self): - """ - Returns the updated kwargs_fixed from the update manager. + """Returns the updated kwargs_fixed from the update manager. 
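For context, a hypothetical layout of the kwargs_data_joint argument taken by the constructor above. The per-band dictionaries are placeholders; index 0 holds the data kwargs and index 1 the PSF kwargs, as used by the psf_iteration and align_images methods later in this diff, and the third entry is assumed to hold numerics options:

    # placeholder per-band configuration dictionaries
    kwargs_data_band0, kwargs_psf_band0, kwargs_numerics_band0 = {}, {}, {}

    kwargs_data_joint = {
        "multi_band_list": [[kwargs_data_band0, kwargs_psf_band0, kwargs_numerics_band0]],
        "multi_band_type": "single-band",  # the default read by FittingSequence above
    }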
:return: list of fixed kwargs, see UpdateManager() """ @@ -76,66 +87,65 @@ def fit_sequence(self, fitting_list): fitting_type = fitting[0] kwargs = fitting[1] - if fitting_type == 'restart': + if fitting_type == "restart": self._updateManager.set_init_state() - elif fitting_type == 'update_settings': + elif fitting_type == "update_settings": self.update_settings(**kwargs) - elif fitting_type == 'set_param_value': + elif fitting_type == "set_param_value": self.set_param_value(**kwargs) - elif fitting_type == 'fix_not_computed': + elif fitting_type == "fix_not_computed": self.fix_not_computed(**kwargs) - elif fitting_type == 'psf_iteration': + elif fitting_type == "psf_iteration": self.psf_iteration(**kwargs) - elif fitting_type == 'align_images': + elif fitting_type == "align_images": self.align_images(**kwargs) - elif fitting_type == 'calibrate_images': + elif fitting_type == "calibrate_images": self.flux_calibration(**kwargs) - elif fitting_type == 'PSO': + elif fitting_type == "PSO": kwargs_result, chain, param = self.pso(**kwargs) self._updateManager.update_param_state(**kwargs_result) chain_list.append([fitting_type, chain, param]) - elif fitting_type == 'SIMPLEX': + elif fitting_type == "SIMPLEX": kwargs_result = self.simplex(**kwargs) self._updateManager.update_param_state(**kwargs_result) chain_list.append([fitting_type, kwargs_result]) - elif fitting_type == 'MCMC': - if 'init_samples' not in kwargs: - kwargs['init_samples'] = self._mcmc_init_samples - elif kwargs['init_samples'] is None: - kwargs['init_samples'] = self._mcmc_init_samples + elif fitting_type == "MCMC": + if "init_samples" not in kwargs: + kwargs["init_samples"] = self._mcmc_init_samples + elif kwargs["init_samples"] is None: + kwargs["init_samples"] = self._mcmc_init_samples mcmc_output = self.mcmc(**kwargs) kwargs_result = self._result_from_mcmc(mcmc_output) self._updateManager.update_param_state(**kwargs_result) chain_list.append(mcmc_output) - elif fitting_type == 'Nautilus': + elif fitting_type == "Nautilus": # do importance nested sampling with Nautilus nautilus = NautilusSampler( - likelihood_module=self.likelihoodModule, mpi=self._mpi, - **kwargs) + likelihood_module=self.likelihoodModule, mpi=self._mpi, **kwargs + ) points, log_w, log_l, log_z = nautilus.run(**kwargs) chain_list.append([points, log_w, log_l, log_z]) - if kwargs.get('verbose', False): - print(len(points), 'number of points sampled') + if kwargs.get("verbose", False): + print(len(points), "number of points sampled") kwargs_result = self.best_fit_from_samples(points, log_l) self._updateManager.update_param_state(**kwargs_result) - elif fitting_type == 'nested_sampling': + elif fitting_type == "nested_sampling": ns_output = self.nested_sampling(**kwargs) chain_list.append(ns_output) - elif fitting_type == 'metropolis_hastings': - - print('Using the Metropolis--Hastings MCMC sampler in Cobaya.') + elif fitting_type == "metropolis_hastings": + print("Using the Metropolis--Hastings MCMC sampler in Cobaya.") param_class = self.param_class @@ -151,7 +161,9 @@ def fit_sequence(self, fitting_list): updated_info, sampler_type, best_fit_values = sampler.run(**kwargs) # change the best-fit values returned by cobaya into lenstronomy kwargs format - best_fit_kwargs = self.param_class.args2kwargs(best_fit_values, bijective=True) + best_fit_kwargs = self.param_class.args2kwargs( + best_fit_values, bijective=True + ) # collect the products mh_output = [updated_info, sampler_type, best_fit_kwargs] @@ -160,9 +172,11 @@ def fit_sequence(self, fitting_list): 
chain_list.append(mh_output) else: - raise ValueError("fitting_sequence {} is not supported. Please use: 'PSO', 'SIMPLEX', 'MCMC', 'metropolis_hastings', " - "'Nautilus', 'nested_sampling', 'psf_iteration', 'restart', 'update_settings', 'calibrate_images' or " - "'align_images'".format(fitting_type)) + raise ValueError( + "fitting_sequence {} is not supported. Please use: 'PSO', 'SIMPLEX', 'MCMC', 'metropolis_hastings', " + "'Nautilus', 'nested_sampling', 'psf_iteration', 'restart', 'update_settings', 'calibrate_images' or " + "'align_images'".format(fitting_type) + ) return chain_list def best_fit(self, bijective=False): @@ -175,8 +189,7 @@ def best_fit(self, bijective=False): return self._updateManager.best_fit(bijective=bijective) def update_state(self, kwargs_update): - """ - updates current best fit state to the input model keywords specified. + """Updates current best fit state to the input model keywords specified. :param kwargs_update: format of kwargs_result :return: None @@ -185,8 +198,8 @@ def update_state(self, kwargs_update): @property def best_fit_likelihood(self): - """ - returns the log likelihood of the best fit model of the current state of this class + """Returns the log likelihood of the best fit model of the current state of this + class. :return: log likelihood, float """ @@ -198,8 +211,7 @@ def best_fit_likelihood(self): @property def bic(self): - """ - Bayesian information criterion (BIC) of the model. + """Bayesian information criterion (BIC) of the model. :return: bic value, float """ @@ -226,15 +238,17 @@ def likelihoodModule(self): """ kwargs_model = self._updateManager.kwargs_model kwargs_likelihood = self._updateManager.kwargs_likelihood - likelihoodModule = LikelihoodModule(self.kwargs_data_joint, kwargs_model, self.param_class, **kwargs_likelihood) + likelihoodModule = LikelihoodModule( + self.kwargs_data_joint, kwargs_model, self.param_class, **kwargs_likelihood + ) return likelihoodModule - def simplex(self, n_iterations, method='Nelder-Mead'): - """ - Downhill simplex optimization using the Nelder-Mead algorithm. + def simplex(self, n_iterations, method="Nelder-Mead"): + """Downhill simplex optimization using the Nelder-Mead algorithm. :param n_iterations: maximum number of iterations to perform - :param method: the optimization method used, see documentation in scipy.optimize.minimize + :param method: the optimization method used, see documentation in + scipy.optimize.minimize :return: result of the best fit """ @@ -247,11 +261,23 @@ def simplex(self, n_iterations, method='Nelder-Mead'): kwargs_result = param_class.args2kwargs(result, bijective=True) return kwargs_result - def mcmc(self, n_burn, n_run, walkerRatio=None, n_walkers=None, sigma_scale=1, threadCount=1, init_samples=None, - re_use_samples=True, sampler_type='EMCEE', progress=True, backend_filename=None, start_from_backend=False, - **kwargs_zeus): - """ - MCMC routine + def mcmc( + self, + n_burn, + n_run, + walkerRatio=None, + n_walkers=None, + sigma_scale=1, + threadCount=1, + init_samples=None, + re_use_samples=True, + sampler_type="EMCEE", + progress=True, + backend_filename=None, + start_from_backend=False, + **kwargs_zeus + ): + """MCMC routine. 
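Drawing on the fit_sequence() dispatcher above, a hypothetical fitting_list combining a PSO step with an MCMC step; the kwargs keys follow the pso() and mcmc() signatures in this diff, and the numerical values are illustrative:

    fitting_list = [
        ["PSO", {"n_particles": 100, "n_iterations": 100}],
        ["MCMC", {"n_burn": 100, "n_run": 200, "walkerRatio": 10, "sigma_scale": 0.1}],
    ]
    # chain_list = fitting_seq.fit_sequence(fitting_list)
    # (fitting_seq: a fully configured FittingSequence instance, not shown here)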
:param n_burn: number of burn in iterations (will not be saved) :param n_run: number of MCMC iterations that are saved @@ -284,7 +310,9 @@ def mcmc(self, n_burn, n_run, walkerRatio=None, n_walkers=None, sigma_scale=1, t num_param, param_list = param_class.num_param() if n_walkers is None: if walkerRatio is None: - raise ValueError('MCMC sampler needs either n_walkers or walkerRatio as input argument') + raise ValueError( + "MCMC sampler needs either n_walkers or walkerRatio as input argument" + ) n_walkers = num_param * walkerRatio # run MCMC if init_samples is not None and re_use_samples is True: @@ -294,40 +322,62 @@ def mcmc(self, n_burn, n_run, walkerRatio=None, n_walkers=None, sigma_scale=1, t idxs = np.random.choice(len(init_samples), n_walkers) initpos = init_samples[idxs] else: - raise ValueError("Can not re-use previous MCMC samples as number of parameters have changed!") + raise ValueError( + "Can not re-use previous MCMC samples as number of parameters have changed!" + ) else: initpos = None - if sampler_type == 'EMCEE': - samples, dist = mcmc_class.mcmc_emcee(n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=self._mpi, - threadCount=threadCount, progress=progress, initpos=initpos, - backend_filename=backend_filename, - start_from_backend=start_from_backend) + if sampler_type == "EMCEE": + samples, dist = mcmc_class.mcmc_emcee( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=self._mpi, + threadCount=threadCount, + progress=progress, + initpos=initpos, + backend_filename=backend_filename, + start_from_backend=start_from_backend, + ) output = [sampler_type, samples, param_list, dist] - elif sampler_type == 'ZEUS': - - samples, dist = mcmc_class.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start, - mpi=self._mpi, threadCount=threadCount, - progress=progress, initpos = initpos, backend_filename = backend_filename, - **kwargs_zeus) + elif sampler_type == "ZEUS": + samples, dist = mcmc_class.mcmc_zeus( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=self._mpi, + threadCount=threadCount, + progress=progress, + initpos=initpos, + backend_filename=backend_filename, + **kwargs_zeus + ) output = [sampler_type, samples, param_list, dist] else: - raise ValueError('sampler_type %s not supported!' % sampler_type) + raise ValueError("sampler_type %s not supported!" % sampler_type) self._mcmc_init_samples = samples # overwrites previous samples to continue from there in the next MCMC run return output - def pso(self, n_particles, n_iterations, sigma_scale=1, print_key='PSO', threadCount=1): - """ - Particle Swarm Optimization + def pso( + self, n_particles, n_iterations, sigma_scale=1, print_key="PSO", threadCount=1 + ): + """Particle Swarm Optimization. :param n_particles: number of particles in the Particle Swarm Optimization :param n_iterations: number of iterations in the optimization process - :param sigma_scale: scaling of the initial parameter spread relative to the width in the initial settings + :param sigma_scale: scaling of the initial parameter spread relative to the + width in the initial settings :param print_key: string, printed text when executing this routine :param threadCount: number of CPU threads. 
If MPI option is set, threadCount=1 - :return: result of the best fit, the PSO chain of the best fit parameter after each iteration - [lnlikelihood, parameters, velocities], list of parameters in same order as in chain + :return: result of the best fit, the PSO chain of the best fit parameter after + each iteration [lnlikelihood, parameters, velocities], list of parameters in + same order as in chain """ param_class = self.param_class @@ -341,103 +391,160 @@ def pso(self, n_particles, n_iterations, sigma_scale=1, print_key='PSO', threadC num_param, param_list = param_class.num_param() # run PSO sampler = Sampler(likelihoodModule=self.likelihoodModule) - result, chain = sampler.pso(n_particles, n_iterations, lower_start, upper_start, init_pos=init_pos, - threadCount=threadCount, mpi=self._mpi, print_key=print_key) + result, chain = sampler.pso( + n_particles, + n_iterations, + lower_start, + upper_start, + init_pos=init_pos, + threadCount=threadCount, + mpi=self._mpi, + print_key=print_key, + ) kwargs_result = param_class.args2kwargs(result, bijective=True) return kwargs_result, chain, param_list - def nested_sampling(self, sampler_type='MULTINEST', kwargs_run={}, - prior_type='uniform', width_scale=1, sigma_scale=1, - output_basename='chain', remove_output_dir=True, - dypolychord_dynamic_goal=0.8, - polychord_settings={}, - dypolychord_seed_increment=200, - output_dir="nested_sampling_chains", - dynesty_bound='multi', dynesty_sample='auto'): - """ - Run (Dynamic) Nested Sampling algorithms, depending on the type of algorithm. - + def nested_sampling( + self, + sampler_type="MULTINEST", + kwargs_run={}, + prior_type="uniform", + width_scale=1, + sigma_scale=1, + output_basename="chain", + remove_output_dir=True, + dypolychord_dynamic_goal=0.8, + polychord_settings={}, + dypolychord_seed_increment=200, + output_dir="nested_sampling_chains", + dynesty_bound="multi", + dynesty_sample="auto", + ): + """Run (Dynamic) Nested Sampling algorithms, depending on the type of algorithm. + + :param sampler_type: 'MULTINEST', 'DYPOLYCHORD', 'DYNESTY' :param kwargs_run: + keywords passed to the core sampling method :param prior_type: 'uniform' of + 'gaussian', for converting the unit hypercube to param cube :param width_scale: + scale the width (lower/upper limits) of the parameters space by this factor + :param sigma_scale: if prior_type is 'gaussian', scale the gaussian sigma by + this factor :param output_basename: name of the folder in which the core + MultiNest/PolyChord code will save output files :param remove_output_dir: if + True, the above folder is removed after completion :param + dypolychord_dynamic_goal: dynamic goal for DyPolyChord (trade-off between + evidence (0) and posterior (1) computation) :param polychord_settings: settings + dictionary to send to pypolychord. Check dypolychord documentation for details. + :param dypolychord_seed_increment: seed increment for dypolychord with MPI. + Check dypolychord documentation for details. 
:param dynesty_bound: see + https://dynesty.readthedocs.io :param sampler_type: 'MULTINEST', 'DYPOLYCHORD', 'DYNESTY' :param kwargs_run: keywords passed to the core sampling method - :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to param cube - :param width_scale: scale the width (lower/upper limits) of the parameters space by this factor - :param sigma_scale: if prior_type is 'gaussian', scale the gaussian sigma by this factor - :param output_basename: name of the folder in which the core MultiNest/PolyChord code will save output files + :param prior_type: 'uniform' of 'gaussian', for converting the unit hypercube to + param cube + :param width_scale: scale the width (lower/upper limits) of the parameters space + by this factor + :param sigma_scale: if prior_type is 'gaussian', scale the gaussian sigma by + this factor + :param output_basename: name of the folder in which the core MultiNest/PolyChord + code will save output files :param remove_output_dir: if True, the above folder is removed after completion - :param dypolychord_dynamic_goal: dynamic goal for DyPolyChord (trade-off between evidence (0) and posterior (1) computation) - :param polychord_settings: settings dictionary to send to pypolychord. Check dypolychord documentation for details. - :param dypolychord_seed_increment: seed increment for dypolychord with MPI. Check dypolychord documentation for details. + :param dypolychord_dynamic_goal: dynamic goal for DyPolyChord (trade-off between + evidence (0) and posterior (1) computation) + :param polychord_settings: settings dictionary to send to pypolychord. Check + dypolychord documentation for details. + :param dypolychord_seed_increment: seed increment for dypolychord with MPI. + Check dypolychord documentation for details. 
:param dynesty_bound: see https://dynesty.readthedocs.io for details :param dynesty_sample: see https://dynesty.readthedocs.io for details - :return: list of output arguments : samples, mean inferred values, log-likelihood, log-evidence, error on log-evidence for each sample + :return: list of output arguments : samples, mean inferred values, log- + likelihood, log-evidence, error on log-evidence for each sample """ mean_start, sigma_start = self._prepare_sampling(prior_type) - if sampler_type == 'MULTINEST': - sampler = MultiNestSampler(self.likelihoodModule, - prior_type=prior_type, - prior_means=mean_start, - prior_sigmas=sigma_start, - width_scale=width_scale, - sigma_scale=sigma_scale, - output_dir=output_dir, - output_basename=output_basename, - remove_output_dir=remove_output_dir, - use_mpi=self._mpi) - samples, means, logZ, logZ_err, logL, results_object = sampler.run(kwargs_run) - - elif sampler_type == 'DYPOLYCHORD': - if 'resume_dyn_run' in kwargs_run and kwargs_run['resume_dyn_run'] is True: + if sampler_type == "MULTINEST": + sampler = MultiNestSampler( + self.likelihoodModule, + prior_type=prior_type, + prior_means=mean_start, + prior_sigmas=sigma_start, + width_scale=width_scale, + sigma_scale=sigma_scale, + output_dir=output_dir, + output_basename=output_basename, + remove_output_dir=remove_output_dir, + use_mpi=self._mpi, + ) + samples, means, logZ, logZ_err, logL, results_object = sampler.run( + kwargs_run + ) + + elif sampler_type == "DYPOLYCHORD": + if "resume_dyn_run" in kwargs_run and kwargs_run["resume_dyn_run"] is True: resume_dyn_run = True else: resume_dyn_run = False - sampler = DyPolyChordSampler(self.likelihoodModule, - prior_type=prior_type, - prior_means=mean_start, - prior_sigmas=sigma_start, - width_scale=width_scale, - sigma_scale=sigma_scale, - output_dir=output_dir, - output_basename=output_basename, - polychord_settings=polychord_settings, - remove_output_dir=remove_output_dir, - resume_dyn_run=resume_dyn_run, - use_mpi=self._mpi) - samples, means, logZ, logZ_err, logL, results_object = sampler.run(dypolychord_dynamic_goal, kwargs_run) - - elif sampler_type == 'DYNESTY': - sampler = DynestySampler(self.likelihoodModule, - prior_type=prior_type, - prior_means=mean_start, - prior_sigmas=sigma_start, - width_scale=width_scale, - sigma_scale=sigma_scale, - bound=dynesty_bound, - sample=dynesty_sample, - use_mpi=self._mpi) - samples, means, logZ, logZ_err, logL, results_object = sampler.run(kwargs_run) + sampler = DyPolyChordSampler( + self.likelihoodModule, + prior_type=prior_type, + prior_means=mean_start, + prior_sigmas=sigma_start, + width_scale=width_scale, + sigma_scale=sigma_scale, + output_dir=output_dir, + output_basename=output_basename, + polychord_settings=polychord_settings, + remove_output_dir=remove_output_dir, + resume_dyn_run=resume_dyn_run, + use_mpi=self._mpi, + ) + samples, means, logZ, logZ_err, logL, results_object = sampler.run( + dypolychord_dynamic_goal, kwargs_run + ) + + elif sampler_type == "DYNESTY": + sampler = DynestySampler( + self.likelihoodModule, + prior_type=prior_type, + prior_means=mean_start, + prior_sigmas=sigma_start, + width_scale=width_scale, + sigma_scale=sigma_scale, + bound=dynesty_bound, + sample=dynesty_sample, + use_mpi=self._mpi, + ) + samples, means, logZ, logZ_err, logL, results_object = sampler.run( + kwargs_run + ) else: - raise ValueError('Sampler type %s not supported.' % sampler_type) + raise ValueError("Sampler type %s not supported." 
% sampler_type) # update current best fit values self._update_state(samples[-1]) - output = [sampler_type, samples, sampler.param_names, logL, - logZ, logZ_err, results_object] + output = [ + sampler_type, + samples, + sampler.param_names, + logL, + logZ, + logZ_err, + results_object, + ] return output def psf_iteration(self, compute_bands=None, **kwargs_psf_iter): - """ - iterative PSF reconstruction + """Iterative PSF reconstruction. - :param compute_bands: bool list, if multiple bands, this process can be limited to a subset of bands - :param kwargs_psf_iter: keyword arguments as used or available in PSFIteration.update_iterative() definition + :param compute_bands: bool list, if multiple bands, this process can be limited + to a subset of bands + :param kwargs_psf_iter: keyword arguments as used or available in + PSFIteration.update_iterative() definition :return: 0, updated PSF is stored in self.multi_band_list """ kwargs_model = self._updateManager.kwargs_model kwargs_likelihood = self._updateManager.kwargs_likelihood - likelihood_mask_list = kwargs_likelihood.get('image_likelihood_mask_list', None) - kwargs_pixelbased = kwargs_likelihood.get('kwargs_pixelbased', None) + likelihood_mask_list = kwargs_likelihood.get("image_likelihood_mask_list", None) + kwargs_pixelbased = kwargs_likelihood.get("kwargs_pixelbased", None) kwargs_temp = self.best_fit(bijective=False) if compute_bands is None: compute_bands = [True] * len(self.multi_band_list) @@ -445,19 +552,34 @@ def psf_iteration(self, compute_bands=None, **kwargs_psf_iter): for band_index in range(len(self.multi_band_list)): if compute_bands[band_index] is True: kwargs_psf = self.multi_band_list[band_index][1] - image_model = SingleBandMultiModel(self.multi_band_list, kwargs_model, - likelihood_mask_list=likelihood_mask_list, band_index=band_index, - kwargs_pixelbased=kwargs_pixelbased) + image_model = SingleBandMultiModel( + self.multi_band_list, + kwargs_model, + likelihood_mask_list=likelihood_mask_list, + band_index=band_index, + kwargs_pixelbased=kwargs_pixelbased, + ) psf_iter = PsfFitting(image_model_class=image_model) - kwargs_psf = psf_iter.update_iterative(kwargs_psf, kwargs_params=kwargs_temp, **kwargs_psf_iter) + kwargs_psf = psf_iter.update_iterative( + kwargs_psf, kwargs_params=kwargs_temp, **kwargs_psf_iter + ) self.multi_band_list[band_index][1] = kwargs_psf return 0 - def align_images(self, n_particles=10, n_iterations=10, align_offset=True, align_rotation=False, threadCount=1, - compute_bands=None, delta_shift=0.2, delta_rot=0.1): - """ - aligns the coordinate systems of different exposures within a fixed model parameterisation by executing a PSO - with relative coordinate shifts as free parameters + def align_images( + self, + n_particles=10, + n_iterations=10, + align_offset=True, + align_rotation=False, + threadCount=1, + compute_bands=None, + delta_shift=0.2, + delta_rot=0.1, + ): + """Aligns the coordinate systems of different exposures within a fixed model + parameterisation by executing a PSO with relative coordinate shifts as free + parameters. 
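A hypothetical alignment step for fit_sequence(), using the keyword names of align_images() above; the tolerance values are illustrative:

    fitting_list = [
        ["align_images", {"n_particles": 10, "n_iterations": 10,
                          "align_offset": True, "align_rotation": False,
                          "delta_shift": 0.2, "delta_rot": 0.1}],
    ]
    # passed to FittingSequence.fit_sequence() as in the dispatcher above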
:param n_particles: number of particles in the Particle Swarm Optimization :param n_iterations: number of iterations in the optimization process @@ -467,40 +589,62 @@ def align_images(self, n_particles=10, n_iterations=10, align_offset=True, align :type align_rotation: boolean :param delta_shift: astrometric shift tolerance :param delta_rot: rotation angle tolerance [in radian] - :param compute_bands: bool list, if multiple bands, this process can be limited to a subset of bands for which - the coordinate system is being fit for best alignment to the model parameters + :param compute_bands: bool list, if multiple bands, this process can be limited + to a subset of bands for which the coordinate system is being fit for best + alignment to the model parameters :return: 0, updated coordinate system for the band(s) """ kwargs_model = self._updateManager.kwargs_model kwargs_likelihood = self._updateManager.kwargs_likelihood - likelihood_mask_list = kwargs_likelihood.get('image_likelihood_mask_list', None) + likelihood_mask_list = kwargs_likelihood.get("image_likelihood_mask_list", None) kwargs_temp = self.best_fit(bijective=False) if compute_bands is None: compute_bands = [True] * len(self.multi_band_list) for i in range(len(self.multi_band_list)): if compute_bands[i] is True: - - alignmentFitting = AlignmentFitting(self.multi_band_list, kwargs_model, kwargs_temp, band_index=i, - likelihood_mask_list=likelihood_mask_list, - align_offset=align_offset, align_rotation=align_rotation) - - kwargs_data, chain = alignmentFitting.pso(n_particles=n_particles, n_iterations=n_iterations, - delta_shift=delta_shift, delta_rot=delta_rot, - threadCount=threadCount, mpi=self._mpi, - print_key='Alignment fitting for band %s ...' % i) - print('Align completed for band %s.' % i) - print('ra_shift: %s, dec_shift: %s, phi_rot: %s' %(kwargs_data.get('ra_shift', 0), - kwargs_data.get('dec_shift', 0), - kwargs_data.get('phi_rot', 0))) + alignmentFitting = AlignmentFitting( + self.multi_band_list, + kwargs_model, + kwargs_temp, + band_index=i, + likelihood_mask_list=likelihood_mask_list, + align_offset=align_offset, + align_rotation=align_rotation, + ) + + kwargs_data, chain = alignmentFitting.pso( + n_particles=n_particles, + n_iterations=n_iterations, + delta_shift=delta_shift, + delta_rot=delta_rot, + threadCount=threadCount, + mpi=self._mpi, + print_key="Alignment fitting for band %s ..." % i, + ) + print("Align completed for band %s." % i) + print( + "ra_shift: %s, dec_shift: %s, phi_rot: %s" + % ( + kwargs_data.get("ra_shift", 0), + kwargs_data.get("dec_shift", 0), + kwargs_data.get("phi_rot", 0), + ) + ) self.multi_band_list[i][0] = kwargs_data return 0 - def flux_calibration(self, n_particles=10, n_iterations=10, threadCount=1, calibrate_bands=None, - scaling_lower_limit=0, scaling_upper_limit=1000): - """ - calibrates flux_scaling between multiple images. This routine only works in 'join-linear' model when fluxes - are meant to be identical for different bands + def flux_calibration( + self, + n_particles=10, + n_iterations=10, + threadCount=1, + calibrate_bands=None, + scaling_lower_limit=0, + scaling_upper_limit=1000, + ): + """Calibrates flux_scaling between multiple images. This routine only works in + 'join-linear' model when fluxes are meant to be identical for different bands. 
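A hypothetical flux-calibration step; fit_sequence() dispatches the 'calibrate_images' entry to flux_calibration() above, which per its docstring only works in the joint-linear multi-band mode. The band selection and scaling limits below are illustrative:

    fitting_list = [
        ["calibrate_images", {"n_particles": 20, "n_iterations": 40,
                              "calibrate_bands": [False, True],
                              "scaling_lower_limit": 0, "scaling_upper_limit": 1000}],
    ]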
:param n_particles: number of particles in the Particle Swarm Optimization :param n_iterations: number of iterations in the optimization process @@ -514,29 +658,51 @@ def flux_calibration(self, n_particles=10, n_iterations=10, threadCount=1, calib """ kwargs_model = self._updateManager.kwargs_model kwargs_temp = self.best_fit(bijective=False) - multi_band_type = self.kwargs_data_joint.get('multi_band_type', 'multi-linear') + multi_band_type = self.kwargs_data_joint.get("multi_band_type", "multi-linear") kwargs_imaging = self.likelihoodModule.kwargs_imaging - calibration_fitting = FluxCalibration(kwargs_imaging=kwargs_imaging, kwargs_model=kwargs_model, - kwargs_params=kwargs_temp, - calibrate_bands=calibrate_bands) - - multi_band_list, chain = calibration_fitting.pso(n_particles=n_particles, n_iterations=n_iterations, - threadCount=threadCount, mpi=self._mpi, - scaling_lower_limit=scaling_lower_limit, - scaling_upper_limit=scaling_upper_limit) + calibration_fitting = FluxCalibration( + kwargs_imaging=kwargs_imaging, + kwargs_model=kwargs_model, + kwargs_params=kwargs_temp, + calibrate_bands=calibrate_bands, + ) + + multi_band_list, chain = calibration_fitting.pso( + n_particles=n_particles, + n_iterations=n_iterations, + threadCount=threadCount, + mpi=self._mpi, + scaling_lower_limit=scaling_lower_limit, + scaling_upper_limit=scaling_upper_limit, + ) self.multi_band_list = multi_band_list return 0 - def update_settings(self, kwargs_model=None, kwargs_constraints=None, kwargs_likelihood=None, lens_add_fixed=None, - source_add_fixed=None, lens_light_add_fixed=None, ps_add_fixed=None, special_add_fixed=None, - lens_remove_fixed=None, source_remove_fixed=None, lens_light_remove_fixed=None, - ps_remove_fixed=None, special_remove_fixed=None, - change_source_lower_limit=None, change_source_upper_limit=None, - change_lens_lower_limit=None, change_lens_upper_limit=None, - change_sigma_lens=None, change_sigma_source=None, change_sigma_lens_light=None): - """ - updates lenstronomy settings "on the fly" + def update_settings( + self, + kwargs_model=None, + kwargs_constraints=None, + kwargs_likelihood=None, + lens_add_fixed=None, + source_add_fixed=None, + lens_light_add_fixed=None, + ps_add_fixed=None, + special_add_fixed=None, + lens_remove_fixed=None, + source_remove_fixed=None, + lens_light_remove_fixed=None, + ps_remove_fixed=None, + special_remove_fixed=None, + change_source_lower_limit=None, + change_source_upper_limit=None, + change_lens_lower_limit=None, + change_lens_upper_limit=None, + change_sigma_lens=None, + change_sigma_source=None, + change_sigma_lens_light=None, + ): + """Updates lenstronomy settings "on the fly". 
:param kwargs_model: kwargs, specified keyword arguments overwrite the existing ones :param kwargs_constraints: kwargs, specified keyword arguments overwrite the existing ones @@ -560,19 +726,36 @@ def update_settings(self, kwargs_model=None, kwargs_constraints=None, kwargs_lik :param change_sigma_lens_light: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]] :return: 0, the settings are overwritten for the next fitting step to come """ - self._updateManager.update_options(kwargs_model, kwargs_constraints, kwargs_likelihood) - self._updateManager.update_fixed(lens_add_fixed, source_add_fixed, lens_light_add_fixed, - ps_add_fixed, special_add_fixed, lens_remove_fixed, source_remove_fixed, - lens_light_remove_fixed, ps_remove_fixed, special_remove_fixed) - self._updateManager.update_limits(change_source_lower_limit, change_source_upper_limit, change_lens_lower_limit, - change_lens_upper_limit) - self._updateManager.update_sigmas(change_sigma_lens=change_sigma_lens, change_sigma_source=change_sigma_source, - change_sigma_lens_light=change_sigma_lens_light) + self._updateManager.update_options( + kwargs_model, kwargs_constraints, kwargs_likelihood + ) + self._updateManager.update_fixed( + lens_add_fixed, + source_add_fixed, + lens_light_add_fixed, + ps_add_fixed, + special_add_fixed, + lens_remove_fixed, + source_remove_fixed, + lens_light_remove_fixed, + ps_remove_fixed, + special_remove_fixed, + ) + self._updateManager.update_limits( + change_source_lower_limit, + change_source_upper_limit, + change_lens_lower_limit, + change_lens_upper_limit, + ) + self._updateManager.update_sigmas( + change_sigma_lens=change_sigma_lens, + change_sigma_source=change_sigma_source, + change_sigma_lens_light=change_sigma_lens_light, + ) return 0 def set_param_value(self, **kwargs): - """ - Set a parameter to a specific value. `kwargs` are below. + """Set a parameter to a specific value. `kwargs` are below. :param lens: [[i_model, ['param1', 'param2',...], [...]] :type lens: @@ -588,19 +771,24 @@ def set_param_value(self, **kwargs): self._updateManager.update_param_value(**kwargs) def fix_not_computed(self, free_bands): - """ - fixes lens model parameters of imaging bands/frames that are not computed and frees the parameters of the other - lens models to the initial kwargs_fixed options + """Fixes lens model parameters of imaging bands/frames that are not computed and + frees the parameters of the other lens models to the initial kwargs_fixed + options. 
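A hypothetical settings-update step followed by a band-fixing step for fit_sequence(). The lens_add_fixed format is assumed to follow the [[i_model, ['param_name', ...], [value, ...]]] pattern documented for the change_sigma_* arguments above; free_bands follows the fix_not_computed() docstring:

    fitting_list = [
        # assumed format: fix parameter 'gamma' of lens model 0 to the value 2.0
        ["update_settings", {"lens_add_fixed": [[0, ["gamma"], [2.0]]]}],
        # keep the lens model of the second band fixed while the first stays free
        ["fix_not_computed", {"free_bands": [True, False]}],
    ]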
- :param free_bands: bool list of length of imaging bands in order of imaging bands, if False: set fixed lens model + :param free_bands: bool list of length of imaging bands in order of imaging + bands, if False: set fixed lens model :return: None """ self._updateManager.fix_not_computed(free_bands=free_bands) def _prepare_sampling(self, prior_type): - if prior_type == 'gaussian': - mean_start = self.param_class.kwargs2args(**self._updateManager.parameter_state) - sigma_start = self.param_class.kwargs2args(**self._updateManager.sigma_kwargs) + if prior_type == "gaussian": + mean_start = self.param_class.kwargs2args( + **self._updateManager.parameter_state + ) + sigma_start = self.param_class.kwargs2args( + **self._updateManager.sigma_kwargs + ) mean_start = np.array(mean_start) sigma_start = np.array(sigma_start) else: @@ -626,8 +814,7 @@ def _result_from_mcmc(self, mcmc_output): return self.best_fit_from_samples(samples, logL_values) def best_fit_from_samples(self, samples, logl): - """ - return best fit (max likelihood) value of samples in lenstronomy conventions + """Return best fit (max likelihood) value of samples in lenstronomy conventions. :param samples: samples of multi-dimensional parameter space :param logl: likelihood values for each sample diff --git a/lenstronomy/Workflow/flux_calibration.py b/lenstronomy/Workflow/flux_calibration.py index 6718f05c2..32aebfbe7 100644 --- a/lenstronomy/Workflow/flux_calibration.py +++ b/lenstronomy/Workflow/flux_calibration.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import time import copy @@ -6,40 +6,52 @@ from lenstronomy.Sampling.Likelihoods.image_likelihood import ImageLikelihood from lenstronomy.Sampling.Samplers.pso import ParticleSwarmOptimizer -__all__ = ['FluxCalibration', 'CalibrationLikelihood'] +__all__ = ["FluxCalibration", "CalibrationLikelihood"] class FluxCalibration(object): - """ - class to fit coordinate system alignment and flux amplitude calibrations - """ + """Class to fit coordinate system alignment and flux amplitude calibrations.""" + def __init__(self, kwargs_imaging, kwargs_model, kwargs_params, calibrate_bands): - """ - initialise the classes of the chain and for parameter options for the flux calibration fitting + """Initialise the classes of the chain and for parameter options for the flux + calibration fitting. - :param kwargs_imaging: keyword argument related to imaging data and imaging likelihood. - Feeds into ImageLikelihood(**kwargs_imaging) + :param kwargs_imaging: keyword argument related to imaging data and imaging + likelihood. Feeds into ImageLikelihood(**kwargs_imaging) :param kwargs_model: keyword argument of model components :param kwargs_params: keyword argument of model parameters :param calibrate_bands: state which bands the flux calibration is applied to :type calibrate_bands: list of booleans of length of the imaging bands """ - multi_band_list = kwargs_imaging['multi_band_list'] - multi_band_type = kwargs_imaging['multi_band_type'] + multi_band_list = kwargs_imaging["multi_band_list"] + multi_band_type = kwargs_imaging["multi_band_type"] if calibrate_bands is None: calibrate_bands = [False] * len(multi_band_list) - if multi_band_type != 'joint-linear': - raise ValueError('flux calibration should only be done with join-linear data model!') + if multi_band_type != "joint-linear": + raise ValueError( + "flux calibration should only be done with join-linear data model!" 
+ ) self._calibrate_bands = calibrate_bands - self.chain = CalibrationLikelihood(kwargs_model, kwargs_params, - calibrate_bands=calibrate_bands, - kwargs_imaging=kwargs_imaging) - - def pso(self, n_particles=10, n_iterations=10, threadCount=1, mpi=False, scaling_lower_limit=0, - scaling_upper_limit=1000, print_key='flux calibration'): - """ - returns the best fit for the lens model on catalogue basis with particle swarm optimizer + self.chain = CalibrationLikelihood( + kwargs_model, + kwargs_params, + calibrate_bands=calibrate_bands, + kwargs_imaging=kwargs_imaging, + ) + + def pso( + self, + n_particles=10, + n_iterations=10, + threadCount=1, + mpi=False, + scaling_lower_limit=0, + scaling_upper_limit=1000, + print_key="flux calibration", + ): + """Returns the best fit for the lens model on catalogue basis with particle + swarm optimizer. :param n_particles: number of particles in the PSO :param n_iterations: number of iterations of the PSO @@ -56,11 +68,15 @@ def pso(self, n_particles=10, n_iterations=10, threadCount=1, mpi=False, scaling lower_limit = [scaling_lower_limit] * num_param upper_limit = [scaling_upper_limit] * num_param - pso = ParticleSwarmOptimizer(self.chain, lower_limit, upper_limit, n_particles, pool=pool) - pso.set_global_best(init_pos, [0]*len(init_pos), self.chain.likelihood(init_pos)) + pso = ParticleSwarmOptimizer( + self.chain, lower_limit, upper_limit, n_particles, pool=pool + ) + pso.set_global_best( + init_pos, [0] * len(init_pos), self.chain.likelihood(init_pos) + ) if pool.is_master(): - print('Computing the %s ...' % print_key) + print("Computing the %s ..." % print_key) time_start = time.time() @@ -71,16 +87,14 @@ def pso(self, n_particles=10, n_iterations=10, threadCount=1, mpi=False, scaling if pool.is_master(): time_end = time.time() print("parameters found: ", result) - print(time_end - time_start, 'time used for ', print_key) - print('Calibration completed for bands %s.' % self._calibrate_bands) + print(time_end - time_start, "time used for ", print_key) + print("Calibration completed for bands %s." % self._calibrate_bands) return multi_band_list, [chi2_list, pos_list, vel_list] class CalibrationLikelihood(object): - def __init__(self, kwargs_model, kwargs_params, calibrate_bands, kwargs_imaging): - """ - initializes all the classes needed for the chain + """Initializes all the classes needed for the chain. 
:param kwargs_model: keyword argument of model components :param kwargs_params: keyword argument of model parameters @@ -92,17 +106,17 @@ def __init__(self, kwargs_model, kwargs_params, calibrate_bands, kwargs_imaging) self._kwargs_model = kwargs_model self._kwargs_params = kwargs_params self._kwargs_imaging_likelihood = copy.deepcopy(kwargs_imaging) - self.multi_band_list = self._kwargs_imaging_likelihood['multi_band_list'] + self.multi_band_list = self._kwargs_imaging_likelihood["multi_band_list"] def _likelihood(self, args): - """ - routine to compute X2 given variable parameters for a MCMC/PSO chainF - """ + """Routine to compute X2 given variable parameters for a MCMC/PSO chainF.""" # generate image and computes likelihood multi_band_list = self.update_data(args) - self._kwargs_imaging_likelihood['multi_band_list'] = multi_band_list + self._kwargs_imaging_likelihood["multi_band_list"] = multi_band_list # this line is redundant since the self.multi_band_list variable got already updated - image_likelihood = ImageLikelihood(kwargs_model=self._kwargs_model, **self._kwargs_imaging_likelihood) + image_likelihood = ImageLikelihood( + kwargs_model=self._kwargs_model, **self._kwargs_imaging_likelihood + ) log_likelihood = image_likelihood.logL(**self._kwargs_params) return log_likelihood @@ -125,7 +139,7 @@ def update_data(self, args): for i, band in enumerate(self.multi_band_list): if self._calibrate_bands[i]: kwargs_data = band[0] - kwargs_data['flux_scaling'] = args[k] + kwargs_data["flux_scaling"] = args[k] k += 1 return self.multi_band_list @@ -138,7 +152,7 @@ def get_args(self, multi_band_list): args = [] for i, band in enumerate(multi_band_list): if self._calibrate_bands[i]: - args.append(band[0].get('flux_scaling', 1)) + args.append(band[0].get("flux_scaling", 1)) return args @property diff --git a/lenstronomy/Workflow/multi_band_manager.py b/lenstronomy/Workflow/multi_band_manager.py index b9a30c150..71a11a1f6 100644 --- a/lenstronomy/Workflow/multi_band_manager.py +++ b/lenstronomy/Workflow/multi_band_manager.py @@ -1,18 +1,28 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.Workflow.update_manager import UpdateManager import copy -__all__ = ['MultiBandUpdateManager'] +__all__ = ["MultiBandUpdateManager"] class MultiBandUpdateManager(UpdateManager): + """Specific Manager to deal with multiple images with disjoint lens model + parameterization. + + The class inherits the UpdateManager() class and adds functionalities to hold and + relieve fixed all lens model parameters of a specific frame/image for more + convenient use of the FittingSequence. """ - specific Manager to deal with multiple images with disjoint lens model parameterization. The class inherits the - UpdateManager() class and adds functionalities to hold and relieve fixed all lens model parameters of a specific - frame/image for more convenient use of the FittingSequence. 
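A short sketch of how the MultiBandUpdateManager described above is meant to be used to hold and release the lens models of individual frames; the constructor arguments, band count, and band indices are placeholders.

from lenstronomy.Workflow.multi_band_manager import MultiBandUpdateManager

def example_frame_fixing(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params):
    # Sketch only: arguments follow the signatures shown in this diff,
    # the concrete contents are assumed to be set up elsewhere.
    manager = MultiBandUpdateManager(
        kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, num_bands=3
    )
    manager.keep_frame_fixed([1, 2])  # hold lens models attached to bands 1 and 2
    manager.undo_frame_fixed([1])     # release the lens models of band 1 again
    # fix the lens models of bands flagged False, free those flagged True
    manager.fix_not_computed(free_bands=[True, False, True])
    return manager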
- """ - def __init__(self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, num_bands=0): + + def __init__( + self, + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + num_bands=0, + ): """ :param kwargs_model: keyword arguments to describe all model components used in @@ -31,15 +41,21 @@ def __init__(self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_p 'special': [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper] :param num_bands: integer, number of image bands """ - super(MultiBandUpdateManager, self).__init__(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) + super(MultiBandUpdateManager, self).__init__( + kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params + ) kwargs_lens_fixed_init, _, _, _, _, _ = self.fixed_kwargs self._kwargs_lens_fixed_init = copy.deepcopy(kwargs_lens_fixed_init) - self._index_lens_model_list = kwargs_model.get('index_lens_model_list', [None for i in range(num_bands)]) - self._index_source_list = kwargs_model.get('index_source_light_model_list', - [None for i in range(num_bands)]) - self._index_lens_light_list = kwargs_model.get('index_lens_light_model_list', - [None for i in range(num_bands)]) + self._index_lens_model_list = kwargs_model.get( + "index_lens_model_list", [None for i in range(num_bands)] + ) + self._index_source_list = kwargs_model.get( + "index_source_light_model_list", [None for i in range(num_bands)] + ) + self._index_lens_light_list = kwargs_model.get( + "index_lens_light_model_list", [None for i in range(num_bands)] + ) self._num_bands = num_bands def keep_frame_fixed(self, frame_list_fixed): @@ -51,7 +67,7 @@ def keep_frame_fixed(self, frame_list_fixed): for j in frame_list_fixed: if self._index_lens_model_list[j] is not None: for i in self._index_lens_model_list[j]: - self._lens_fixed[i] = self._kwargs_temp['kwargs_lens'][i] + self._lens_fixed[i] = self._kwargs_temp["kwargs_lens"][i] def undo_frame_fixed(self, frame_list): """ @@ -66,12 +82,12 @@ def undo_frame_fixed(self, frame_list): self._lens_fixed[i] = copy.deepcopy(self._kwargs_lens_fixed_init[i]) def fix_not_computed(self, free_bands): - """ - fix all the lens models that are part of a imaging band that is not set to be computed. Free those that are - modeled. - #TODO check for overlapping models for more automated fixing of parameters + """Fix all the lens models that are part of a imaging band that is not set to be + computed. Free those that are modeled. #TODO check for overlapping models for + more automated fixing of parameters. - :param free_bands: boolean list of length of the imaging bands, True indicates that the lens model is being fitted for + :param free_bands: boolean list of length of the imaging bands, True indicates + that the lens model is being fitted for :return: None """ undo_frame_list = [] diff --git a/lenstronomy/Workflow/psf_fitting.py b/lenstronomy/Workflow/psf_fitting.py index 4e284b79f..94e33468f 100644 --- a/lenstronomy/Workflow/psf_fitting.py +++ b/lenstronomy/Workflow/psf_fitting.py @@ -8,16 +8,16 @@ import copy from scipy import ndimage -__all__ = ['PsfFitting'] +__all__ = ["PsfFitting"] class PsfFitting(object): - """ - class to find subsequently a better psf - The method make use of a model and subtracts all the non-point source components of the model from the data. - If the model is sufficient, then the data will be a (better) representation of the actual PSF. 
The method cuts out - those point sources and combines them to update the estimate of the PSF. This is done in an iterative procedure as - the model components of the extended features is PSF-dependent (hopefully not too much). + """Class to find subsequently a better psf The method make use of a model and + subtracts all the non-point source components of the model from the data. If the + model is sufficient, then the data will be a (better) representation of the actual + PSF. The method cuts out those point sources and combines them to update the + estimate of the PSF. This is done in an iterative procedure as the model components + of the extended features is PSF-dependent (hopefully not too much). Various options can be chosen. There is no guarantee that the method works for specific data and models. @@ -52,17 +52,17 @@ def __init__(self, image_model_class): @staticmethod def calc_cornermask(kernelsize, psf_symmetry): - """ - - calculate the completeness numerically when rotational symmetry is imposed. This is the simplest 'mask' which throws away - anywhere the rotations are not fully complete ->e.g. in the corners. This ONLY accounts for information loss in + """Calculate the completeness numerically when rotational symmetry is imposed. + This is the simplest 'mask' which throws away anywhere the rotations are not + fully complete ->e.g. in the corners. This ONLY accounts for information loss in corners, not due e.g. to losses at the edges of the images. :param kernelsize: int, size of kernel array :param psf_symmetry: int, the symmetry being imposed on the data - :return: mask showing where the psf with symmetry n is incomplete due to rotation. + :return: mask showing where the psf with symmetry n is incomplete due to + rotation. """ - angle = 360. 
/ psf_symmetry + angle = 360.0 / psf_symmetry ones_im = np.ones((kernelsize, kernelsize)) corner_norm_array = np.zeros((psf_symmetry, kernelsize, kernelsize)) @@ -74,8 +74,16 @@ def calc_cornermask(kernelsize, psf_symmetry): mask = total_corner_norm < total_corner_norm.max() return mask - def update_iterative(self, kwargs_psf, kwargs_params, num_iter=10, keep_psf_error_map=True, no_break=True, - verbose=True, **kwargs_psf_update): + def update_iterative( + self, + kwargs_psf, + kwargs_params, + num_iter=10, + keep_psf_error_map=True, + no_break=True, + verbose=True, + **kwargs_psf_update + ): """ :param kwargs_psf: keyword arguments to construct the PSF() class @@ -90,33 +98,36 @@ def update_iterative(self, kwargs_psf, kwargs_params, num_iter=10, keep_psf_erro :return: keyword argument of PSF constructor for PSF() class with updated PSF """ self._image_model_class.PointSource.set_save_cache(True) - if 'kernel_point_source_init' not in kwargs_psf: - kernel_point_source_init = copy.deepcopy(kwargs_psf['kernel_point_source']) + if "kernel_point_source_init" not in kwargs_psf: + kernel_point_source_init = copy.deepcopy(kwargs_psf["kernel_point_source"]) else: - kernel_point_source_init = kwargs_psf['kernel_point_source_init'] + kernel_point_source_init = kwargs_psf["kernel_point_source_init"] kwargs_psf_new = copy.deepcopy(kwargs_psf) kwargs_psf_final = copy.deepcopy(kwargs_psf) - if 'psf_error_map' in kwargs_psf: - error_map_final = kwargs_psf['psf_error_map'] + if "psf_error_map" in kwargs_psf: + error_map_final = kwargs_psf["psf_error_map"] else: error_map_final = np.zeros_like(kernel_point_source_init) error_map_init = copy.deepcopy(error_map_final) psf_class = PSF(**kwargs_psf) self._image_model_class.update_psf(psf_class) - logL_before = self._image_model_class.likelihood_data_given_model(**kwargs_params) + logL_before = self._image_model_class.likelihood_data_given_model( + **kwargs_params + ) logL_best = copy.deepcopy(logL_before) i_best = 0 corner_mask = None - if ('corner_symmetry' in kwargs_psf_update.keys()): - if type(kwargs_psf_update['corner_symmetry']) == int: - psf_symmetry = kwargs_psf_update['psf_symmetry'] - kernel_size = len(kwargs_psf['kernel_point_source']) - corner_mask= self.calc_cornermask(kernel_size, psf_symmetry) - + if "corner_symmetry" in kwargs_psf_update.keys(): + if type(kwargs_psf_update["corner_symmetry"]) == int: + psf_symmetry = kwargs_psf_update["psf_symmetry"] + kernel_size = len(kwargs_psf["kernel_point_source"]) + corner_mask = self.calc_cornermask(kernel_size, psf_symmetry) for i in range(num_iter): - kwargs_psf_new, logL_after, error_map = self.update_psf(kwargs_psf_new, kwargs_params, corner_mask, **kwargs_psf_update) + kwargs_psf_new, logL_after, error_map = self.update_psf( + kwargs_psf_new, kwargs_params, corner_mask, **kwargs_psf_update + ) if logL_after > logL_best: kwargs_psf_final = copy.deepcopy(kwargs_psf_new) @@ -126,21 +137,38 @@ def update_iterative(self, kwargs_psf, kwargs_params, num_iter=10, keep_psf_erro else: if not no_break: if verbose: - print("iterative PSF reconstruction makes reconstruction worse in step %s - aborted" % i) + print( + "iterative PSF reconstruction makes reconstruction worse in step %s - aborted" + % i + ) break if verbose is True: print("iteration of step %s gave best reconstruction." 
% i_best) - print("log likelihood before: %s and log likelihood after: %s" % (logL_before, logL_best)) + print( + "log likelihood before: %s and log likelihood after: %s" + % (logL_before, logL_best) + ) if keep_psf_error_map is True: - kwargs_psf_final['psf_error_map'] = error_map_init + kwargs_psf_final["psf_error_map"] = error_map_init else: - kwargs_psf_final['psf_error_map'] = error_map_final - kwargs_psf_final['kernel_point_source_init'] = kernel_point_source_init + kwargs_psf_final["psf_error_map"] = error_map_final + kwargs_psf_final["kernel_point_source_init"] = kernel_point_source_init return kwargs_psf_final - def update_psf(self, kwargs_psf, kwargs_params, corner_mask = None,stacking_method='median', psf_symmetry=1, - psf_iter_factor=.2, block_center_neighbour=0, error_map_radius=None, - block_center_neighbour_error_map=None, new_procedure=True, corner_symmetry=None): + def update_psf( + self, + kwargs_psf, + kwargs_params, + corner_mask=None, + stacking_method="median", + psf_symmetry=1, + psf_iter_factor=0.2, + block_center_neighbour=0, + error_map_radius=None, + block_center_neighbour_error_map=None, + new_procedure=True, + corner_symmetry=None, + ): """ :param kwargs_psf: keyword arguments to construct the PSF() class @@ -180,94 +208,165 @@ def update_psf(self, kwargs_psf, kwargs_params, corner_mask = None,stacking_meth psf_class = PSF(**kwargs_psf) kwargs_psf_copy = copy.deepcopy(kwargs_psf) - point_source_supersampling_factor = kwargs_psf_copy.get('point_source_supersampling_factor', 1) + point_source_supersampling_factor = kwargs_psf_copy.get( + "point_source_supersampling_factor", 1 + ) - kwargs_psf_new = {'psf_type': 'PIXEL', 'kernel_point_source': kwargs_psf_copy['kernel_point_source'], - 'point_source_supersampling_factor': point_source_supersampling_factor, - 'psf_error_map': kwargs_psf_copy.get('psf_error_map', None)} + kwargs_psf_new = { + "psf_type": "PIXEL", + "kernel_point_source": kwargs_psf_copy["kernel_point_source"], + "point_source_supersampling_factor": point_source_supersampling_factor, + "psf_error_map": kwargs_psf_copy.get("psf_error_map", None), + } # if 'psf_error_map' in kwargs_psf_copy: # kwargs_psf_new['psf_error_map'] = kwargs_psf_copy['psf_error_map'] / 10 self._image_model_class.update_psf(PSF(**kwargs_psf_new)) - model, error_map_image, cov_param, param = self._image_model_class.image_linear_solve(**kwargs_params) - kwargs_ps = kwargs_params.get('kwargs_ps', None) - kwargs_lens = kwargs_params.get('kwargs_lens', None) - ra_image, dec_image, point_amp = self._image_model_class.PointSource.point_source_list(kwargs_ps, kwargs_lens) + ( + model, + error_map_image, + cov_param, + param, + ) = self._image_model_class.image_linear_solve(**kwargs_params) + kwargs_ps = kwargs_params.get("kwargs_ps", None) + kwargs_lens = kwargs_params.get("kwargs_lens", None) + ( + ra_image, + dec_image, + point_amp, + ) = self._image_model_class.PointSource.point_source_list( + kwargs_ps, kwargs_lens + ) x_, y_ = self._image_model_class.Data.map_coord2pix(ra_image, dec_image) kernel_old = psf_class.kernel_point_source kernel_size = len(kernel_old) if not new_procedure: - image_single_point_source_list = self.image_single_point_source(self._image_model_class, kwargs_params) - star_cutout_list = self.point_like_source_cutouts(x_pos=x_, y_pos=y_, - image_list=image_single_point_source_list, - cutout_size=kernel_size) - psf_kernel_list = self.cutout_psf(ra_image, dec_image, x_, y_, image_single_point_source_list, kernel_size, - kernel_old, 
block_center_neighbour=block_center_neighbour) - - kernel_new = self.combine_psf(psf_kernel_list, kernel_old, factor=psf_iter_factor, - stacking_option=stacking_method, symmetry=psf_symmetry) + image_single_point_source_list = self.image_single_point_source( + self._image_model_class, kwargs_params + ) + star_cutout_list = self.point_like_source_cutouts( + x_pos=x_, + y_pos=y_, + image_list=image_single_point_source_list, + cutout_size=kernel_size, + ) + psf_kernel_list = self.cutout_psf( + ra_image, + dec_image, + x_, + y_, + image_single_point_source_list, + kernel_size, + kernel_old, + block_center_neighbour=block_center_neighbour, + ) + + kernel_new = self.combine_psf( + psf_kernel_list, + kernel_old, + factor=psf_iter_factor, + stacking_option=stacking_method, + symmetry=psf_symmetry, + ) kernel_new = kernel_util.cut_psf(kernel_new, psf_size=kernel_size) - error_map = self.error_map_estimate(kernel_new, star_cutout_list, point_amp, x_, y_, - error_map_radius=error_map_radius, - block_center_neighbour=block_center_neighbour_error_map) + error_map = self.error_map_estimate( + kernel_new, + star_cutout_list, + point_amp, + x_, + y_, + error_map_radius=error_map_radius, + block_center_neighbour=block_center_neighbour_error_map, + ) if point_source_supersampling_factor > 1: # The current version of using a super-sampled PSF in the iterative reconstruction is to first # constrain a down-sampled version and then in a second step perform a super-sampling of it. This is not # optimal and should be changed in the future that the corrections of the super-sampled version is done # rather than constraining a totally new PSF first - kernel_new = kernel_util.subgrid_kernel(kernel_new, subgrid_res=point_source_supersampling_factor, - odd=True, num_iter=10) + kernel_new = kernel_util.subgrid_kernel( + kernel_new, + subgrid_res=point_source_supersampling_factor, + odd=True, + num_iter=10, + ) # chop edges - n_kernel = len(kwargs_psf['kernel_point_source']) + n_kernel = len(kwargs_psf["kernel_point_source"]) kernel_new = kernel_util.cut_psf(kernel_new, psf_size=n_kernel) else: - kernel_old_high_res = psf_class.kernel_point_source_supersampled(supersampling_factor=point_source_supersampling_factor) + kernel_old_high_res = psf_class.kernel_point_source_supersampled( + supersampling_factor=point_source_supersampling_factor + ) kernel_size_high = len(kernel_old_high_res) data = self._image_model_class.Data.data residuals = data - model - psf_kernel_list = self.psf_estimate_individual(ra_image, dec_image, point_amp, residuals, - cutout_size=kernel_size, kernel_guess=kernel_old_high_res, - supersampling_factor=point_source_supersampling_factor, - block_center_neighbour=block_center_neighbour) - - kernel_new = self.combine_psf(psf_kernel_list, kernel_old_high_res, factor=psf_iter_factor, - stacking_option=stacking_method, symmetry=psf_symmetry, - corner_symmetry=corner_symmetry,corner_mask = corner_mask) + psf_kernel_list = self.psf_estimate_individual( + ra_image, + dec_image, + point_amp, + residuals, + cutout_size=kernel_size, + kernel_guess=kernel_old_high_res, + supersampling_factor=point_source_supersampling_factor, + block_center_neighbour=block_center_neighbour, + ) + + kernel_new = self.combine_psf( + psf_kernel_list, + kernel_old_high_res, + factor=psf_iter_factor, + stacking_option=stacking_method, + symmetry=psf_symmetry, + corner_symmetry=corner_symmetry, + corner_mask=corner_mask, + ) kernel_new = kernel_util.cut_psf(kernel_new, psf_size=kernel_size_high) # resize kernel for error_map estimate # 
kernel_new_low = kernel_util.degrade_kernel(kernel_new, point_source_supersampling_factor) # compute error map on pixel level - error_map = self.error_map_estimate_new(kernel_new, psf_kernel_list, ra_image, dec_image, point_amp, - point_source_supersampling_factor, - error_map_radius=error_map_radius) - - kwargs_psf_new['kernel_point_source'] = kernel_new + error_map = self.error_map_estimate_new( + kernel_new, + psf_kernel_list, + ra_image, + dec_image, + point_amp, + point_source_supersampling_factor, + error_map_radius=error_map_radius, + ) + + kwargs_psf_new["kernel_point_source"] = kernel_new # if 'psf_error_map' in kwargs_psf_new: # kwargs_psf_new['psf_error_map'] *= 10 self._image_model_class.update_psf(PSF(**kwargs_psf_new)) - logL_after = self._image_model_class.likelihood_data_given_model(**kwargs_params) + logL_after = self._image_model_class.likelihood_data_given_model( + **kwargs_params + ) return kwargs_psf_new, logL_after, error_map def image_single_point_source(self, image_model_class, kwargs_params): - """ - return model without including the point source contributions as a list (for each point source individually) + """Return model without including the point source contributions as a list (for + each point source individually) :param image_model_class: ImageModel class instance - :param kwargs_params: keyword arguments of model component keyword argument lists + :param kwargs_params: keyword arguments of model component keyword argument + lists :return: list of images with point source isolated """ # reconstructed model with given psf - model, error_map, cov_param, param = image_model_class.image_linear_solve(**kwargs_params) + model, error_map, cov_param, param = image_model_class.image_linear_solve( + **kwargs_params + ) data = image_model_class.Data.data mask = image_model_class.likelihood_mask - kwargs_ps = kwargs_params.get('kwargs_ps', None) - kwargs_lens = kwargs_params.get('kwargs_lens', None) - point_source_list = self._point_sources_list(image_model_class, kwargs_ps, kwargs_lens) + kwargs_ps = kwargs_params.get("kwargs_ps", None) + kwargs_lens = kwargs_params.get("kwargs_lens", None) + point_source_list = self._point_sources_list( + image_model_class, kwargs_ps, kwargs_lens + ) n = len(point_source_list) model_single_source_list = [] for i in range(n): @@ -283,14 +382,29 @@ def _point_sources_list(image_model_class, kwargs_ps, kwargs_lens, k=None): :return: list of images containing only single point sources """ point_list = [] - ra_array, dec_array, amp_array = image_model_class.PointSource.point_source_list(kwargs_ps, kwargs_lens, k=k) + ( + ra_array, + dec_array, + amp_array, + ) = image_model_class.PointSource.point_source_list(kwargs_ps, kwargs_lens, k=k) for i in range(len(ra_array)): - point_source = image_model_class.ImageNumerics.point_source_rendering([ra_array[i]], [dec_array[i]], - [amp_array[i]]) + point_source = image_model_class.ImageNumerics.point_source_rendering( + [ra_array[i]], [dec_array[i]], [amp_array[i]] + ) point_list.append(point_source) return point_list - def cutout_psf(self, ra_image, dec_image, x, y, image_list, kernel_size, kernel_init, block_center_neighbour=0): + def cutout_psf( + self, + ra_image, + dec_image, + x, + y, + image_list, + kernel_size, + kernel_init, + block_center_neighbour=0, + ): """ :param ra_image: coordinate array of images in angles @@ -313,14 +427,27 @@ def cutout_psf(self, ra_image, dec_image, x, y, image_list, kernel_size, kernel_ kernel_list = [] for l in range(len(x)): - mask_point_source = 
self.mask_point_source(ra_image, dec_image, ra_grid, dec_grid, radius, i=l) + mask_point_source = self.mask_point_source( + ra_image, dec_image, ra_grid, dec_grid, radius, i=l + ) mask_i = mask * mask_point_source - kernel_deshifted = self.cutout_psf_single(x[l], y[l], image_list[l], mask_i, kernel_size, kernel_init) + kernel_deshifted = self.cutout_psf_single( + x[l], y[l], image_list[l], mask_i, kernel_size, kernel_init + ) kernel_list.append(kernel_deshifted) return kernel_list - def psf_estimate_individual(self, ra_image, dec_image, point_amp, residuals, cutout_size, kernel_guess, - supersampling_factor, block_center_neighbour): + def psf_estimate_individual( + self, + ra_image, + dec_image, + point_amp, + residuals, + cutout_size, + kernel_guess, + supersampling_factor, + block_center_neighbour, + ): """ :param ra_image: list; position in angular units of the image @@ -342,32 +469,44 @@ def psf_estimate_individual(self, ra_image, dec_image, point_amp, residuals, cut kernel_list = [] for l in range(len(ra_image)): - mask_point_source = self.mask_point_source(ra_image, dec_image, ra_grid, dec_grid, radius, i=l) + mask_point_source = self.mask_point_source( + ra_image, dec_image, ra_grid, dec_grid, radius, i=l + ) mask_i = mask * mask_point_source # cutout residuals x_int = int(round(x_[l])) y_int = int(round(y_[l])) - residual_cutout = kernel_util.cutout_source(x_int, y_int, residuals, cutout_size + 2, shift=False) + residual_cutout = kernel_util.cutout_source( + x_int, y_int, residuals, cutout_size + 2, shift=False + ) # cutout the mask - mask_cutout = kernel_util.cutout_source(x_int, y_int, mask_i, cutout_size + 2, shift=False) + mask_cutout = kernel_util.cutout_source( + x_int, y_int, mask_i, cutout_size + 2, shift=False + ) # apply mask residual_cutout_mask = residual_cutout * mask_cutout # re-scale residuals with point source brightness residual_cutout_mask /= point_amp[l] # enlarge residuals by super-sampling factor - residual_cutout_mask = residual_cutout_mask.repeat(supersampling_factor, axis=0).repeat(supersampling_factor, axis=1) + residual_cutout_mask = residual_cutout_mask.repeat( + supersampling_factor, axis=0 + ).repeat(supersampling_factor, axis=1) # inverse shift residuals shift_x = (x_int - x_[l]) * supersampling_factor shift_y = (y_int - y_[l]) * supersampling_factor # for odd number super-sampling if supersampling_factor % 2 == 1: - residuals_shifted = ndimage.shift(residual_cutout_mask, shift=[shift_y, shift_x], order=1) + residuals_shifted = ndimage.shift( + residual_cutout_mask, shift=[shift_y, shift_x], order=1 + ) else: # for even number super-sampling half a super-sampled pixel offset needs to be performed - residuals_shifted = ndimage.shift(residual_cutout_mask, shift=[shift_y - 0.5, shift_x - 0.5], order=1) + residuals_shifted = ndimage.shift( + residual_cutout_mask, shift=[shift_y - 0.5, shift_x - 0.5], order=1 + ) # and the last column and row need to be removed residuals_shifted = residuals_shifted[:-1, :-1] @@ -384,13 +523,12 @@ def psf_estimate_individual(self, ra_image, dec_image, point_amp, residuals, cut @staticmethod def point_like_source_cutouts(x_pos, y_pos, image_list, cutout_size): - """ - cutouts of point-like objects + """Cutouts of point-like objects. :param x_pos: list of image positions in pixel units :param y_pos: list of image position in pixel units - :param image_list: list of 2d numpy arrays with cleaned images, with all contaminating sources removed except - the point-like object to be cut out. 
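A tiny runnable illustration of the super-sampling step performed in psf_estimate_individual() above, where a residual cutout is enlarged by the super-sampling factor with numpy's repeat before the sub-pixel shift is applied; the array values are arbitrary.

import numpy as np

supersampling_factor = 3
residual_cutout = np.arange(9.0).reshape(3, 3)
residual_highres = residual_cutout.repeat(supersampling_factor, axis=0).repeat(
    supersampling_factor, axis=1
)
assert residual_highres.shape == (9, 9)  # each pixel becomes a 3x3 block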
+ :param image_list: list of 2d numpy arrays with cleaned images, with all + contaminating sources removed except the point-like object to be cut out. :param cutout_size: odd integer, size of cutout. :return: list of cutouts """ @@ -399,7 +537,9 @@ def point_like_source_cutouts(x_pos, y_pos, image_list, cutout_size): for l in range(len(x_pos)): x_int = int(round(x_pos[l])) y_int = int(round(y_pos[l])) - star_cutout = kernel_util.cutout_source(x_int, y_int, image_list[l], cutout_size, shift=False) + star_cutout = kernel_util.cutout_source( + x_int, y_int, image_list[l], cutout_size, shift=False + ) star_cutout_list.append(star_cutout) return star_cutout_list @@ -418,16 +558,22 @@ def cutout_psf_single(x, y, image, mask, kernel_size, kernel_init): # cutout the star x_int = int(round(x)) y_int = int(round(y)) - star_cutout = kernel_util.cutout_source(x_int, y_int, image, kernel_size + 2, shift=False) + star_cutout = kernel_util.cutout_source( + x_int, y_int, image, kernel_size + 2, shift=False + ) # cutout the mask - mask_cutout = kernel_util.cutout_source(x_int, y_int, mask, kernel_size + 2, shift=False) + mask_cutout = kernel_util.cutout_source( + x_int, y_int, mask, kernel_size + 2, shift=False + ) # enlarge the initial PSF kernel to the new cutout size kernel_enlarged = np.zeros((kernel_size + 2, kernel_size + 2)) kernel_enlarged[1:-1, 1:-1] = kernel_init # shift the initial kernel to the shift of the star shift_x = x_int - x shift_y = y_int - y - kernel_shifted = ndimage.shift(kernel_enlarged, shift=[-shift_y, -shift_x], order=1) + kernel_shifted = ndimage.shift( + kernel_enlarged, shift=[-shift_y, -shift_x], order=1 + ) # compute normalization of masked and unmasked region of the shifted kernel # norm_masked = np.sum(kernel_shifted[mask_i == 0]) norm_unmasked = np.sum(kernel_shifted[mask_cutout == 1]) @@ -437,48 +583,57 @@ def cutout_psf_single(x, y, image, mask, kernel_size, kernel_init): star_cutout[mask_cutout == 0] = kernel_shifted[mask_cutout == 0] star_cutout[star_cutout < 0] = 0 # de-shift kernel - kernel_deshifted = kernel_util.de_shift_kernel(star_cutout, shift_x, shift_y, iterations=20, - fractional_step_size=0.1) + kernel_deshifted = kernel_util.de_shift_kernel( + star_cutout, shift_x, shift_y, iterations=20, fractional_step_size=0.1 + ) # re-size kernel kernel_deshifted = image_util.cut_edges(kernel_deshifted, kernel_size) # re-normalize kernel again kernel_deshifted = kernel_util.kernel_norm(kernel_deshifted) return kernel_deshifted - @staticmethod ####Correct this based on Maverick Oh's note about lack of flux conservation. - def combine_psf(kernel_list_new, kernel_old, factor=1., stacking_option='median', symmetry=1, - corner_symmetry=None, corner_mask=None): + def combine_psf( + kernel_list_new, + kernel_old, + factor=1.0, + stacking_option="median", + symmetry=1, + corner_symmetry=None, + corner_mask=None, + ): ## TODO: Account for image edges/masked pixels. - """ - updates psf estimate based on old kernel and several new estimates + """Updates psf estimate based on old kernel and several new estimates. 
- :param kernel_list_new: list of new PSF kernels estimated from the point sources in the image (un-normalized) + :param kernel_list_new: list of new PSF kernels estimated from the point sources + in the image (un-normalized) :param kernel_old: old PSF kernel - :param factor: weight of updated estimate based on new and old estimate, factor=1 means new estimate, - factor=0 means old estimate + :param factor: weight of updated estimate based on new and old estimate, + factor=1 means new estimate, factor=0 means old estimate :param stacking_option: option of stacking, mean or median :param symmetry: imposed symmetry of PSF estimate - :param corner_symmetry: int, if the imposed symmetry is an odd number, the edges of the reconstructed PSF in its default form will be - clipped at the corners. corner_symmetry - 1) tracks where the residuals are being clipped by the imposed symmetry and then - 2) creates a psf with symmetry=corner symmetry which is either 1 or 360/symm = n*90. (e.g for a symmetry 6 psf you could use symmetry 2 in the corners). - 3) adds the corner_symmetry psf (which has information at the corners) to the odd symmetry PSF, in the regions - where the odd-symmetry PSF does not have complete information. + :param corner_symmetry: int, if the imposed symmetry is an odd number, the edges + of the reconstructed PSF in its default form will be clipped at the corners. + corner_symmetry 1) tracks where the residuals are being clipped by the + imposed symmetry and then 2) creates a psf with symmetry=corner symmetry + which is either 1 or 360/symm = n*90. (e.g for a symmetry 6 psf you could + use symmetry 2 in the corners). 3) adds the corner_symmetry psf (which has + information at the corners) to the odd symmetry PSF, in the regions where + the odd-symmetry PSF does not have complete information. :return: updated PSF estimate """ ## keep_corners is a boolean which tracks whether to calc PSF separately in the corners for odd symmetry rotations. keep_corners = type(corner_symmetry) == int n = int(len(kernel_list_new) * symmetry) - angle = 360. / symmetry + angle = 360.0 / symmetry kernelsize = len(kernel_old) kernel_list = np.zeros((n, kernelsize, kernelsize)) if keep_corners: n_corners = int(len(kernel_list_new * corner_symmetry)) - angle_corner = 360. / corner_symmetry + angle_corner = 360.0 / corner_symmetry corner_kernel_array = np.zeros((n_corners, kernelsize, kernelsize)) i = 0 @@ -494,26 +649,30 @@ def combine_psf(kernel_list_new, kernel_old, factor=1., stacking_option='median' ###do a rotation for the corner part of the data (i.e. if symmetry is 2 or 4). if keep_corners: for j in range(corner_symmetry): - corner_kernel_rotated = image_util.rotateImage(kernel_new, angle_corner * j) + corner_kernel_rotated = image_util.rotateImage( + kernel_new, angle_corner * j + ) corner_kernel_array[m, :, :] = corner_kernel_rotated m += 1 - if stacking_option == 'median': + if stacking_option == "median": ##previous version took the median including the old kernel (extended kernel list with rotated old kernel) # Now remove that and use the weighting later for stabilization kernel_new = np.median(kernel_list, axis=0) if keep_corners: kernel_new_corners = np.median(corner_kernel_array, axis=0) - - elif stacking_option == 'mean': + elif stacking_option == "mean": ##previous version took the mean including the old kernel. 
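A compact, runnable sketch of the rotational-symmetry stacking that combine_psf() applies, with scipy.ndimage.rotate standing in for lenstronomy's image_util.rotateImage purely for illustration; the symmetry value and stacking option are examples.

import numpy as np
from scipy import ndimage

def symmetrize_kernel(kernel, symmetry=4, stacking_option="median"):
    # Rotate the kernel by multiples of 360/symmetry degrees and stack,
    # mirroring the rotation loop in combine_psf() above.
    angle = 360.0 / symmetry
    stack = np.array(
        [
            ndimage.rotate(kernel, angle * i, reshape=False, order=1)
            for i in range(symmetry)
        ]
    )
    if stacking_option == "median":
        return np.median(stack, axis=0)
    return np.mean(stack, axis=0)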
Now remove that and use the weighting later for stabilization kernel_new = np.mean(kernel_list, axis=0) if keep_corners: kernel_new_corners = np.mean(corner_kernel_array, axis=0) else: - raise ValueError(" stack_option must be 'median' or 'mean', %s is not supported." % stacking_option) + raise ValueError( + " stack_option must be 'median' or 'mean', %s is not supported." + % stacking_option + ) ###calculate the completeness for the main rotational symmetry--> anywhere this is not 1, only use the 'corners' # kernel future improvement: do a weighted median/mean based on this normalization. @@ -529,13 +688,21 @@ def combine_psf(kernel_list_new, kernel_old, factor=1., stacking_option='median' ###just in case the old kernel is not normalized. Probably want to do this earlier elsewhere, but @simon can let me know if this is necessary here. kernel_old = kernel_util.kernel_norm(kernel_old) kernel_new = kernel_util.kernel_norm(kernel_new) - kernel_return = factor * kernel_new + (1. - factor) * kernel_old + kernel_return = factor * kernel_new + (1.0 - factor) * kernel_old return kernel_return - def error_map_estimate_new(self, psf_kernel, psf_kernel_list, ra_image, dec_image, point_amp, supersampling_factor, - error_map_radius=None): - """ - relative uncertainty in the psf model (in quadrature) per pixel based on residuals achieved in the image + def error_map_estimate_new( + self, + psf_kernel, + psf_kernel_list, + ra_image, + dec_image, + point_amp, + supersampling_factor, + error_map_radius=None, + ): + """Relative uncertainty in the psf model (in quadrature) per pixel based on + residuals achieved in the image. :param psf_kernel: PSF kernel (super-sampled) :param psf_kernel_list: list of individual best PSF kernel estimates @@ -544,25 +711,35 @@ def error_map_estimate_new(self, psf_kernel, psf_kernel_list, ra_image, dec_imag :param point_amp: image amplitude :param supersampling_factor: super-sampling factor :param error_map_radius: radius (in angle) to cut the error map - :return: psf error map such that square of the uncertainty gets boosted by error_map * (psf * amp)**2 + :return: psf error map such that square of the uncertainty gets boosted by + error_map * (psf * amp)**2 """ kernel_low = kernel_util.degrade_kernel(psf_kernel, supersampling_factor) - error_map_list = np.zeros((len(psf_kernel_list), len(kernel_low), len(kernel_low))) + error_map_list = np.zeros( + (len(psf_kernel_list), len(kernel_low), len(kernel_low)) + ) x_pos, y_pos = self._image_model_class.Data.map_coord2pix(ra_image, dec_image) for i, psf_kernel_i in enumerate(psf_kernel_list): - kernel_low_i = kernel_util.degrade_kernel(psf_kernel_i, supersampling_factor) + kernel_low_i = kernel_util.degrade_kernel( + psf_kernel_i, supersampling_factor + ) x, y, amp_i = x_pos[i], y_pos[i], point_amp[i] x_int = int(round(x)) y_int = int(round(y)) - C_D_cutout = kernel_util.cutout_source(x_int, y_int, self._image_model_class.Data.C_D, len(kernel_low_i), - shift=False) + C_D_cutout = kernel_util.cutout_source( + x_int, + y_int, + self._image_model_class.Data.C_D, + len(kernel_low_i), + shift=False, + ) residuals_i = np.abs(kernel_low - kernel_low_i) residuals_i -= np.sqrt(C_D_cutout) / amp_i residuals_i[residuals_i < 0] = 0 - error_map_list[i, :, :] = residuals_i ** 2 + error_map_list[i, :, :] = residuals_i**2 error_map = np.median(error_map_list, axis=0) error_map[kernel_low > 0] /= kernel_low[kernel_low > 0] ** 2 @@ -573,26 +750,41 @@ def error_map_estimate_new(self, psf_kernel, psf_kernel_list, ra_image, dec_imag if error_map_radius 
is not None: pixel_scale = self._image_model_class.Data.pixel_width x_grid, y_grid = util.make_grid(numPix=len(error_map), deltapix=pixel_scale) - mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x=0, center_y=0, r=error_map_radius) + mask = mask_util.mask_azimuthal( + x_grid, y_grid, center_x=0, center_y=0, r=error_map_radius + ) error_map *= util.array2image(mask) return error_map - def error_map_estimate(self, kernel, star_cutout_list, amp, x_pos, y_pos, error_map_radius=None, - block_center_neighbour=0): - """ - provides a psf_error_map based on the goodness of fit of the given PSF kernel on the point source cutouts, - their estimated amplitudes and positions + def error_map_estimate( + self, + kernel, + star_cutout_list, + amp, + x_pos, + y_pos, + error_map_radius=None, + block_center_neighbour=0, + ): + """Provides a psf_error_map based on the goodness of fit of the given PSF kernel + on the point source cutouts, their estimated amplitudes and positions. :param kernel: PSF kernel - :param star_cutout_list: list of 2d arrays of cutouts of the point sources with all other model components subtracted + :param star_cutout_list: list of 2d arrays of cutouts of the point sources with + all other model components subtracted :param amp: list of amplitudes of the estimated PSF kernel - :param x_pos: pixel position (in original data unit, not in cutout) of the point sources (same order as amp and star cutouts) - :param y_pos: pixel position (in original data unit, not in cutout) of the point sources (same order as amp and star cutouts) - :param error_map_radius: float, radius (in arc seconds) of the outermost error in the PSF estimate (e.g. to avoid double counting of overlapping PSF erros) - :param block_center_neighbour: angle, radius of neighbouring point sources around their centers the estimates - is ignored. Default is zero, meaning a not optimal subtraction of the neighbouring point sources might - contaminate the estimate. - :return: relative uncertainty in the psf model (in quadrature) per pixel based on residuals achieved in the image + :param x_pos: pixel position (in original data unit, not in cutout) of the point + sources (same order as amp and star cutouts) + :param y_pos: pixel position (in original data unit, not in cutout) of the point + sources (same order as amp and star cutouts) + :param error_map_radius: float, radius (in arc seconds) of the outermost error + in the PSF estimate (e.g. to avoid double counting of overlapping PSF erros) + :param block_center_neighbour: angle, radius of neighbouring point sources + around their centers the estimates is ignored. Default is zero, meaning a + not optimal subtraction of the neighbouring point sources might contaminate + the estimate. 
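A small, runnable numpy sketch of the relative error-map logic used in error_map_estimate_new() above; the shot-noise and point-source-amplitude terms are omitted for brevity and the kernels are synthetic.

import numpy as np

rng = np.random.default_rng(0)
kernel = np.ones((5, 5)) / 25.0  # synthetic stacked kernel estimate
kernel_list = [kernel * (1.0 + 0.05 * rng.standard_normal((5, 5))) for _ in range(4)]

# squared residuals of each individual estimate, median-combined per pixel,
# then expressed relative to the kernel itself (in quadrature), as above
residuals = np.array([np.abs(kernel - k_i) for k_i in kernel_list])
error_map = np.median(residuals**2, axis=0)
error_map[kernel > 0] /= kernel[kernel > 0] ** 2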
+ :return: relative uncertainty in the psf model (in quadrature) per pixel based + on residuals achieved in the image """ error_map_list = np.zeros((len(star_cutout_list), len(kernel), len(kernel))) mask_list = np.zeros((len(star_cutout_list), len(kernel), len(kernel))) @@ -613,18 +805,22 @@ def error_map_estimate(self, kernel, star_cutout_list, amp, x_pos, y_pos, error_ # compute residuals residual = np.abs(star - model) # subtract background and Poisson noise residuals - C_D_cutout = kernel_util.cutout_source(x_int, y_int, self._image_model_class.Data.C_D, len(star), - shift=False) + C_D_cutout = kernel_util.cutout_source( + x_int, y_int, self._image_model_class.Data.C_D, len(star), shift=False + ) # block neighbor points in error estimate - mask_point_source = self.mask_point_source(x_pos, y_pos, ra_grid, dec_grid, radius=block_center_neighbour, - i=i) + mask_point_source = self.mask_point_source( + x_pos, y_pos, ra_grid, dec_grid, radius=block_center_neighbour, i=i + ) mask_i = mask * mask_point_source - mask_i = kernel_util.cutout_source(x_int, y_int, mask_i, len(star), shift=False) + mask_i = kernel_util.cutout_source( + x_int, y_int, mask_i, len(star), shift=False + ) residual -= np.sqrt(C_D_cutout) residual[residual < 0] = 0 # estimate relative error per star residual /= amp_i - error_map_list[i, :, :] = residual ** 2 * mask_i + error_map_list[i, :, :] = residual**2 * mask_i mask_list[i, :, :] = mask_i # take median absolute error for each pixel # TODO: only for pixels that are not masked @@ -637,7 +833,9 @@ def error_map_estimate(self, kernel, star_cutout_list, amp, x_pos, y_pos, error_ if error_map_radius is not None: pixel_scale = self._image_model_class.Data.pixel_width x_grid, y_grid = util.make_grid(numPix=len(error_map), deltapix=pixel_scale) - mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x=0, center_y=0, r=error_map_radius) + mask = mask_util.mask_azimuthal( + x_grid, y_grid, center_x=0, center_y=0, r=error_map_radius + ) error_map *= util.array2image(mask) return error_map @@ -656,6 +854,8 @@ def mask_point_source(x_pos, y_pos, x_grid, y_grid, radius, i=0): mask = np.ones_like(x_grid) for k in range(len(x_pos)): if k != i: - mask_point = 1 - mask_util.mask_azimuthal(x_grid, y_grid, x_pos[k], y_pos[k], radius) + mask_point = 1 - mask_util.mask_azimuthal( + x_grid, y_grid, x_pos[k], y_pos[k], radius + ) mask *= mask_point return util.array2image(mask) diff --git a/lenstronomy/Workflow/update_manager.py b/lenstronomy/Workflow/update_manager.py index cb89f21ca..dfde84eff 100644 --- a/lenstronomy/Workflow/update_manager.py +++ b/lenstronomy/Workflow/update_manager.py @@ -1,17 +1,19 @@ import copy from lenstronomy.Sampling.parameters import Param -__all__ = ['UpdateManager'] +__all__ = ["UpdateManager"] class UpdateManager(object): - """ - this class manages the parameter constraints as they may evolve through the steps of the modeling. - This includes: keeping certain parameters fixed during one modelling step + """This class manages the parameter constraints as they may evolve through the steps + of the modeling. 
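An illustrative sketch of the kwargs_params structure the UpdateManager above unpacks, where each model component maps to the five lists [kwargs_init, kwargs_sigma, kwargs_fixed, kwargs_lower, kwargs_upper]; the single lens model and its parameter values are placeholders and the other keyword arguments are assumed to be prepared elsewhere.

from lenstronomy.Workflow.update_manager import UpdateManager

def example_update_manager(kwargs_model, kwargs_constraints, kwargs_likelihood):
    # Sketch only: one lens model with placeholder parameter values.
    kwargs_params = {
        "lens_model": [
            [{"theta_E": 1.0, "gamma": 2.0}],  # kwargs_init
            [{"theta_E": 0.1, "gamma": 0.1}],  # kwargs_sigma
            [{}],                              # kwargs_fixed
            [{"theta_E": 0.1, "gamma": 1.5}],  # kwargs_lower
            [{"theta_E": 3.0, "gamma": 2.5}],  # kwargs_upper
        ],
    }
    return UpdateManager(
        kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params
    )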
+ This includes: keeping certain parameters fixed during one modelling step """ - def __init__(self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params): + def __init__( + self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params + ): """ :param kwargs_model: keyword arguments to describe all model components used in @@ -34,36 +36,102 @@ def __init__(self, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_p self.kwargs_constraints = kwargs_constraints self.kwargs_likelihood = kwargs_likelihood - if kwargs_model.get('lens_model_list', None) is not None: - self._lens_init, self._lens_sigma, self._lens_fixed, self._lens_lower, self._lens_upper = kwargs_params[ - 'lens_model'] + if kwargs_model.get("lens_model_list", None) is not None: + ( + self._lens_init, + self._lens_sigma, + self._lens_fixed, + self._lens_lower, + self._lens_upper, + ) = kwargs_params["lens_model"] else: - self._lens_init, self._lens_sigma, self._lens_fixed, self._lens_lower, self._lens_upper = [], [], [], [], [] - if kwargs_model.get('source_light_model_list', None) is not None: - self._source_init, self._source_sigma, self._source_fixed, self._source_lower, self._source_upper = \ - kwargs_params['source_model'] + ( + self._lens_init, + self._lens_sigma, + self._lens_fixed, + self._lens_lower, + self._lens_upper, + ) = ([], [], [], [], []) + if kwargs_model.get("source_light_model_list", None) is not None: + ( + self._source_init, + self._source_sigma, + self._source_fixed, + self._source_lower, + self._source_upper, + ) = kwargs_params["source_model"] else: - self._source_init, self._source_sigma, self._source_fixed, self._source_lower, self._source_upper = [], [], [], [], [] - if kwargs_model.get('lens_light_model_list', None) is not None: - self._lens_light_init, self._lens_light_sigma, self._lens_light_fixed, self._lens_light_lower, self._lens_light_upper = \ - kwargs_params['lens_light_model'] + ( + self._source_init, + self._source_sigma, + self._source_fixed, + self._source_lower, + self._source_upper, + ) = ([], [], [], [], []) + if kwargs_model.get("lens_light_model_list", None) is not None: + ( + self._lens_light_init, + self._lens_light_sigma, + self._lens_light_fixed, + self._lens_light_lower, + self._lens_light_upper, + ) = kwargs_params["lens_light_model"] else: - self._lens_light_init, self._lens_light_sigma, self._lens_light_fixed, self._lens_light_lower, self._lens_light_upper = [], [], [], [], [] - if kwargs_model.get('point_source_model_list', None) is not None: - self._ps_init, self._ps_sigma, self._ps_fixed, self._ps_lower, self._ps_upper = kwargs_params[ - 'point_source_model'] + ( + self._lens_light_init, + self._lens_light_sigma, + self._lens_light_fixed, + self._lens_light_lower, + self._lens_light_upper, + ) = ([], [], [], [], []) + if kwargs_model.get("point_source_model_list", None) is not None: + ( + self._ps_init, + self._ps_sigma, + self._ps_fixed, + self._ps_lower, + self._ps_upper, + ) = kwargs_params["point_source_model"] else: - self._ps_init, self._ps_sigma, self._ps_fixed, self._ps_lower, self._ps_upper = [], [], [], [], [] - if kwargs_model.get('optical_depth_model_list', None) is not None: - self._extinction_init, self._extinction_sigma, self._extinction_fixed, self._extinction_lower, self._extinction_upper = kwargs_params[ - 'extinction_model'] + ( + self._ps_init, + self._ps_sigma, + self._ps_fixed, + self._ps_lower, + self._ps_upper, + ) = ([], [], [], [], []) + if kwargs_model.get("optical_depth_model_list", None) is not None: + ( + 
self._extinction_init, + self._extinction_sigma, + self._extinction_fixed, + self._extinction_lower, + self._extinction_upper, + ) = kwargs_params["extinction_model"] else: - self._extinction_init, self._extinction_sigma, self._extinction_fixed, self._extinction_lower, self._extinction_upper = [], [], [], [], [] - if 'special' in kwargs_params: - self._special_init, self._special_sigma, self._special_fixed, self._special_lower, self._special_upper = \ - kwargs_params['special'] + ( + self._extinction_init, + self._extinction_sigma, + self._extinction_fixed, + self._extinction_lower, + self._extinction_upper, + ) = ([], [], [], [], []) + if "special" in kwargs_params: + ( + self._special_init, + self._special_sigma, + self._special_fixed, + self._special_lower, + self._special_upper, + ) = kwargs_params["special"] else: - self._special_init, self._special_sigma, self._special_fixed, self._special_lower, self._special_upper = {}, {}, {}, {}, {} + ( + self._special_init, + self._special_sigma, + self._special_fixed, + self._special_lower, + self._special_upper, + ) = ({}, {}, {}, {}, {}) self._kwargs_temp = self.init_kwargs @@ -76,9 +144,14 @@ def init_kwargs(self): :return: keyword arguments for all model components of the initial mean model proposition in the sampling """ - return {'kwargs_lens': self._lens_init, 'kwargs_source': self._source_init, - 'kwargs_lens_light': self._lens_light_init, 'kwargs_ps': self._ps_init, - 'kwargs_special': self._special_init, 'kwargs_extinction': self._extinction_init} + return { + "kwargs_lens": self._lens_init, + "kwargs_source": self._source_init, + "kwargs_lens_light": self._lens_light_init, + "kwargs_ps": self._ps_init, + "kwargs_special": self._special_init, + "kwargs_extinction": self._extinction_init, + } @property def sigma_kwargs(self): @@ -86,25 +159,50 @@ def sigma_kwargs(self): :return: keyword arguments for all model components of the initial 1-sigma width proposition in the sampling """ - return {'kwargs_lens': self._lens_sigma, 'kwargs_source': self._source_sigma, - 'kwargs_lens_light': self._lens_light_sigma, 'kwargs_ps': self._ps_sigma, - 'kwargs_special': self._special_sigma, 'kwargs_extinction': self._extinction_sigma} + return { + "kwargs_lens": self._lens_sigma, + "kwargs_source": self._source_sigma, + "kwargs_lens_light": self._lens_light_sigma, + "kwargs_ps": self._ps_sigma, + "kwargs_special": self._special_sigma, + "kwargs_extinction": self._extinction_sigma, + } @property def _lower_kwargs(self): - return self._lens_lower, self._source_lower, self._lens_light_lower, self._ps_lower, self._special_lower, self._extinction_lower + return ( + self._lens_lower, + self._source_lower, + self._lens_light_lower, + self._ps_lower, + self._special_lower, + self._extinction_lower, + ) @property def _upper_kwargs(self): - return self._lens_upper, self._source_upper, self._lens_light_upper, self._ps_upper, self._special_upper, self._extinction_upper + return ( + self._lens_upper, + self._source_upper, + self._lens_light_upper, + self._ps_upper, + self._special_upper, + self._extinction_upper, + ) @property def fixed_kwargs(self): - return self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._special_fixed, self._extinction_fixed + return ( + self._lens_fixed, + self._source_fixed, + self._lens_light_fixed, + self._ps_fixed, + self._special_fixed, + self._extinction_fixed, + ) def set_init_state(self): - """ - set the current state of the parameters to the initial one. 
+ """Set the current state of the parameters to the initial one. :return: """ @@ -119,31 +217,55 @@ def parameter_state(self): return self._kwargs_temp def best_fit(self, bijective=False): - """ - best fit (max likelihood) position for all the model parameters - - :param bijective: boolean, if True, returns the parameters in the argument of the sampling that might deviate - from the convention of the ImSim module. For example, if parameterized in the image position, the parameters - remain in the image plane rather than being mapped to the source plane. - :return: kwargs_result with all the keyword arguments of the best fit for the model components - """ - lens_temp, source_temp, lens_light_temp, ps_temp, special_temp, extinction_temp = self._kwargs_temp['kwargs_lens'], \ - self._kwargs_temp['kwargs_source'], \ - self._kwargs_temp['kwargs_lens_light'], \ - self._kwargs_temp['kwargs_ps'], \ - self._kwargs_temp['kwargs_special'], \ - self._kwargs_temp['kwargs_extinction'] + """Best fit (max likelihood) position for all the model parameters. + + :param bijective: boolean, if True, returns the parameters in the argument of + the sampling that might deviate from the convention of the ImSim module. For + example, if parameterized in the image position, the parameters remain in + the image plane rather than being mapped to the source plane. + :return: kwargs_result with all the keyword arguments of the best fit for the + model components + """ + ( + lens_temp, + source_temp, + lens_light_temp, + ps_temp, + special_temp, + extinction_temp, + ) = ( + self._kwargs_temp["kwargs_lens"], + self._kwargs_temp["kwargs_source"], + self._kwargs_temp["kwargs_lens_light"], + self._kwargs_temp["kwargs_ps"], + self._kwargs_temp["kwargs_special"], + self._kwargs_temp["kwargs_extinction"], + ) if bijective is False: - lens_temp = self.param_class.update_lens_scaling(special_temp, lens_temp, inverse=False) + lens_temp = self.param_class.update_lens_scaling( + special_temp, lens_temp, inverse=False + ) source_temp = self.param_class.image2source_plane(source_temp, lens_temp) - return {'kwargs_lens': lens_temp, 'kwargs_source': source_temp, 'kwargs_lens_light': lens_light_temp, - 'kwargs_ps': ps_temp, 'kwargs_special': special_temp, 'kwargs_extinction': extinction_temp} - - def update_param_state(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_special=None, kwargs_extinction=None): - """ - updates the temporary state of the parameters being saved. ATTENTION: Any previous knowledge gets lost if you - call this function + return { + "kwargs_lens": lens_temp, + "kwargs_source": source_temp, + "kwargs_lens_light": lens_light_temp, + "kwargs_ps": ps_temp, + "kwargs_special": special_temp, + "kwargs_extinction": extinction_temp, + } + + def update_param_state( + self, + kwargs_lens=None, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): + """Updates the temporary state of the parameters being saved. ATTENTION: Any + previous knowledge gets lost if you call this function. 
:param kwargs_lens: :param kwargs_source: @@ -153,14 +275,18 @@ def update_param_state(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_l :param kwargs_extinction: :return: """ - self._kwargs_temp = {'kwargs_lens': kwargs_lens, 'kwargs_source': kwargs_source, - 'kwargs_lens_light': kwargs_lens_light, 'kwargs_ps': kwargs_ps, - 'kwargs_special': kwargs_special, 'kwargs_extinction': kwargs_extinction} + self._kwargs_temp = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + "kwargs_ps": kwargs_ps, + "kwargs_special": kwargs_special, + "kwargs_extinction": kwargs_extinction, + } self.update_kwargs_model(kwargs_special) def update_param_value(self, lens=None, source=None, lens_light=None, ps=None): - """ - Set a model parameter to a specific value. + """Set a model parameter to a specific value. :param lens: [[i_model, ['param1', 'param2',...], [...]] :param source: [[i_model, ['param1', 'param2',...], [...]] @@ -176,8 +302,10 @@ def update_param_value(self, lens=None, source=None, lens_light=None, ps=None): lens_light = [] if ps is None: ps = [] - for items, kwargs_key in zip([lens, source, lens_light, ps], - ['kwargs_lens', 'kwargs_source', 'kwargs_lens_light', 'kwargs_ps']): + for items, kwargs_key in zip( + [lens, source, lens_light, ps], + ["kwargs_lens", "kwargs_source", "kwargs_lens_light", "kwargs_ps"], + ): for item in items: index = item[0] keys = item[1] @@ -188,39 +316,77 @@ def update_param_value(self, lens=None, source=None, lens_light=None, ps=None): @property def param_class(self): - """ - creating instance of lenstronomy Param() class. It uses the keyword arguments in self.kwargs_constraints as - __init__() arguments, as well as self.kwargs_model, and the set of kwargs_fixed___, kwargs_lower___, - kwargs_upper___ arguments for lens, lens_light, source, point source, extinction and special parameters. + """Creating instance of lenstronomy Param() class. It uses the keyword arguments + in self.kwargs_constraints as __init__() arguments, as well as + self.kwargs_model, and the set of kwargs_fixed___, kwargs_lower___, + kwargs_upper___ arguments for lens, lens_light, source, point source, extinction + and special parameters. 
:return: instance of the Param class with the recent options and bounds """ - kwargs_fixed_lens, kwargs_fixed_source, kwargs_fixed_lens_light, kwargs_fixed_ps, kwargs_fixed_special, kwargs_fixed_extinction = self.fixed_kwargs - kwargs_lower_lens, kwargs_lower_source, kwargs_lower_lens_light, kwargs_lower_ps, kwargs_lower_special, kwargs_lower_extinction = self._lower_kwargs - kwargs_upper_lens, kwargs_upper_source, kwargs_upper_lens_light, kwargs_upper_ps, kwargs_upper_special, kwargs_upper_extinction = self._upper_kwargs + ( + kwargs_fixed_lens, + kwargs_fixed_source, + kwargs_fixed_lens_light, + kwargs_fixed_ps, + kwargs_fixed_special, + kwargs_fixed_extinction, + ) = self.fixed_kwargs + ( + kwargs_lower_lens, + kwargs_lower_source, + kwargs_lower_lens_light, + kwargs_lower_ps, + kwargs_lower_special, + kwargs_lower_extinction, + ) = self._lower_kwargs + ( + kwargs_upper_lens, + kwargs_upper_source, + kwargs_upper_lens_light, + kwargs_upper_ps, + kwargs_upper_special, + kwargs_upper_extinction, + ) = self._upper_kwargs kwargs_model = self.kwargs_model kwargs_constraints = self.kwargs_constraints - lens_temp = self._kwargs_temp['kwargs_lens'] - param_class = Param(kwargs_model, kwargs_fixed_lens, kwargs_fixed_source, - kwargs_fixed_lens_light, kwargs_fixed_ps, kwargs_fixed_special, kwargs_fixed_extinction, - kwargs_lower_lens, kwargs_lower_source, kwargs_lower_lens_light, kwargs_lower_ps, - kwargs_lower_special, kwargs_lower_extinction, - kwargs_upper_lens, kwargs_upper_source, kwargs_upper_lens_light, kwargs_upper_ps, - kwargs_upper_special, kwargs_upper_extinction, - kwargs_lens_init=lens_temp, **kwargs_constraints) + lens_temp = self._kwargs_temp["kwargs_lens"] + param_class = Param( + kwargs_model, + kwargs_fixed_lens, + kwargs_fixed_source, + kwargs_fixed_lens_light, + kwargs_fixed_ps, + kwargs_fixed_special, + kwargs_fixed_extinction, + kwargs_lower_lens, + kwargs_lower_source, + kwargs_lower_lens_light, + kwargs_lower_ps, + kwargs_lower_special, + kwargs_lower_extinction, + kwargs_upper_lens, + kwargs_upper_source, + kwargs_upper_lens_light, + kwargs_upper_ps, + kwargs_upper_special, + kwargs_upper_extinction, + kwargs_lens_init=lens_temp, + **kwargs_constraints + ) return param_class def update_kwargs_model(self, kwargs_special): - """ - Update the kwargs_model with the new kwargs_special - """ + """Update the kwargs_model with the new kwargs_special.""" kwargs_model, update_bool = self.param_class.update_kwargs_model(kwargs_special) if update_bool: self.kwargs_model = kwargs_model return kwargs_model - def update_options(self, kwargs_model=None, kwargs_constraints=None, kwargs_likelihood=None): + def update_options( + self, kwargs_model=None, kwargs_constraints=None, kwargs_likelihood=None + ): """ updates the options by overwriting the kwargs with the new ones being added/changed WARNING: some updates may not be valid depending on the model options. Use carefully! 
@@ -240,12 +406,20 @@ def update_options(self, kwargs_model=None, kwargs_constraints=None, kwargs_like kwargs_model_updated = self.kwargs_model.update(kwargs_model) kwargs_constraints_updated = self.kwargs_constraints.update(kwargs_constraints) kwargs_likelihood_updated = self.kwargs_likelihood.update(kwargs_likelihood) - return kwargs_model_updated, kwargs_constraints_updated, kwargs_likelihood_updated - - def update_limits(self, change_source_lower_limit=None, change_source_upper_limit=None, - change_lens_lower_limit=None, change_lens_upper_limit=None,): - """ - updates the limits (lower and upper) of the update manager instance + return ( + kwargs_model_updated, + kwargs_constraints_updated, + kwargs_likelihood_updated, + ) + + def update_limits( + self, + change_source_lower_limit=None, + change_source_upper_limit=None, + change_lens_lower_limit=None, + change_lens_upper_limit=None, + ): + """Updates the limits (lower and upper) of the update manager instance. :param change_source_lower_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]] :param change_lens_lower_limit: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]] @@ -254,17 +428,30 @@ def update_limits(self, change_source_lower_limit=None, change_source_upper_limi :return: updates internal state of lower and upper limits accessible from outside """ if change_source_lower_limit is not None: - self._source_lower = self._update_kwargs_list(change_source_lower_limit, self._source_lower) + self._source_lower = self._update_kwargs_list( + change_source_lower_limit, self._source_lower + ) if change_source_upper_limit is not None: - self._source_upper = self._update_kwargs_list(change_source_upper_limit, self._source_upper) + self._source_upper = self._update_kwargs_list( + change_source_upper_limit, self._source_upper + ) if change_lens_lower_limit is not None: - self._lens_lower = self._update_kwargs_list(change_lens_lower_limit, self._lens_lower) + self._lens_lower = self._update_kwargs_list( + change_lens_lower_limit, self._lens_lower + ) if change_lens_upper_limit is not None: - self._lens_upper = self._update_kwargs_list(change_lens_upper_limit, self._lens_upper) - - def update_sigmas(self, change_sigma_lens=None, change_sigma_source=None, change_sigma_lens_light=None): - """ - updates individual estimated uncertainty levels for the initialization of search and sampling algorithms + self._lens_upper = self._update_kwargs_list( + change_lens_upper_limit, self._lens_upper + ) + + def update_sigmas( + self, + change_sigma_lens=None, + change_sigma_source=None, + change_sigma_lens_light=None, + ): + """Updates individual estimated uncertainty levels for the initialization of + search and sampling algorithms. 
:param change_sigma_lens: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]] :param change_sigma_source: [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ...]]] @@ -272,11 +459,17 @@ def update_sigmas(self, change_sigma_lens=None, change_sigma_source=None, change :return: updated internal state of the spread to initialize samplers """ if change_sigma_lens is not None: - self._lens_sigma = self._update_kwargs_list(change_sigma_lens, self._lens_sigma) + self._lens_sigma = self._update_kwargs_list( + change_sigma_lens, self._lens_sigma + ) if change_sigma_source is not None: - self._source_sigma = self._update_kwargs_list(change_sigma_source, self._source_sigma) + self._source_sigma = self._update_kwargs_list( + change_sigma_source, self._source_sigma + ) if change_sigma_lens_light is not None: - self._lens_light_sigma = self._update_kwargs_list(change_sigma_lens_light, self._lens_light_sigma) + self._lens_light_sigma = self._update_kwargs_list( + change_sigma_lens_light, self._lens_light_sigma + ) @staticmethod def _update_kwargs_list(change_list, kwargs_list_previous): @@ -296,13 +489,23 @@ def _update_kwargs_list(change_list, kwargs_list_previous): kwargs_limit_updated[i_model][param_name] = values[j] return kwargs_limit_updated - def update_fixed(self, lens_add_fixed=None, source_add_fixed=None, lens_light_add_fixed=None, ps_add_fixed=None, - special_add_fixed=None, lens_remove_fixed=None, source_remove_fixed=None, - lens_light_remove_fixed=None, ps_remove_fixed=None, special_remove_fixed=None): - """ - adds or removes the values of the keyword arguments that are stated in the _add_fixed to the existing fixed - arguments. convention for input arguments are: - [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ... (optional)], [], ...] + def update_fixed( + self, + lens_add_fixed=None, + source_add_fixed=None, + lens_light_add_fixed=None, + ps_add_fixed=None, + special_add_fixed=None, + lens_remove_fixed=None, + source_remove_fixed=None, + lens_light_remove_fixed=None, + ps_remove_fixed=None, + special_remove_fixed=None, + ): + """Adds or removes the values of the keyword arguments that are stated in the + _add_fixed to the existing fixed arguments. convention for input arguments are: + [[i_model, ['param_name1', 'param_name2', ...], [value1, value2, ... + (optional)], [], ...] 
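Sketch only, not part of the diff: update_limits and update_sigmas above share the [[i_model, ['param_name1', ...], [value1, ...]], ...] convention. A hypothetical example with an assumed instance name and illustrative parameter names and values:

update_manager.update_limits(
    change_lens_lower_limit=[[0, ["theta_E"], [0.5]]],  # lens model 0: lower bound on theta_E
    change_lens_upper_limit=[[0, ["theta_E"], [2.5]]],  # lens model 0: upper bound on theta_E
)
update_manager.update_sigmas(
    change_sigma_lens=[[0, ["theta_E"], [0.1]]],        # spread used to initialize the samplers
)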
:param lens_add_fixed: added fixed parameter in lens model :param source_add_fixed: added fixed parameter in source model @@ -316,16 +519,26 @@ def update_fixed(self, lens_add_fixed=None, source_add_fixed=None, lens_light_ad :param special_remove_fixed: remove fixed parameter in special model :return: updated kwargs fixed """ - lens_fixed = self._add_fixed(self._kwargs_temp['kwargs_lens'], self._lens_fixed, lens_add_fixed) + lens_fixed = self._add_fixed( + self._kwargs_temp["kwargs_lens"], self._lens_fixed, lens_add_fixed + ) lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed) - source_fixed = self._add_fixed(self._kwargs_temp['kwargs_source'], self._source_fixed, source_add_fixed) + source_fixed = self._add_fixed( + self._kwargs_temp["kwargs_source"], self._source_fixed, source_add_fixed + ) source_fixed = self._remove_fixed(source_fixed, source_remove_fixed) - lens_light_fixed = self._add_fixed(self._kwargs_temp['kwargs_lens_light'], self._lens_light_fixed, lens_light_add_fixed) + lens_light_fixed = self._add_fixed( + self._kwargs_temp["kwargs_lens_light"], + self._lens_light_fixed, + lens_light_add_fixed, + ) lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed) - ps_fixed = self._add_fixed(self._kwargs_temp['kwargs_ps'], self._ps_fixed, ps_add_fixed) + ps_fixed = self._add_fixed( + self._kwargs_temp["kwargs_ps"], self._ps_fixed, ps_add_fixed + ) ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed) special_fixed = copy.deepcopy(self._special_fixed) - special_temp = self._kwargs_temp['kwargs_special'] + special_temp = self._kwargs_temp["kwargs_special"] if special_add_fixed is None: special_add_fixed = [] for param_name in special_add_fixed: @@ -336,7 +549,13 @@ def update_fixed(self, lens_add_fixed=None, source_add_fixed=None, lens_light_ad for param_name in special_remove_fixed: if param_name in special_fixed: del special_fixed[param_name] - self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._special_fixed = lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed + ( + self._lens_fixed, + self._source_fixed, + self._lens_light_fixed, + self._ps_fixed, + self._special_fixed, + ) = (lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed) @staticmethod def _add_fixed(kwargs_model, kwargs_fixed, add_fixed): @@ -360,7 +579,9 @@ def _add_fixed(kwargs_model, kwargs_fixed, add_fixed): values = [None] * len(fix_names) for j, param_name in enumerate(fix_names): if values[j] is None: - kwargs_fixed[i_model][param_name] = kwargs_model[i_model][param_name] # add fixed list + kwargs_fixed[i_model][param_name] = kwargs_model[i_model][ + param_name + ] # add fixed list else: kwargs_fixed[i_model][param_name] = values[j] return kwargs_fixed @@ -380,14 +601,16 @@ def _remove_fixed(kwargs_fixed, remove_fixed): i_model = remove_fixed[i][0] fix_names = remove_fixed[i][1] for param_name in fix_names: - if param_name in kwargs_fixed[i_model]: # if the parameter already is in the fixed list, do not change it + if ( + param_name in kwargs_fixed[i_model] + ): # if the parameter already is in the fixed list, do not change it del kwargs_fixed[i_model][param_name] return kwargs_fixed def fix_image_parameters(self, image_index=0): - """ - fixes all parameters that are only assigned to a specific image. This allows to sample only parameters that - constraint by the fitting of a sub-set of the images. + """Fixes all parameters that are only assigned to a specific image. 
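Sketch only, not part of the diff: update_fixed above accepts [[i_model, ['param_name1', ...], [value1, ... (optional)]], ...] entries; when the values are omitted, _add_fixed falls back to the current values held in the internal kwargs_temp state. A hypothetical call, with update_manager an assumed instance name and illustrative parameters:

update_manager.update_fixed(
    lens_add_fixed=[[0, ["gamma"], [2.0]]],  # fix lens model 0's gamma at 2.0
    source_add_fixed=[[0, ["n_sersic"]]],    # no value given -> fixed at its current value
    lens_remove_fixed=[[0, ["theta_E"]]],    # release a previously fixed parameter
)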
This allows + to sample only parameters that constraint by the fitting of a sub-set of the + images. :param image_index: index :return: None diff --git a/lenstronomy/__init__.py b/lenstronomy/__init__.py index 1eab08967..acda5331b 100644 --- a/lenstronomy/__init__.py +++ b/lenstronomy/__init__.py @@ -1,6 +1,6 @@ -__author__ = 'lenstronomy developers' -__email__ = 'lenstronomy-dev@googlegroups.com' -__version__ = '1.11.2' -__credits__ = 'ETH Zurich, UCLA, Stanford, Stony Brook' +__author__ = "lenstronomy developers" +__email__ = "lenstronomy-dev@googlegroups.com" +__version__ = "1.11.2" +__credits__ = "ETH Zurich, UCLA, Stanford, Stony Brook" from .Util.package_util import short, laconic diff --git a/setup.py b/setup.py index 6064393a9..4758a2cf6 100644 --- a/setup.py +++ b/setup.py @@ -11,8 +11,8 @@ from distutils.core import setup -if sys.argv[-1] == 'publish': - os.system('python setup.py sdist upload') +if sys.argv[-1] == "publish": + os.system("python setup.py sdist upload") sys.exit() @@ -24,61 +24,72 @@ def finalize_options(self): def run_tests(self): import pytest + errno = pytest.main(self.test_args) sys.exit(errno) -readme = open('README.rst').read() +readme = open("README.rst").read() doclink = """ Documentation ------------- The full documentation can be generated with Sphinx""" -history = open('HISTORY.rst').read().replace('.. :changelog:', '') +history = open("HISTORY.rst").read().replace(".. :changelog:", "") desc = open("README.rst").read() -requires = ['numpy>=1.13', - 'scipy>=0.19.1', - 'configparser', - 'astropy', - 'mpmath', - 'matplotlib', - 'scikit-learn', - 'numba>=0.43.1', - 'corner>=2.2.1', - 'scikit-image', - 'pyyaml', - 'h5py', - 'pyxdg', - 'schwimmbad', - 'multiprocess>=0.70.8', - ] -tests_require = ['pytest>=2.3', "mock", 'colossus==1.3.0', 'slitronomy==0.3.2', - 'emcee>=3.0.0', 'dynesty', 'nestcheck', 'pymultinest', 'zeus-mcmc>=2.4.0', - 'nautilus-sampler>=0.2.1', 'coolest', - ] +requires = [ + "numpy>=1.13", + "scipy>=0.19.1", + "configparser", + "astropy", + "mpmath", + "matplotlib", + "scikit-learn", + "numba>=0.43.1", + "corner>=2.2.1", + "scikit-image", + "pyyaml", + "h5py", + "pyxdg", + "schwimmbad", + "multiprocess>=0.70.8", +] +tests_require = [ + "pytest>=2.3", + "mock", + "colossus==1.3.0", + "slitronomy==0.3.2", + "emcee>=3.0.0", + "dynesty", + "nestcheck", + "pymultinest", + "zeus-mcmc>=2.4.0", + "nautilus-sampler>=0.2.1", + "coolest", +] PACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir)) setup( - name='lenstronomy', - version='1.11.2', - description='Strong lens modeling package.', + name="lenstronomy", + version="1.11.2", + description="Strong lens modeling package.", long_description=desc, - author='lenstronomy developers', - author_email='lenstronomy-dev@googlegroups.com', - url='https://github.com/lenstronomy/lenstronomy', - download_url='https://github.com/lenstronomy/lenstronomy/archive/1.11.2.tar.gz', + author="lenstronomy developers", + author_email="lenstronomy-dev@googlegroups.com", + url="https://github.com/lenstronomy/lenstronomy", + download_url="https://github.com/lenstronomy/lenstronomy/archive/1.11.2.tar.gz", packages=find_packages(PACKAGE_PATH, "test"), - package_dir={'lenstronomy': 'lenstronomy'}, + package_dir={"lenstronomy": "lenstronomy"}, include_package_data=True, # setup_requires=requires, install_requires=requires, - license='BSD-3', + license="BSD-3", zip_safe=False, - keywords='lenstronomy', + keywords="lenstronomy", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: 
Science/Research", @@ -89,5 +100,5 @@ def run_tests(self): "Programming Language :: Python :: 3.9", ], tests_require=tests_require, - cmdclass={'test': PyTest}, # 'build_ext':build_ext, + cmdclass={"test": PyTest}, # 'build_ext':build_ext, ) diff --git a/test/__init__.py b/test/__init__.py index 2190afc66..a16c4aeae 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1,4 +1,6 @@ import os -IMAGE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'Test_data','RXJ1131_1231_test.fits') +IMAGE_DATA_PATH = os.path.join( + os.path.dirname(__file__), "Test_data", "RXJ1131_1231_test.fits" +) diff --git a/test/test_Analysis/test_image_reconstruction.py b/test/test_Analysis/test_image_reconstruction.py index 8f27a882b..b1ebd9ecf 100644 --- a/test/test_Analysis/test_image_reconstruction.py +++ b/test/test_Analysis/test_image_reconstruction.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -17,11 +17,9 @@ class TestMultiBandImageReconstruction(object): - """ - test the fitting sequences - """ - def setup_method(self): + """Test the fitting sequences.""" + def setup_method(self): # data specifics sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -31,89 +29,155 @@ def setup_method(self): # PSF specification - self.kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + self.kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**self.kwargs_data) - self.kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + self.kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**self.kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) e1, e2 = param_util.phi_q2_ellipticity(0.2, 0.8) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} - - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) self.LensModel = lens_model_class # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] 
self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_list = ['SOURCE_POSITION'] - point_source_class = PointSource(point_source_type_list=point_source_list, fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 1} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps, no_noise=True) + self.kwargs_ps = [ + {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_list = ["SOURCE_POSITION"] + point_source_class = PointSource( + point_source_type_list=point_source_list, fixed_magnification_list=[True] + ) + kwargs_numerics = {"supersampling_factor": 1} + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + no_noise=True, + ) data_class.update_data(image_sim) - self.kwargs_data['image_data'] = image_sim - self.kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': point_source_list, - 'fixed_magnification_list': [False], - } + self.kwargs_data["image_data"] = image_sim + self.kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "point_source_model_list": point_source_list, + "fixed_magnification_list": [False], + } self.kwargs_numerics = kwargs_numerics self.data_class = ImageData(**self.kwargs_data) - self.kwargs_params = {'kwargs_lens': self.kwargs_lens, 'kwargs_source': self.kwargs_source, 'kwargs_lens_light': self.kwargs_lens_light, - 'kwargs_ps': self.kwargs_ps} + self.kwargs_params = { + "kwargs_lens": self.kwargs_lens, + "kwargs_source": self.kwargs_source, + "kwargs_lens_light": self.kwargs_lens_light, + "kwargs_ps": self.kwargs_ps, + } def test_band_setup(self): multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - multi_band = MultiBandImageReconstruction(multi_band_list, self.kwargs_model, self.kwargs_params, - multi_band_type='single-band') - - multi_band = MultiBandImageReconstruction(multi_band_list, self.kwargs_model, self.kwargs_params, - multi_band_type='joint-linear') + multi_band = MultiBandImageReconstruction( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + multi_band_type="single-band", + ) + + multi_band = MultiBandImageReconstruction( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + multi_band_type="joint-linear", + ) image_model, kwargs_params = multi_band.band_setup(band_index=0) model = image_model.image(**kwargs_params) - npt.assert_almost_equal(model, self.kwargs_data['image_data'], decimal=5) + npt.assert_almost_equal(model, self.kwargs_data["image_data"], decimal=5) npt.assert_almost_equal(model, multi_band.model_band_list[0].model, 
decimal=5) - npt.assert_almost_equal(multi_band.model_band_list[0].norm_residuals, 0, decimal=5) + npt.assert_almost_equal( + multi_band.model_band_list[0].norm_residuals, 0, decimal=5 + ) def test_bands_compute(self): - multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]]*2 - multi_band = MultiBandImageReconstruction(multi_band_list, self.kwargs_model, self.kwargs_params, - kwargs_likelihood={'bands_compute': [False, True]}, - multi_band_type='multi-linear') + multi_band_list = [ + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics] + ] * 2 + multi_band = MultiBandImageReconstruction( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + kwargs_likelihood={"bands_compute": [False, True]}, + multi_band_type="multi-linear", + ) image_model, kwargs_params = multi_band.band_setup(band_index=1) model = image_model.image(**kwargs_params) - npt.assert_almost_equal(model, self.kwargs_data['image_data'], decimal=5) + npt.assert_almost_equal(model, self.kwargs_data["image_data"], decimal=5) def test_not_verbose(self): multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - multi_band = MultiBandImageReconstruction(multi_band_list, self.kwargs_model, self.kwargs_params, - verbose=False) + multi_band = MultiBandImageReconstruction( + multi_band_list, self.kwargs_model, self.kwargs_params, verbose=False + ) def test_check_solver_error(self): - bool = check_solver_error(image=np.array([0, 0])) assert bool @@ -122,28 +186,36 @@ def test_check_solver_error(self): class TestRaises(unittest.TestCase): - def test_no_band(self): - """ - test raise statements if band is not evaluated - - """ + """Test raise statements if band is not evaluated.""" sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 10 # cutout pixel size deltaPix = 0.5 # pixel size in arcsec (area per pixel = deltaPix**2) fwhm = 0.5 # full width half max of PSF - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) - kwargs_data['image_data'] = np.ones((numPix, numPix)) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) + kwargs_data["image_data"] = np.ones((numPix, numPix)) + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } kwargs_numerics = {} multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]] - multi_band = MultiBandImageReconstruction(multi_band_list, {}, {}, - multi_band_type='single-band', kwargs_likelihood={'bands_compute': [False]}) + multi_band = MultiBandImageReconstruction( + multi_band_list, + {}, + {}, + multi_band_type="single-band", + kwargs_likelihood={"bands_compute": [False]}, + ) with self.assertRaises(ValueError): multi_band.band_setup(band_index=0) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_kinematics_api.py b/test/test_Analysis/test_kinematics_api.py index bd6d7f43b..855314dc7 100644 --- a/test/test_Analysis/test_kinematics_api.py +++ b/test/test_Analysis/test_kinematics_api.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -10,77 +10,160 @@ class TestKinematicsAPI(object): - def setup_method(self): pass def test_velocity_dispersion(self): z_lens = 0.5 z_source = 1.5 - kwargs_model = 
{'lens_model_list': ['SPEP', 'SHEAR', 'SIS', 'SIS', 'SIS'], - 'lens_light_model_list': ['SERSIC_ELLIPSE', 'SERSIC']} + kwargs_model = { + "lens_model_list": ["SPEP", "SHEAR", "SIS", "SIS", "SIS"], + "lens_light_model_list": ["SERSIC_ELLIPSE", "SERSIC"], + } theta_E = 1.5 gamma = 1.8 - kwargs_lens = [{'theta_E': theta_E, 'e1': 0, 'center_x': -0.044798916793300093, 'center_y': 0.0054408937891703788, 'e2': 0, 'gamma': gamma}, - {'e1': -0.050871696555354479, 'e2': -0.0061601733920590464}, {'center_y': 2.79985456, 'center_x': -2.32019894, - 'theta_E': 0.28165274714097904}, {'center_y': 3.83985426, - 'center_x': -2.32019933, 'theta_E': 0.0038110812674654873}, - {'center_y': 4.31985428, 'center_x': -1.68019931, 'theta_E': 0.45552039839735037}] + kwargs_lens = [ + { + "theta_E": theta_E, + "e1": 0, + "center_x": -0.044798916793300093, + "center_y": 0.0054408937891703788, + "e2": 0, + "gamma": gamma, + }, + {"e1": -0.050871696555354479, "e2": -0.0061601733920590464}, + { + "center_y": 2.79985456, + "center_x": -2.32019894, + "theta_E": 0.28165274714097904, + }, + { + "center_y": 3.83985426, + "center_x": -2.32019933, + "theta_E": 0.0038110812674654873, + }, + { + "center_y": 4.31985428, + "center_x": -1.68019931, + "theta_E": 0.45552039839735037, + }, + ] phi, q = -0.52624727893702705, 0.79703498156919605 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_lens_light = [{'n_sersic': 1.1212528655709217, - 'center_x': -0.019674496231393473, - 'e1': e1, 'e2': e2, 'amp': 1.1091367792010356, 'center_y': 0.076914975081560991, - 'R_sersic': 0.42691611878867058}, - {'R_sersic': 0.03025682660635394, 'amp': 139.96763298885992, 'n_sersic': 1.90000008624093865, - 'center_x': -0.019674496231393473, 'center_y': 0.076914975081560991}] + kwargs_lens_light = [ + { + "n_sersic": 1.1212528655709217, + "center_x": -0.019674496231393473, + "e1": e1, + "e2": e2, + "amp": 1.1091367792010356, + "center_y": 0.076914975081560991, + "R_sersic": 0.42691611878867058, + }, + { + "R_sersic": 0.03025682660635394, + "amp": 139.96763298885992, + "n_sersic": 1.90000008624093865, + "center_x": -0.019674496231393473, + "center_y": 0.076914975081560991, + }, + ] r_ani = 0.62 - kwargs_anisotropy = {'r_ani': r_ani} + kwargs_anisotropy = {"r_ani": r_ani} R_slit = 3.8 - dR_slit = 1. 
- aperture_type = 'slit' - kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit, 'angle': 0, 'center_dec': 0} + dR_slit = 1.0 + aperture_type = "slit" + kwargs_aperture = { + "aperture_type": aperture_type, + "center_ra": 0, + "width": dR_slit, + "length": R_slit, + "angle": 0, + "center_dec": 0, + } psf_fwhm = 0.7 - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - anisotropy_model = 'OM' - kwargs_mge = {'n_comp': 20} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + anisotropy_model = "OM" + kwargs_mge = {"n_comp": 20} r_eff = 0.211919902322 - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_psf, - lens_model_kinematics_bool=[True, False, False, False, False], anisotropy_model=anisotropy_model, - kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000, - MGE_light=True) - - v_sigma = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff) - - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture, - kwargs_seeing=kwargs_psf, lens_model_kinematics_bool=[True, False, False, False, False], - anisotropy_model=anisotropy_model, - kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000, - MGE_light=True, MGE_mass=True) - v_sigma_mge_lens = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E) - #v_sigma_mge_lens = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, kwargs_aperture, + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_psf, + lens_model_kinematics_bool=[True, False, False, False, False], + anisotropy_model=anisotropy_model, + kwargs_mge_light=kwargs_mge, + kwargs_mge_mass=kwargs_mge, + sampling_number=1000, + MGE_light=True, + ) + + v_sigma = kinematicAPI.velocity_dispersion( + kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff + ) + + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_psf, + lens_model_kinematics_bool=[True, False, False, False, False], + anisotropy_model=anisotropy_model, + kwargs_mge_light=kwargs_mge, + kwargs_mge_mass=kwargs_mge, + sampling_number=1000, + MGE_light=True, + MGE_mass=True, + ) + v_sigma_mge_lens = kinematicAPI.velocity_dispersion( + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + ) + # v_sigma_mge_lens = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, kwargs_aperture, # kwargs_psf, anisotropy_model, MGE_light=True, MGE_mass=True, theta_E=theta_E, # kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, # r_eff=r_eff) - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture, - kwargs_seeing=kwargs_psf, - lens_model_kinematics_bool=[True, False, False, False, False], - anisotropy_model=anisotropy_model, - kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000, - MGE_light=False, MGE_mass=False, Hernquist_approx=True) - v_sigma_hernquist = kinematicAPI.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, - r_eff=r_eff, theta_E=theta_E) - #v_sigma_hernquist = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, + kinematicAPI = 
KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_psf, + lens_model_kinematics_bool=[True, False, False, False, False], + anisotropy_model=anisotropy_model, + kwargs_mge_light=kwargs_mge, + kwargs_mge_mass=kwargs_mge, + sampling_number=1000, + MGE_light=False, + MGE_mass=False, + Hernquist_approx=True, + ) + v_sigma_hernquist = kinematicAPI.velocity_dispersion( + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + ) + # v_sigma_hernquist = kinematicAPI.velocity_dispersion_numerical(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, # kwargs_aperture, kwargs_psf, anisotropy_model, # MGE_light=False, MGE_mass=False, # r_eff=r_eff, Hernquist_approx=True) - vel_disp_temp = kinematicAPI.velocity_dispersion_analytical(theta_E, gamma, r_ani=r_ani, r_eff=r_eff) + vel_disp_temp = kinematicAPI.velocity_dispersion_analytical( + theta_E, gamma, r_ani=r_ani, r_eff=r_eff + ) print(v_sigma, vel_disp_temp) - #assert 1 == 0 + # assert 1 == 0 npt.assert_almost_equal(v_sigma / vel_disp_temp, 1, decimal=1) npt.assert_almost_equal(v_sigma_mge_lens / v_sigma, 1, decimal=1) npt.assert_almost_equal(v_sigma / v_sigma_hernquist, 1, decimal=1) @@ -88,256 +171,561 @@ def test_velocity_dispersion(self): def test_galkin_settings(self): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_model_list': ['SIS'], - 'lens_light_model_list': ['HERNQUIST']} + kwargs_model = { + "lens_model_list": ["SIS"], + "lens_light_model_list": ["HERNQUIST"], + } - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kwargs_lens_light = [{'amp': 1, 'Rs': 1, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kwargs_lens_light = [{"amp": 1, "Rs": 1, "center_x": 0, "center_y": 0}] r_ani = 0.62 - kwargs_anisotropy = {'r_ani': r_ani} + kwargs_anisotropy = {"r_ani": r_ani} R_slit = 3.8 - dR_slit = 1. 
- aperture_type = 'slit' - kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit, - 'angle': 0, 'center_dec': 0} + dR_slit = 1.0 + aperture_type = "slit" + kwargs_aperture = { + "aperture_type": aperture_type, + "center_ra": 0, + "width": dR_slit, + "length": R_slit, + "angle": 0, + "center_dec": 0, + } psf_fwhm = 0.7 - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - anisotropy_model = 'OM' - kwargs_mge = {'n_comp': 20} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=kwargs_aperture, - kwargs_seeing=kwargs_psf, analytic_kinematics=True, - anisotropy_model=anisotropy_model, - kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000) - galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=None, - theta_E=None, gamma=None) - npt.assert_almost_equal(kwargs_profile['gamma'], 2, decimal=2) - - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture=[kwargs_aperture], - kwargs_seeing=[kwargs_psf], analytic_kinematics=True, - anisotropy_model=anisotropy_model, multi_observations=True, - kwargs_mge_light=kwargs_mge, kwargs_mge_mass=kwargs_mge, sampling_number=1000) - galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings(kwargs_lens, kwargs_lens_light, r_eff=None, - theta_E=None, gamma=None) - npt.assert_almost_equal(kwargs_profile['gamma'], 2, decimal=2) + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + anisotropy_model = "OM" + kwargs_mge = {"n_comp": 20} + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_psf, + analytic_kinematics=True, + anisotropy_model=anisotropy_model, + kwargs_mge_light=kwargs_mge, + kwargs_mge_mass=kwargs_mge, + sampling_number=1000, + ) + galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings( + kwargs_lens, kwargs_lens_light, r_eff=None, theta_E=None, gamma=None + ) + npt.assert_almost_equal(kwargs_profile["gamma"], 2, decimal=2) + + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture=[kwargs_aperture], + kwargs_seeing=[kwargs_psf], + analytic_kinematics=True, + anisotropy_model=anisotropy_model, + multi_observations=True, + kwargs_mge_light=kwargs_mge, + kwargs_mge_mass=kwargs_mge, + sampling_number=1000, + ) + galkin, kwargs_profile, kwargs_light = kinematicAPI.galkin_settings( + kwargs_lens, kwargs_lens_light, r_eff=None, theta_E=None, gamma=None + ) + npt.assert_almost_equal(kwargs_profile["gamma"], 2, decimal=2) def test_kinematic_light_profile(self): z_lens = 0.5 z_source = 1.5 - kwargs_options = {'lens_light_model_list': ['HERNQUIST_ELLIPSE', 'SERSIC']} - kwargs_mge = {'n_comp': 20} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') + kwargs_options = {"lens_light_model_list": ["HERNQUIST_ELLIPSE", "SERSIC"]} + kwargs_mge = {"n_comp": 20} + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_options, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) r_eff = 0.2 - kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'e1': 0., 'e2': 0, 'center_x': 0, 'center_y': 0}, - {'amp': 1, 'R_sersic': 1, 'n_sersic': 2, 'center_x': -10, 'center_y': -10}] - light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=True, - r_eff=r_eff, - model_kinematics_bool=[True, False], - kwargs_mge=kwargs_mge) - assert 
light_profile_list[0] == 'MULTI_GAUSSIAN' - - light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=False, - r_eff=r_eff, model_kinematics_bool=[True, False]) - assert light_profile_list[0] == 'HERNQUIST_ELLIPSE' - - light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile(kwargs_lens_light, MGE_fit=False, - Hernquist_approx=True, r_eff=r_eff, - model_kinematics_bool=[True, False]) - assert light_profile_list[0] == 'HERNQUIST' - npt.assert_almost_equal(kwargs_light[0]['Rs'] / kwargs_lens_light[0]['Rs'], 1, decimal=2) + kwargs_lens_light = [ + { + "amp": 1, + "Rs": r_eff * 0.551, + "e1": 0.0, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, + {"amp": 1, "R_sersic": 1, "n_sersic": 2, "center_x": -10, "center_y": -10}, + ] + light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile( + kwargs_lens_light, + MGE_fit=True, + r_eff=r_eff, + model_kinematics_bool=[True, False], + kwargs_mge=kwargs_mge, + ) + assert light_profile_list[0] == "MULTI_GAUSSIAN" + + light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile( + kwargs_lens_light, + MGE_fit=False, + r_eff=r_eff, + model_kinematics_bool=[True, False], + ) + assert light_profile_list[0] == "HERNQUIST_ELLIPSE" + + light_profile_list, kwargs_light = kinematicAPI.kinematic_light_profile( + kwargs_lens_light, + MGE_fit=False, + Hernquist_approx=True, + r_eff=r_eff, + model_kinematics_bool=[True, False], + ) + assert light_profile_list[0] == "HERNQUIST" + npt.assert_almost_equal( + kwargs_light[0]["Rs"] / kwargs_lens_light[0]["Rs"], 1, decimal=2 + ) def test_kinematic_lens_profiles(self): z_lens = 0.5 z_source = 1.5 - kwargs_options = {'lens_model_list': ['SPEP', 'SHEAR']} - kin_api = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_aperture={}, kwargs_seeing={}, anisotropy_model='OM') - kwargs_lens = [{'theta_E': 1.4272358196260446, 'e1': 0, 'center_x': -0.044798916793300093, - 'center_y': 0.0054408937891703788, 'e2': 0, 'gamma': 1.8}, - {'e1': -0.050871696555354479, 'e2': -0.0061601733920590464} - ] - - kwargs_mge = {'n_comp': 20} - mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles(kwargs_lens, MGE_fit=True, - kwargs_mge=kwargs_mge, theta_E=1.4, - model_kinematics_bool=[True, False]) - assert mass_profile_list[0] == 'MULTI_GAUSSIAN_KAPPA' - - mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles(kwargs_lens, MGE_fit=False, - model_kinematics_bool=[True, False]) - assert mass_profile_list[0] == 'SPEP' + kwargs_options = {"lens_model_list": ["SPEP", "SHEAR"]} + kin_api = KinematicsAPI( + z_lens, + z_source, + kwargs_options, + kwargs_aperture={}, + kwargs_seeing={}, + anisotropy_model="OM", + ) + kwargs_lens = [ + { + "theta_E": 1.4272358196260446, + "e1": 0, + "center_x": -0.044798916793300093, + "center_y": 0.0054408937891703788, + "e2": 0, + "gamma": 1.8, + }, + {"e1": -0.050871696555354479, "e2": -0.0061601733920590464}, + ] + + kwargs_mge = {"n_comp": 20} + mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles( + kwargs_lens, + MGE_fit=True, + kwargs_mge=kwargs_mge, + theta_E=1.4, + model_kinematics_bool=[True, False], + ) + assert mass_profile_list[0] == "MULTI_GAUSSIAN_KAPPA" + + mass_profile_list, kwargs_profile = kin_api.kinematic_lens_profiles( + kwargs_lens, MGE_fit=False, model_kinematics_bool=[True, False] + ) + assert mass_profile_list[0] == "SPEP" def test_model_dispersion(self): np.random.seed(42) z_lens = 0.5 z_source = 1.5 - r_eff = 1. - theta_E = 1. 
- kwargs_model = {'lens_model_list': ['SIS'], 'lens_light_model_list': ['HERNQUIST']} - kwargs_lens = [{'theta_E': theta_E, 'center_x': 0, 'center_y': 0}] - kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'center_x': 0, 'center_y': 0}] - kwargs_anisotropy = {'r_ani': 1} + r_eff = 1.0 + theta_E = 1.0 + kwargs_model = { + "lens_model_list": ["SIS"], + "lens_light_model_list": ["HERNQUIST"], + } + kwargs_lens = [{"theta_E": theta_E, "center_x": 0, "center_y": 0}] + kwargs_lens_light = [ + {"amp": 1, "Rs": r_eff * 0.551, "center_x": 0, "center_y": 0} + ] + kwargs_anisotropy = {"r_ani": 1} # settings R_slit = 3.8 - dR_slit = 1. - aperture_type = 'slit' - kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit, - 'angle': 0, 'center_dec': 0} + dR_slit = 1.0 + aperture_type = "slit" + kwargs_aperture = { + "aperture_type": aperture_type, + "center_ra": 0, + "width": dR_slit, + "length": R_slit, + "angle": 0, + "center_dec": 0, + } psf_fwhm = 0.7 - kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - anisotropy_model = 'OM' - kin_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing, - anisotropy_model=anisotropy_model) - - kwargs_numerics_galkin = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.0001} - kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=True, - Hernquist_approx=False, MGE_light=False, MGE_mass=False) - vel_disp_analytic = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff, - theta_E=theta_E, gamma=2) - - kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False, - Hernquist_approx=False, MGE_light=False, MGE_mass=False) - vel_disp_numerical = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy) #, - # r_eff=r_eff, theta_E=theta_E, gamma=2) + kwargs_seeing = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + anisotropy_model = "OM" + kin_api = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture, + kwargs_seeing, + anisotropy_model=anisotropy_model, + ) + + kwargs_numerics_galkin = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.0001, + } + kin_api.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=True, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + ) + vel_disp_analytic = kin_api.velocity_dispersion( + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + gamma=2, + ) + + kin_api.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=False, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + ) + vel_disp_numerical = kin_api.velocity_dispersion( + kwargs_lens, kwargs_lens_light, kwargs_anisotropy + ) # , + # r_eff=r_eff, theta_E=theta_E, gamma=2) npt.assert_almost_equal(vel_disp_numerical / vel_disp_analytic, 1, decimal=2) - kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False, - Hernquist_approx=False, MGE_light=False, MGE_mass=False, - kwargs_mge_light={'n_comp': 10}, kwargs_mge_mass={'n_comp': 5}) - assert kin_api._kwargs_mge_mass['n_comp'] == 5 - assert kin_api._kwargs_mge_light['n_comp'] == 10 + kin_api.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=False, + 
Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + kwargs_mge_light={"n_comp": 10}, + kwargs_mge_mass={"n_comp": 5}, + ) + assert kin_api._kwargs_mge_mass["n_comp"] == 5 + assert kin_api._kwargs_mge_light["n_comp"] == 10 def test_velocity_dispersion_map(self): np.random.seed(42) z_lens = 0.5 z_source = 1.5 - kwargs_options = {'lens_model_list': ['SIS'], 'lens_light_model_list': ['HERNQUIST']} - r_eff = 1. + kwargs_options = { + "lens_model_list": ["SIS"], + "lens_light_model_list": ["HERNQUIST"], + } + r_eff = 1.0 theta_E = 1 - kwargs_lens = [{'theta_E': theta_E, 'center_x': 0, 'center_y': 0}] - kwargs_lens_light = [{'amp': 1, 'Rs': r_eff * 0.551, 'center_x': 0, 'center_y': 0}] - kwargs_anisotropy = {'r_ani': 1} + kwargs_lens = [{"theta_E": theta_E, "center_x": 0, "center_y": 0}] + kwargs_lens_light = [ + {"amp": 1, "Rs": r_eff * 0.551, "center_x": 0, "center_y": 0} + ] + kwargs_anisotropy = {"r_ani": 1} r_bins = np.array([0, 0.5, 1]) - aperture_type = 'IFU_shells' - kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'r_bins': r_bins, 'center_dec': 0} + aperture_type = "IFU_shells" + kwargs_aperture = { + "aperture_type": aperture_type, + "center_ra": 0, + "r_bins": r_bins, + "center_dec": 0, + } psf_fwhm = 0.7 - kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - anisotropy_model = 'OM' - kin_api = KinematicsAPI(z_lens, z_source, kwargs_options, kwargs_aperture=kwargs_aperture, - kwargs_seeing=kwargs_seeing, anisotropy_model=anisotropy_model) - - kwargs_numerics_galkin = {'interpol_grid_num': 500, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001} - kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=True, - Hernquist_approx=False, MGE_light=False, MGE_mass=False, - num_kin_sampling=1000, num_psf_sampling=100) - vel_disp_analytic = kin_api.velocity_dispersion_map(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, - r_eff=r_eff, theta_E=theta_E, gamma=2) - - kin_api.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=False, - Hernquist_approx=False, MGE_light=False, MGE_mass=False, - num_kin_sampling=1000, num_psf_sampling=100) - vel_disp_numerical = kin_api.velocity_dispersion_map(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, - r_eff=r_eff, theta_E=theta_E, gamma=2) + kwargs_seeing = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + anisotropy_model = "OM" + kin_api = KinematicsAPI( + z_lens, + z_source, + kwargs_options, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_seeing, + anisotropy_model=anisotropy_model, + ) + + kwargs_numerics_galkin = { + "interpol_grid_num": 500, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + } + kin_api.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=True, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + num_kin_sampling=1000, + num_psf_sampling=100, + ) + vel_disp_analytic = kin_api.velocity_dispersion_map( + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + gamma=2, + ) + + kin_api.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=False, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + num_kin_sampling=1000, + num_psf_sampling=100, + ) + vel_disp_numerical = kin_api.velocity_dispersion_map( + kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=theta_E, + gamma=2, + ) 
print(vel_disp_numerical, vel_disp_analytic) npt.assert_almost_equal(vel_disp_numerical, vel_disp_analytic, decimal=-1) def test_interpolated_sersic(self): from lenstronomy.Analysis.light2mass import light2mass_interpol - kwargs_light = [{'n_sersic': 2, 'R_sersic': 0.5, 'amp': 1, 'center_x': 0.01, 'center_y': 0.01}] - kwargs_lens = [{'n_sersic': 2, 'R_sersic': 0.5, 'k_eff': 1, 'center_x': 0.01, 'center_y': 0.01}] + + kwargs_light = [ + { + "n_sersic": 2, + "R_sersic": 0.5, + "amp": 1, + "center_x": 0.01, + "center_y": 0.01, + } + ] + kwargs_lens = [ + { + "n_sersic": 2, + "R_sersic": 0.5, + "k_eff": 1, + "center_x": 0.01, + "center_y": 0.01, + } + ] deltaPix = 0.1 numPix = 100 - kwargs_interp = light2mass_interpol(['SERSIC'], kwargs_lens_light=kwargs_light, numPix=numPix, - deltaPix=deltaPix, subgrid_res=5) + kwargs_interp = light2mass_interpol( + ["SERSIC"], + kwargs_lens_light=kwargs_light, + numPix=numPix, + deltaPix=deltaPix, + subgrid_res=5, + ) kwargs_lens_interp = [kwargs_interp] from lenstronomy.Analysis.kinematics_api import KinematicsAPI + z_lens = 0.5 z_source = 1.5 r_ani = 0.62 - kwargs_anisotropy = {'r_ani': r_ani} + kwargs_anisotropy = {"r_ani": r_ani} R_slit = 3.8 - dR_slit = 1. - aperture_type = 'slit' - kwargs_aperture = {'center_ra': 0, 'width': dR_slit, 'length': R_slit, 'angle': 0, 'center_dec': 0, 'aperture_type': aperture_type} + dR_slit = 1.0 + aperture_type = "slit" + kwargs_aperture = { + "center_ra": 0, + "width": dR_slit, + "length": R_slit, + "angle": 0, + "center_dec": 0, + "aperture_type": aperture_type, + } psf_fwhm = 0.7 - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - anisotropy_model = 'OM' + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + anisotropy_model = "OM" r_eff = 0.5 - kwargs_model = {'lens_model_list': ['SERSIC'], - 'lens_light_model_list': ['SERSIC']} - kwargs_mge = {'n_comp': 20} - kinematic_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing=kwargs_psf, - anisotropy_model=anisotropy_model, MGE_light=True, MGE_mass=True, - kwargs_mge_mass=kwargs_mge, kwargs_mge_light=kwargs_mge) - - v_sigma = kinematic_api.velocity_dispersion(kwargs_lens, kwargs_light, kwargs_anisotropy, r_eff=r_eff, theta_E=1) - kwargs_model_interp = {'lens_model_list': ['INTERPOL'], - 'lens_light_model_list': ['SERSIC']} - kinematic_api_interp = KinematicsAPI(z_lens, z_source, kwargs_model_interp, kwargs_aperture, kwargs_seeing=kwargs_psf, - anisotropy_model=anisotropy_model, MGE_light=True, MGE_mass=True, - kwargs_mge_mass=kwargs_mge, - kwargs_mge_light=kwargs_mge) - v_sigma_interp = kinematic_api_interp.velocity_dispersion(kwargs_lens_interp, kwargs_light, kwargs_anisotropy, - theta_E=1., r_eff=r_eff) + kwargs_model = { + "lens_model_list": ["SERSIC"], + "lens_light_model_list": ["SERSIC"], + } + kwargs_mge = {"n_comp": 20} + kinematic_api = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_aperture, + kwargs_seeing=kwargs_psf, + anisotropy_model=anisotropy_model, + MGE_light=True, + MGE_mass=True, + kwargs_mge_mass=kwargs_mge, + kwargs_mge_light=kwargs_mge, + ) + + v_sigma = kinematic_api.velocity_dispersion( + kwargs_lens, kwargs_light, kwargs_anisotropy, r_eff=r_eff, theta_E=1 + ) + kwargs_model_interp = { + "lens_model_list": ["INTERPOL"], + "lens_light_model_list": ["SERSIC"], + } + kinematic_api_interp = KinematicsAPI( + z_lens, + z_source, + kwargs_model_interp, + kwargs_aperture, + kwargs_seeing=kwargs_psf, + anisotropy_model=anisotropy_model, + MGE_light=True, + MGE_mass=True, + 
kwargs_mge_mass=kwargs_mge, + kwargs_mge_light=kwargs_mge, + ) + v_sigma_interp = kinematic_api_interp.velocity_dispersion( + kwargs_lens_interp, + kwargs_light, + kwargs_anisotropy, + theta_E=1.0, + r_eff=r_eff, + ) npt.assert_almost_equal(v_sigma / v_sigma_interp, 1, 1) # use as kinematic constraints # compare with MGE Sersic kinematic estimate class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST']} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}] - kinematicAPI.kinematic_light_profile(kwargs_light, MGE_fit=False, - Hernquist_approx=True, r_eff=None, model_kinematics_bool=[True]) + kwargs_model = {"lens_light_model_list": ["HERNQUIST"]} + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) + kwargs_light = [{"Rs": 1, "amp": 1, "center_x": 0, "center_y": 0}] + kinematicAPI.kinematic_light_profile( + kwargs_light, + MGE_fit=False, + Hernquist_approx=True, + r_eff=None, + model_kinematics_bool=[True], + ) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST']} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}] - kinematicAPI.kinematic_light_profile(kwargs_light, MGE_fit=False, - Hernquist_approx=False, r_eff=None, analytic_kinematics=True) + kwargs_model = {"lens_light_model_list": ["HERNQUIST"]} + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) + kwargs_light = [{"Rs": 1, "amp": 1, "center_x": 0, "center_y": 0}] + kinematicAPI.kinematic_light_profile( + kwargs_light, + MGE_fit=False, + Hernquist_approx=False, + r_eff=None, + analytic_kinematics=True, + ) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kwargs_light = [{'Rs': 1, 'amp': 1, 'center_x': 0, 'center_y': 0}] - kinematicAPI.kinematic_lens_profiles(kwargs_light, MGE_fit=True, model_kinematics_bool=[True]) + kwargs_model = { + "lens_light_model_list": ["HERNQUIST"], + "lens_model_list": [], + } + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) + kwargs_light = [{"Rs": 1, "amp": 1, "center_x": 0, "center_y": 0}] + kinematicAPI.kinematic_lens_profiles( + kwargs_light, MGE_fit=True, model_kinematics_bool=[True] + ) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kinematicAPI.kinematic_lens_profiles(kwargs_lens=None, analytic_kinematics=True) + kwargs_model = { + "lens_light_model_list": ["HERNQUIST"], + "lens_model_list": [], + } + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + 
) + kinematicAPI.kinematic_lens_profiles( + kwargs_lens=None, analytic_kinematics=True + ) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': []} - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kwargs_lens_light = [{'Rs': 1, 'center_x': 0, 'center_y': 0}] - kinematicAPI.kinematic_light_profile(kwargs_lens_light, r_eff=None, MGE_fit=True, model_kinematics_bool=None, - Hernquist_approx=False, kwargs_mge=None) + kwargs_model = { + "lens_light_model_list": ["HERNQUIST"], + "lens_model_list": [], + } + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) + kwargs_lens_light = [{"Rs": 1, "center_x": 0, "center_y": 0}] + kinematicAPI.kinematic_light_profile( + kwargs_lens_light, + r_eff=None, + MGE_fit=True, + model_kinematics_bool=None, + Hernquist_approx=False, + kwargs_mge=None, + ) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - kwargs_model = {'lens_light_model_list': ['HERNQUIST'], 'lens_model_list': ['SIS']} - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kinematicAPI = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model='OM') - kinematicAPI.kinematic_lens_profiles(kwargs_lens, MGE_fit=True, model_kinematics_bool=None, theta_E=None, - kwargs_mge={}) - - -if __name__ == '__main__': + kwargs_model = { + "lens_light_model_list": ["HERNQUIST"], + "lens_model_list": ["SIS"], + } + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kinematicAPI = KinematicsAPI( + z_lens, + z_source, + kwargs_model, + kwargs_seeing={}, + kwargs_aperture={}, + anisotropy_model="OM", + ) + kinematicAPI.kinematic_lens_profiles( + kwargs_lens, + MGE_fit=True, + model_kinematics_bool=None, + theta_E=None, + kwargs_mge={}, + ) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_lens_profile.py b/test/test_Analysis/test_lens_profile.py index e0ad3da72..b75218b1c 100644 --- a/test/test_Analysis/test_lens_profile.py +++ b/test/test_Analysis/test_lens_profile.py @@ -10,86 +10,126 @@ class TestLensProfileAnalysis(object): - def setup_method(self): pass def test_profile_slope(self): - lens_model = LensProfileAnalysis(LensModel(lens_model_list=['SPP'])) - gamma_in = 2. - kwargs_lens = [{'theta_E': 1., 'gamma': gamma_in, 'center_x': 0, 'center_y': 0}] + lens_model = LensProfileAnalysis(LensModel(lens_model_list=["SPP"])) + gamma_in = 2.0 + kwargs_lens = [ + {"theta_E": 1.0, "gamma": gamma_in, "center_x": 0, "center_y": 0} + ] gamma_out = lens_model.profile_slope(kwargs_lens, radius=1) npt.assert_array_almost_equal(gamma_out, gamma_in, decimal=3) gamma_in = 1.7 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma_in, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + {"theta_E": 1.0, "gamma": gamma_in, "center_x": 0, "center_y": 0} + ] gamma_out = lens_model.profile_slope(kwargs_lens, radius=1) npt.assert_array_almost_equal(gamma_out, gamma_in, decimal=3) gamma_in = 2.5 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma_in, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + {"theta_E": 1.0, "gamma": gamma_in, "center_x": 0, "center_y": 0} + ] gamma_out = lens_model.profile_slope(kwargs_lens, radius=1) npt.assert_array_almost_equal(gamma_out, gamma_in, decimal=3) - lens_model = LensProfileAnalysis(LensModel(lens_model_list=['SPEP'])) - gamma_in = 2. 
+ lens_model = LensProfileAnalysis(LensModel(lens_model_list=["SPEP"])) + gamma_in = 2.0 phi, q = 0.34403343049704888, 0.89760957136967312 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_lens = [{'theta_E': 1.4516812130749424, 'e1': e1, 'e2': e2, 'center_x': -0.04507598845306314, - 'center_y': 0.054491803177414651, 'gamma': gamma_in}] + kwargs_lens = [ + { + "theta_E": 1.4516812130749424, + "e1": e1, + "e2": e2, + "center_x": -0.04507598845306314, + "center_y": 0.054491803177414651, + "gamma": gamma_in, + } + ] gamma_out = lens_model.profile_slope(kwargs_lens, radius=1.45) npt.assert_array_almost_equal(gamma_out, gamma_in, decimal=3) def test_effective_einstein_radius(self): - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - lensModel = LensProfileAnalysis(LensModel(lens_model_list=['SIS'])) + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + lensModel = LensProfileAnalysis(LensModel(lens_model_list=["SIS"])) ret = lensModel.effective_einstein_radius_grid(kwargs_lens, get_precision=True) assert len(ret) == 2 - npt.assert_almost_equal(ret[0], 1., decimal=2) - theta_E = lensModel.effective_einstein_radius(kwargs_lens, r_min=1e-3, r_max=1e1, num_points=30) + npt.assert_almost_equal(ret[0], 1.0, decimal=2) + theta_E = lensModel.effective_einstein_radius( + kwargs_lens, r_min=1e-3, r_max=1e1, num_points=30 + ) npt.assert_almost_equal(theta_E, 1, decimal=3) - kwargs_lens_bad = [{'theta_E': 100, 'center_x': 0, 'center_y': 0}] - ret_nan, precision = lensModel.effective_einstein_radius_grid(kwargs_lens_bad, - get_precision=True) + kwargs_lens_bad = [{"theta_E": 100, "center_x": 0, "center_y": 0}] + ret_nan, precision = lensModel.effective_einstein_radius_grid( + kwargs_lens_bad, get_precision=True + ) assert np.isnan(ret_nan) - theta_E = lensModel.effective_einstein_radius(kwargs_lens_bad, r_min=1e-3, r_max=1e1, num_points=30) + theta_E = lensModel.effective_einstein_radius( + kwargs_lens_bad, r_min=1e-3, r_max=1e1, num_points=30 + ) assert np.isnan(theta_E) # test interpolated profile numPix = 101 deltaPix = 0.02 from lenstronomy.Util import util + x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) from lenstronomy.LensModel.Profiles.sis import SIS + sis = SIS() - center_x, center_y = 0., -0. 
- kwargs_SIS = {'theta_E': 1., 'center_x': center_x, 'center_y': center_y} + center_x, center_y = 0.0, -0.0 + kwargs_SIS = {"theta_E": 1.0, "center_x": center_x, "center_y": center_y} f_ = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x, f_y = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) f_xx, f_xy, f_yx, f_yy = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) - kwargs_interpol = [{'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_), - 'f_x': util.array2image(f_x), 'f_y': util.array2image(f_y), 'f_xx': util.array2image(f_xx), - 'f_xy': util.array2image(f_xy), 'f_yy': util.array2image(f_yy)}] - lensModel = LensProfileAnalysis(LensModel(lens_model_list=['INTERPOL'])) - theta_E_return = lensModel.effective_einstein_radius_grid(kwargs_interpol, - get_precision=False, verbose=True, center_x=center_x, center_y=center_y) + kwargs_interpol = [ + { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_), + "f_x": util.array2image(f_x), + "f_y": util.array2image(f_y), + "f_xx": util.array2image(f_xx), + "f_xy": util.array2image(f_xy), + "f_yy": util.array2image(f_yy), + } + ] + lensModel = LensProfileAnalysis(LensModel(lens_model_list=["INTERPOL"])) + theta_E_return = lensModel.effective_einstein_radius_grid( + kwargs_interpol, + get_precision=False, + verbose=True, + center_x=center_x, + center_y=center_y, + ) npt.assert_almost_equal(theta_E_return, 1, decimal=2) # sub-critical mass profile - lensModel = LensProfileAnalysis(LensModel(lens_model_list=['NFW'])) - kwargs_nfw =[{'Rs': 1, 'alpha_Rs': 0.2, 'center_x': 0, 'center_y': 0}] - theta_E_subcrit = lensModel.effective_einstein_radius_grid(kwargs_nfw, get_precision=False) + lensModel = LensProfileAnalysis(LensModel(lens_model_list=["NFW"])) + kwargs_nfw = [{"Rs": 1, "alpha_Rs": 0.2, "center_x": 0, "center_y": 0}] + theta_E_subcrit = lensModel.effective_einstein_radius_grid( + kwargs_nfw, get_precision=False + ) assert np.isnan(theta_E_subcrit) - theta_E_subcrit, _ = lensModel.effective_einstein_radius_grid(kwargs_nfw, get_precision=True) + theta_E_subcrit, _ = lensModel.effective_einstein_radius_grid( + kwargs_nfw, get_precision=True + ) assert np.isnan(theta_E_subcrit) def test_external_lensing_effect(self): - lens_model_list = ['SHEAR'] - kwargs_lens = [{'gamma1': 0.1, 'gamma2': 0.01}] + lens_model_list = ["SHEAR"] + kwargs_lens = [{"gamma1": 0.1, "gamma2": 0.01}] lensModel = LensProfileAnalysis(LensModel(lens_model_list)) - alpha0_x, alpha0_y, kappa_ext, shear1, shear2 = lensModel.local_lensing_effect(kwargs_lens, model_list_bool=[0]) + alpha0_x, alpha0_y, kappa_ext, shear1, shear2 = lensModel.local_lensing_effect( + kwargs_lens, model_list_bool=[0] + ) print(alpha0_x, alpha0_y, kappa_ext, shear1, shear2) assert alpha0_x == 0 assert alpha0_y == 0 @@ -98,83 +138,118 @@ def test_external_lensing_effect(self): assert kappa_ext == 0 def test_multi_gaussian_lens(self): - kwargs_options = {'lens_model_list': ['SPEP']} + kwargs_options = {"lens_model_list": ["SPEP"]} lensModel = LensModel(**kwargs_options) lensAnalysis = LensProfileAnalysis(lens_model=lensModel) e1, e2 = param_util.phi_q2_ellipticity(0, 0.9) - kwargs_lens = [{'gamma': 1.8, 'theta_E': 0.6, 'e1': e1, 'e2': e2, 'center_x': 0.5, 'center_y': -0.1}] - amplitudes, sigmas, center_x, center_y = lensAnalysis.multi_gaussian_lens(kwargs_lens, n_comp=20) + kwargs_lens = [ + { + "gamma": 1.8, + "theta_E": 0.6, + "e1": e1, + "e2": e2, + "center_x": 0.5, 
+ "center_y": -0.1, + } + ] + amplitudes, sigmas, center_x, center_y = lensAnalysis.multi_gaussian_lens( + kwargs_lens, n_comp=20 + ) model = MultiGaussianKappa() x = np.logspace(-2, 0.5, 10) + 0.5 y = np.zeros_like(x) - 0.1 - f_xx, fxy, fyx, f_yy = model.hessian(x, y, amplitudes, sigmas, center_x=0.5, center_y=-0.1) + f_xx, fxy, fyx, f_yy = model.hessian( + x, y, amplitudes, sigmas, center_x=0.5, center_y=-0.1 + ) kappa_mge = (f_xx + f_yy) / 2 kappa_true = lensAnalysis._lens_model.kappa(x, y, kwargs_lens) - print(kappa_true/kappa_mge) + print(kappa_true / kappa_mge) for i in range(len(x)): - npt.assert_almost_equal(kappa_mge[i]/kappa_true[i], 1, decimal=1) + npt.assert_almost_equal(kappa_mge[i] / kappa_true[i], 1, decimal=1) def test_mass_fraction_within_radius(self): center_x, center_y = 0.5, -1 theta_E = 1.1 - kwargs_lens = [{'theta_E': 1.1, 'center_x': center_x, 'center_y': center_y}] - lensModel = LensModel(**{'lens_model_list': ['SIS']}) + kwargs_lens = [{"theta_E": 1.1, "center_x": center_x, "center_y": center_y}] + lensModel = LensModel(**{"lens_model_list": ["SIS"]}) lensAnalysis = LensProfileAnalysis(lens_model=lensModel) - kappa_mean_list = lensAnalysis.mass_fraction_within_radius(kwargs_lens, center_x, center_y, theta_E, numPix=100) + kappa_mean_list = lensAnalysis.mass_fraction_within_radius( + kwargs_lens, center_x, center_y, theta_E, numPix=100 + ) npt.assert_almost_equal(kappa_mean_list[0], 1, 2) def test_lens_center(self): center_x, center_y = 0.43, -0.67 - kwargs_lens = [{'theta_E': 1, 'center_x': center_x, 'center_y': center_y}] - lensModel = LensModel(**{'lens_model_list': ['SIS']}) + kwargs_lens = [{"theta_E": 1, "center_x": center_x, "center_y": center_y}] + lensModel = LensModel(**{"lens_model_list": ["SIS"]}) profileAnalysis = LensProfileAnalysis(lens_model=lensModel) center_x_out, center_y_out = profileAnalysis.convergence_peak(kwargs_lens) npt.assert_almost_equal(center_x_out, center_x, 2) npt.assert_almost_equal(center_y_out, center_y, 2) def test_mst_invariant_differential(self): - # testing with a SPP profile - lensModel = LensModel(**{'lens_model_list': ['SPP']}) + lensModel = LensModel(**{"lens_model_list": ["SPP"]}) profileAnalysis = LensProfileAnalysis(lens_model=lensModel) gamma_list = [2, 1.8, 2.2] theta_E_list = [1, 0.5, 2] for gamma in gamma_list: for theta_E in theta_E_list: - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}] - xi = profileAnalysis.mst_invariant_differential(kwargs_lens, radius=theta_E, center_x=None, - center_y=None, model_list_bool=None, num_points=10) + kwargs_lens = [ + {"theta_E": theta_E, "gamma": gamma, "center_x": 0, "center_y": 0} + ] + xi = profileAnalysis.mst_invariant_differential( + kwargs_lens, + radius=theta_E, + center_x=None, + center_y=None, + model_list_bool=None, + num_points=10, + ) xi_true = (gamma - 2) / theta_E npt.assert_almost_equal(xi, xi_true, decimal=3) # it should also work in the elliptical regime - lensModel = LensModel(**{'lens_model_list': ['EPL']}) + lensModel = LensModel(**{"lens_model_list": ["EPL"]}) profileAnalysis = LensProfileAnalysis(lens_model=lensModel) gamma_list = [2, 1.8, 2.2] theta_E_list = [1, 0.5, 2] q_list = [1, 0.9, 0.8, 0.7] for gamma in gamma_list: for theta_E in theta_E_list: - for q in q_list: - e1, e2 = param_util.phi_q2_ellipticity(phi=0, q=q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - - theta_E_prime = np.sqrt(2 * q / (1 + q ** 2)) * theta_E - xi = 
profileAnalysis.mst_invariant_differential(kwargs_lens, radius=theta_E_prime, center_x=None, - center_y=None, - model_list_bool=None, num_points=10) - print(theta_E, gamma, q, 'test') - xi_true = (gamma - 2) / theta_E_prime - npt.assert_almost_equal(xi, xi_true, decimal=2) + for q in q_list: + e1, e2 = param_util.phi_q2_ellipticity(phi=0, q=q) + kwargs_lens = [ + { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + + theta_E_prime = np.sqrt(2 * q / (1 + q**2)) * theta_E + xi = profileAnalysis.mst_invariant_differential( + kwargs_lens, + radius=theta_E_prime, + center_x=None, + center_y=None, + model_list_bool=None, + num_points=10, + ) + print(theta_E, gamma, q, "test") + xi_true = (gamma - 2) / theta_E_prime + npt.assert_almost_equal(xi, xi_true, decimal=2) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): raise ValueError() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_light2mass.py b/test/test_Analysis/test_light2mass.py index 50e84b21f..e89df68d7 100644 --- a/test/test_Analysis/test_light2mass.py +++ b/test/test_Analysis/test_light2mass.py @@ -6,7 +6,6 @@ class TestLight2Mass(object): - def setup_method(self): pass @@ -14,17 +13,25 @@ def test_light2mass_conversion(self): numPix = 100 deltaPix = 0.05 - lightModel = LightModel(light_model_list=['SERSIC_ELLIPSE', 'SERSIC']) - kwargs_lens_light = [{'R_sersic': 0.5, 'n_sersic': 4, 'amp': 2, 'e1': 0, 'e2': 0.05}, - {'R_sersic': 1.5, 'n_sersic': 1, 'amp': 2}] - - kwargs_interpol = light2mass.light2mass_interpol(lens_light_model_list=['SERSIC_ELLIPSE', 'SERSIC'], - kwargs_lens_light=kwargs_lens_light, numPix=numPix, - deltaPix=deltaPix, subgrid_res=1) + lightModel = LightModel(light_model_list=["SERSIC_ELLIPSE", "SERSIC"]) + kwargs_lens_light = [ + {"R_sersic": 0.5, "n_sersic": 4, "amp": 2, "e1": 0, "e2": 0.05}, + {"R_sersic": 1.5, "n_sersic": 1, "amp": 2}, + ] + + kwargs_interpol = light2mass.light2mass_interpol( + lens_light_model_list=["SERSIC_ELLIPSE", "SERSIC"], + kwargs_lens_light=kwargs_lens_light, + numPix=numPix, + deltaPix=deltaPix, + subgrid_res=1, + ) from lenstronomy.LensModel.lens_model import LensModel - lensModel = LensModel(lens_model_list=['INTERPOL_SCALED']) + + lensModel = LensModel(lens_model_list=["INTERPOL_SCALED"]) kwargs_lens = [kwargs_interpol] import lenstronomy.Util.util as util + x_grid, y_grid = util.make_grid(numPix, deltapix=deltaPix) kappa = lensModel.kappa(x_grid, y_grid, kwargs=kwargs_lens) kappa = util.array2image(kappa) @@ -43,5 +50,5 @@ def test_light2mass_conversion(self): npt.assert_almost_equal(flux[0, 0], kappa[0, 0], decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_light_profile.py b/test/test_Analysis/test_light_profile.py index f6a2b4460..758f284e5 100644 --- a/test/test_Analysis/test_light_profile.py +++ b/test/test_Analysis/test_light_profile.py @@ -12,42 +12,89 @@ class TestLightAnalysis(object): - def setup_method(self): pass def test_ellipticity(self): - # GAUSSIAN e1_in = 0.1 e2_in = 0 - kwargs_light = [{'amp': 1, 'sigma': 1., 'center_x': 0, 'center_y': 0, 'e1': e1_in, 'e2': e2_in}] - light_model_list = ['GAUSSIAN_ELLIPSE'] - lensAnalysis = LightProfileAnalysis(LightModel(light_model_list=light_model_list)) - e1, e2 = lensAnalysis.ellipticity(kwargs_light, center_x=0, center_y=0, model_bool_list=None, grid_spacing=0.1, - grid_num=200) + kwargs_light = [ + { + 
"amp": 1, + "sigma": 1.0, + "center_x": 0, + "center_y": 0, + "e1": e1_in, + "e2": e2_in, + } + ] + light_model_list = ["GAUSSIAN_ELLIPSE"] + lensAnalysis = LightProfileAnalysis( + LightModel(light_model_list=light_model_list) + ) + e1, e2 = lensAnalysis.ellipticity( + kwargs_light, + center_x=0, + center_y=0, + model_bool_list=None, + grid_spacing=0.1, + grid_num=200, + ) npt.assert_almost_equal(e1, e1_in, decimal=4) npt.assert_almost_equal(e2, e2_in, decimal=4) # off-centered e1_in = 0.1 e2_in = 0 - kwargs_light = [{'amp': 1, 'sigma': 1., 'center_x': .2, 'center_y': 0, 'e1': e1_in, 'e2': e2_in}] - light_model_list = ['GAUSSIAN_ELLIPSE'] - lensAnalysis = LightProfileAnalysis(LightModel(light_model_list=light_model_list)) - e1, e2 = lensAnalysis.ellipticity(kwargs_light, model_bool_list=None, grid_spacing=0.1, - grid_num=200) + kwargs_light = [ + { + "amp": 1, + "sigma": 1.0, + "center_x": 0.2, + "center_y": 0, + "e1": e1_in, + "e2": e2_in, + } + ] + light_model_list = ["GAUSSIAN_ELLIPSE"] + lensAnalysis = LightProfileAnalysis( + LightModel(light_model_list=light_model_list) + ) + e1, e2 = lensAnalysis.ellipticity( + kwargs_light, model_bool_list=None, grid_spacing=0.1, grid_num=200 + ) npt.assert_almost_equal(e1, e1_in, decimal=4) npt.assert_almost_equal(e2, e2_in, decimal=4) - #SERSIC + # SERSIC e1_in = 0.1 e2_in = 0 - kwargs_light = [{'amp': 1, 'n_sersic': 2., 'R_sersic': 1, 'center_x': 0,'center_y': 0, 'e1': e1_in, 'e2': e2_in}] - light_model_list = ['SERSIC_ELLIPSE'] - lensAnalysis = LightProfileAnalysis(LightModel(light_model_list=light_model_list)) - e1, e2 = lensAnalysis.ellipticity(kwargs_light, center_x=0, center_y=0, model_bool_list=None, grid_spacing=0.2, - grid_num=400, iterative=True, num_iterative=30) + kwargs_light = [ + { + "amp": 1, + "n_sersic": 2.0, + "R_sersic": 1, + "center_x": 0, + "center_y": 0, + "e1": e1_in, + "e2": e2_in, + } + ] + light_model_list = ["SERSIC_ELLIPSE"] + lensAnalysis = LightProfileAnalysis( + LightModel(light_model_list=light_model_list) + ) + e1, e2 = lensAnalysis.ellipticity( + kwargs_light, + center_x=0, + center_y=0, + model_bool_list=None, + grid_spacing=0.2, + grid_num=400, + iterative=True, + num_iterative=30, + ) print(e1, e2) npt.assert_almost_equal(e1, e1_in, decimal=3) npt.assert_almost_equal(e2, e2_in, decimal=3) @@ -55,104 +102,166 @@ def test_ellipticity(self): # Power-law e1_in = 0.3 e2_in = 0 - center_x, center_y = 0., 0 + center_x, center_y = 0.0, 0 kwargs_light = [ - {'gamma': 2., 'amp': 1, 'center_x': center_x, 'center_y': center_y, 'e1': e1_in, 'e2': e2_in}] - light_model_list = ['POWER_LAW'] - lensAnalysis = LightProfileAnalysis(LightModel(light_model_list=light_model_list)) - e1, e2 = lensAnalysis.ellipticity(kwargs_light, center_x=center_x, center_y=center_y, model_bool_list=None, - grid_spacing=0.05, grid_num=401, iterative=True, num_iterative=30) + { + "gamma": 2.0, + "amp": 1, + "center_x": center_x, + "center_y": center_y, + "e1": e1_in, + "e2": e2_in, + } + ] + light_model_list = ["POWER_LAW"] + lensAnalysis = LightProfileAnalysis( + LightModel(light_model_list=light_model_list) + ) + e1, e2 = lensAnalysis.ellipticity( + kwargs_light, + center_x=center_x, + center_y=center_y, + model_bool_list=None, + grid_spacing=0.05, + grid_num=401, + iterative=True, + num_iterative=30, + ) print(e1, e2) npt.assert_almost_equal(e1, e1_in, decimal=2) npt.assert_almost_equal(e2, e2_in, decimal=3) def test_half_light_radius(self): - Rs = 1. 
- kwargs_profile = [{'Rs': Rs, 'amp': 1., 'center_x': 0, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_profile = [{"Rs": Rs, "amp": 1.0, "center_x": 0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lensAnalysis = LightProfileAnalysis(LightModel(**kwargs_options)) r_eff_true = Rs / 0.551 - r_eff = lensAnalysis.half_light_radius(kwargs_profile, grid_num=500, grid_spacing=0.2) - npt.assert_almost_equal(r_eff/r_eff_true, 1, 2) + r_eff = lensAnalysis.half_light_radius( + kwargs_profile, grid_num=500, grid_spacing=0.2 + ) + npt.assert_almost_equal(r_eff / r_eff_true, 1, 2) # now we shift the center - Rs = 1. - kwargs_profile = [{'Rs': Rs, 'amp': 1., 'center_x': 1., 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_profile = [{"Rs": Rs, "amp": 1.0, "center_x": 1.0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lensAnalysis = LightProfileAnalysis(LightModel(**kwargs_options)) r_eff_true = Rs / 0.551 - r_eff = lensAnalysis.half_light_radius(kwargs_profile, grid_num=500, grid_spacing=0.2) + r_eff = lensAnalysis.half_light_radius( + kwargs_profile, grid_num=500, grid_spacing=0.2 + ) npt.assert_almost_equal(r_eff / r_eff_true, 1, 2) # now we add ellipticity - Rs = 1. - kwargs_profile = [{'Rs': Rs, 'amp': 1., 'e1': 0.1, 'e2': -0.1, 'center_x': 0., 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST_ELLIPSE']} + Rs = 1.0 + kwargs_profile = [ + { + "Rs": Rs, + "amp": 1.0, + "e1": 0.1, + "e2": -0.1, + "center_x": 0.0, + "center_y": 0, + } + ] + kwargs_options = {"light_model_list": ["HERNQUIST_ELLIPSE"]} lensAnalysis = LightProfileAnalysis(LightModel(**kwargs_options)) r_eff_true = Rs / 0.551 - r_eff = lensAnalysis.half_light_radius(kwargs_profile, grid_num=500, grid_spacing=0.2) + r_eff = lensAnalysis.half_light_radius( + kwargs_profile, grid_num=500, grid_spacing=0.2 + ) npt.assert_almost_equal(r_eff / r_eff_true, 1, 2) def test_radial_profile(self): - Rs = 1. - kwargs_light = [{'Rs': Rs, 'amp': 1., 'center_x': 0, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_light = [{"Rs": Rs, "amp": 1.0, "center_x": 0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) r_list = np.linspace(start=0.01, stop=10, num=10) - I_r = profile.radial_light_profile(r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None) + I_r = profile.radial_light_profile( + r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None + ) I_r_true = lightModel.surface_brightness(r_list, 0, kwargs_light) npt.assert_almost_equal(I_r, I_r_true, decimal=5) # test off-center - Rs = 1. 
- kwargs_light = [{'Rs': Rs, 'amp': 1., 'center_x': 1., 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_light = [{"Rs": Rs, "amp": 1.0, "center_x": 1.0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) r_list = np.linspace(start=0.01, stop=10, num=10) - I_r = profile.radial_light_profile(r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None) + I_r = profile.radial_light_profile( + r_list, kwargs_light, center_x=None, center_y=None, model_bool_list=None + ) I_r_true = lightModel.surface_brightness(r_list + 1, 0, kwargs_light) npt.assert_almost_equal(I_r, I_r_true, decimal=5) def test_multi_gaussian_decomposition(self): - Rs = 1. - kwargs_light = [{'Rs': Rs, 'amp': 1., 'center_x': 0, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_light = [{"Rs": Rs, "amp": 1.0, "center_x": 0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) - amplitudes, sigmas, center_x, center_y = profile.multi_gaussian_decomposition(kwargs_light, grid_spacing=0.01, grid_num=100, model_bool_list=None, n_comp=20, - center_x=None, center_y=None) + amplitudes, sigmas, center_x, center_y = profile.multi_gaussian_decomposition( + kwargs_light, + grid_spacing=0.01, + grid_num=100, + model_bool_list=None, + n_comp=20, + center_x=None, + center_y=None, + ) mge = MultiGaussian() r_array = np.logspace(start=-2, stop=0.5, num=10) - print(r_array, 'test r_array') - flux = mge.function(r_array, 0, amp=amplitudes, sigma=sigmas, center_x=center_x, center_y=center_y) + print(r_array, "test r_array") + flux = mge.function( + r_array, + 0, + amp=amplitudes, + sigma=sigmas, + center_x=center_x, + center_y=center_y, + ) flux_true = lightModel.surface_brightness(r_array, 0, kwargs_light) npt.assert_almost_equal(flux / flux_true, 1, decimal=2) # test off-center - Rs = 1. - offset = 1. 
- kwargs_light = [{'Rs': Rs, 'amp': 1., 'center_x': offset, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + offset = 1.0 + kwargs_light = [{"Rs": Rs, "amp": 1.0, "center_x": offset, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) - amplitudes, sigmas, center_x, center_y = profile.multi_gaussian_decomposition(kwargs_light, grid_spacing=0.01, - grid_num=100, model_bool_list=None, - n_comp=20, - center_x=None, center_y=None) + amplitudes, sigmas, center_x, center_y = profile.multi_gaussian_decomposition( + kwargs_light, + grid_spacing=0.01, + grid_num=100, + model_bool_list=None, + n_comp=20, + center_x=None, + center_y=None, + ) assert center_x == offset assert center_y == 0 mge = MultiGaussian() r_array = np.logspace(start=-2, stop=0.5, num=10) - print(r_array, 'test r_array') - flux = mge.function(r_array, 0, amp=amplitudes, sigma=sigmas, center_x=center_x, center_y=center_y) + print(r_array, "test r_array") + flux = mge.function( + r_array, + 0, + amp=amplitudes, + sigma=sigmas, + center_x=center_x, + center_y=center_y, + ) flux_true = lightModel.surface_brightness(r_array, 0, kwargs_light) npt.assert_almost_equal(flux / flux_true, 1, decimal=2) - """ import matplotlib.pyplot as plt @@ -164,16 +273,21 @@ def test_multi_gaussian_decomposition(self): """ def test_multi_gaussian_decomposition_ellipse(self): - Rs = 1. - kwargs_light = [{'Rs': Rs, 'amp': 1., 'center_x': 0, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST']} + Rs = 1.0 + kwargs_light = [{"Rs": Rs, "amp": 1.0, "center_x": 0, "center_y": 0}] + kwargs_options = {"light_model_list": ["HERNQUIST"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) - kwargs_mge = profile.multi_gaussian_decomposition_ellipse(kwargs_light, grid_spacing=0.01, - grid_num=100, model_bool_list=None, - n_comp=20, - center_x=None, center_y=None) + kwargs_mge = profile.multi_gaussian_decomposition_ellipse( + kwargs_light, + grid_spacing=0.01, + grid_num=100, + model_bool_list=None, + n_comp=20, + center_x=None, + center_y=None, + ) mge = MultiGaussianEllipse() r_array = np.logspace(start=-2, stop=0.5, num=10) flux = mge.function(r_array, 0, **kwargs_mge) @@ -182,18 +296,25 @@ def test_multi_gaussian_decomposition_ellipse(self): # elliptic - Rs = 1. 
- kwargs_light = [{'Rs': Rs, 'amp': 1., 'e1': 0.1, 'e2': 0, 'center_x': 0, 'center_y': 0}] - kwargs_options = {'light_model_list': ['HERNQUIST_ELLIPSE']} + Rs = 1.0 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0, "e1": 0.1, "e2": 0, "center_x": 0, "center_y": 0} + ] + kwargs_options = {"light_model_list": ["HERNQUIST_ELLIPSE"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) - kwargs_mge = profile.multi_gaussian_decomposition_ellipse(kwargs_light, grid_spacing=0.1, - grid_num=400, model_bool_list=None, - n_comp=20, - center_x=None, center_y=None) - - print(kwargs_mge['e1']) + kwargs_mge = profile.multi_gaussian_decomposition_ellipse( + kwargs_light, + grid_spacing=0.1, + grid_num=400, + model_bool_list=None, + n_comp=20, + center_x=None, + center_y=None, + ) + + print(kwargs_mge["e1"]) mge = MultiGaussianEllipse() r_array = np.logspace(start=-2, stop=0.5, num=10) flux = mge.function(r_array, 0, **kwargs_mge) @@ -203,16 +324,18 @@ def test_multi_gaussian_decomposition_ellipse(self): def test_flux_components(self): amp = 1 - kwargs_profile = [{'amp': amp}] - kwargs_options = {'light_model_list': ['UNIFORM']} + kwargs_profile = [{"amp": amp}] + kwargs_options = {"light_model_list": ["UNIFORM"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) grid_num = 40 grid_spacing = 0.1 - flux_list, R_h_list = profile.flux_components(kwargs_profile, grid_num=grid_num, grid_spacing=grid_spacing) + flux_list, R_h_list = profile.flux_components( + kwargs_profile, grid_num=grid_num, grid_spacing=grid_spacing + ) assert len(flux_list) == 1 - area = (grid_num * grid_spacing)**2 - npt.assert_almost_equal(flux_list[0], area*amp, decimal=8) + area = (grid_num * grid_spacing) ** 2 + npt.assert_almost_equal(flux_list[0], area * amp, decimal=8) phi, q = -0.37221683730659516, 0.70799587973181288 e1, e2 = param_util.phi_q2_ellipticity(phi, q) @@ -220,23 +343,37 @@ def test_flux_components(self): phi2, q2 = 0.14944144075912402, 0.4105628122365978 e12, e22 = param_util.phi_q2_ellipticity(phi2, q2) - kwargs_profile = [{'Rs': 0.16350224766074103, 'e1': e12, 'e2': e22, 'center_x': -0.019983826426838536, - 'center_y': 0.90000011282957304, 'amp': 1.3168943578511678}, - {'Rs': 0.29187068596715743, 'e1': e1, 'e2': e2, 'center_x': 0.020568531548241405, - 'center_y': 0.036038490364800925, 'Ra': 0.020000382843298824, - 'amp': 85.948773973262391}] - kwargs_options = {'light_model_list': ['HERNQUIST_ELLIPSE', 'PJAFFE_ELLIPSE']} + kwargs_profile = [ + { + "Rs": 0.16350224766074103, + "e1": e12, + "e2": e22, + "center_x": -0.019983826426838536, + "center_y": 0.90000011282957304, + "amp": 1.3168943578511678, + }, + { + "Rs": 0.29187068596715743, + "e1": e1, + "e2": e2, + "center_x": 0.020568531548241405, + "center_y": 0.036038490364800925, + "Ra": 0.020000382843298824, + "amp": 85.948773973262391, + }, + ] + kwargs_options = {"light_model_list": ["HERNQUIST_ELLIPSE", "PJAFFE_ELLIPSE"]} lightModel = LightModel(**kwargs_options) profile = LightProfileAnalysis(light_model=lightModel) - flux_list, R_h_list = profile.flux_components(kwargs_profile, grid_num=400, grid_spacing=0.01) + flux_list, R_h_list = profile.flux_components( + kwargs_profile, grid_num=400, grid_spacing=0.01 + ) assert len(flux_list) == 2 npt.assert_almost_equal(flux_list[0], 0.1940428118053717, decimal=6) npt.assert_almost_equal(flux_list[1], 3.0964046927612707, decimal=6) - - """ def test_light2mass_mge(self): @@ -324,11 +461,10 @@ def test_lens_center(self): class 
TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): raise ValueError() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_multi_patch_reconstruction.py b/test/test_Analysis/test_multi_patch_reconstruction.py index 334efc32d..766eca3c9 100644 --- a/test/test_Analysis/test_multi_patch_reconstruction.py +++ b/test/test_Analysis/test_multi_patch_reconstruction.py @@ -13,65 +13,98 @@ import numpy.testing as npt import unittest -from lenstronomy.Analysis.multi_patch_reconstruction import MultiPatchReconstruction, _update_frame_size +from lenstronomy.Analysis.multi_patch_reconstruction import ( + MultiPatchReconstruction, + _update_frame_size, +) class TestMultiPatchReconstruction(object): - def setup_method(self): # data specifics - sigma_bkg = .05 # background noise per pixel (Gaussian) - exp_time = 100. # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) + sigma_bkg = 0.05 # background noise per pixel (Gaussian) + exp_time = 100.0 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) fwhm = 0.1 # full width half max of PSF (only valid when psf_type='gaussian') - psf_type = 'GAUSSIAN' # 'GAUSSIAN', 'PIXEL', 'NONE' + psf_type = "GAUSSIAN" # 'GAUSSIAN', 'PIXEL', 'NONE' # generate the coordinate grid and image properties - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) - kwargs_data['exposure_time'] = exp_time * np.ones_like(kwargs_data['image_data']) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) + kwargs_data["exposure_time"] = exp_time * np.ones_like( + kwargs_data["image_data"] + ) data_class = ImageData(**kwargs_data) # generate the psf variables - kwargs_psf = {'psf_type': psf_type, 'pixel_size': deltaPix, 'fwhm': fwhm} + kwargs_psf = {"psf_type": psf_type, "pixel_size": deltaPix, "fwhm": fwhm} # kwargs_psf = sim_util.psf_configure_simple(psf_type=psf_type, fwhm=fwhm, kernelsize=kernel_size, deltaPix=deltaPix, kernel=kernel) psf_class = PSF(**kwargs_psf) # lensing quantities - kwargs_shear = {'gamma1': 0.02, 'gamma2': -0.04} # shear values to the source plane - kwargs_spemd = {'theta_E': 1.26, 'gamma': 2., 'center_x': 0.0, 'center_y': 0.0, 'e1': -0.1, - 'e2': 0.05} # parameters of the deflector lens model + kwargs_shear = { + "gamma1": 0.02, + "gamma2": -0.04, + } # shear values to the source plane + kwargs_spemd = { + "theta_E": 1.26, + "gamma": 2.0, + "center_x": 0.0, + "center_y": 0.0, + "e1": -0.1, + "e2": 0.05, + } # parameters of the deflector lens model # the lens model is a supperposition of an elliptical lens model with external shear - lens_model_list = ['EPL', 'SHEAR'] + lens_model_list = ["EPL", "SHEAR"] kwargs_lens_true = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # choice of source type - source_type = 'SERSIC' # 'SERSIC' or 'SHAPELETS' + source_type = "SERSIC" # 'SERSIC' or 'SHAPELETS' - source_x = 0. 
+ source_x = 0.0 source_y = 0.05 # Sersic parameters in the initial simulation phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_sersic_source = {'amp': 1000, 'R_sersic': 0.05, 'n_sersic': 1, 'e1': e1, 'e2': e2, 'center_x': source_x, - 'center_y': source_y} + kwargs_sersic_source = { + "amp": 1000, + "R_sersic": 0.05, + "n_sersic": 1, + "e1": e1, + "e2": e2, + "center_x": source_x, + "center_y": source_y, + } # kwargs_else = {'sourcePos_x': source_x, 'sourcePos_y': source_y, 'quasar_amp': 400., 'gamma1_foreground': 0.0, 'gamma2_foreground':-0.0} - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source_true = [kwargs_sersic_source] source_model_class = LightModel(light_model_list=source_model_list) lensEquationSolver = LensEquationSolver(lens_model_class) - x_image, y_image = lensEquationSolver.findBrightImage(source_x, source_y, kwargs_lens_true, numImages=4, - min_distance=deltaPix, search_window=numPix * deltaPix) + x_image, y_image = lensEquationSolver.findBrightImage( + source_x, + source_y, + kwargs_lens_true, + numImages=4, + min_distance=deltaPix, + search_window=numPix * deltaPix, + ) mag = lens_model_class.magnification(x_image, y_image, kwargs=kwargs_lens_true) - kwargs_numerics = {'supersampling_factor': 1} + kwargs_numerics = {"supersampling_factor": 1} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - kwargs_numerics=kwargs_numerics) + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + kwargs_numerics=kwargs_numerics, + ) # generate image model = imageModel.image(kwargs_lens_true, kwargs_source_true) @@ -80,11 +113,12 @@ def setup_method(self): image_sim = model + bkg + poisson data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - } + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + } # make cutous and data instances of them x_pos, y_pos = data_class.map_coord2pix(x_image, y_image) @@ -95,31 +129,54 @@ def setup_method(self): n_cut = 12 x_c = int(x_pos[i]) y_c = int(y_pos[i]) - image_cut = image_sim[int(y_c - n_cut):int(y_c + n_cut), int(x_c - n_cut):int(x_c + n_cut)] - exposure_map_cut = data_class.exposure_map[int(y_c - n_cut):int(y_c + n_cut), - int(x_c - n_cut):int(x_c + n_cut)] + image_cut = image_sim[ + int(y_c - n_cut) : int(y_c + n_cut), int(x_c - n_cut) : int(x_c + n_cut) + ] + exposure_map_cut = data_class.exposure_map[ + int(y_c - n_cut) : int(y_c + n_cut), int(x_c - n_cut) : int(x_c + n_cut) + ] kwargs_data_i = { - 'background_rms': data_class.background_rms, - 'exposure_time': exposure_map_cut, - 'ra_at_xy_0': ra_grid[y_c - n_cut, x_c - n_cut], 'dec_at_xy_0': dec_grid[y_c - n_cut, x_c - n_cut], - 'transform_pix2angle': data_class.transform_pix2angle - , 'image_data': image_cut + "background_rms": data_class.background_rms, + "exposure_time": exposure_map_cut, + "ra_at_xy_0": ra_grid[y_c - n_cut, x_c - n_cut], + "dec_at_xy_0": dec_grid[y_c - n_cut, x_c - n_cut], + "transform_pix2angle": data_class.transform_pix2angle, + "image_data": image_cut, } multi_band_list.append([kwargs_data_i, kwargs_psf, kwargs_numerics]) - kwargs_params = {'kwargs_lens': kwargs_lens_true, 'kwargs_source': kwargs_source_true} - self.multiPatch = MultiPatchReconstruction(multi_band_list, kwargs_model, kwargs_params, - 
multi_band_type='joint-linear', kwargs_likelihood=None, verbose=True) + kwargs_params = { + "kwargs_lens": kwargs_lens_true, + "kwargs_source": kwargs_source_true, + } + self.multiPatch = MultiPatchReconstruction( + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="joint-linear", + kwargs_likelihood=None, + verbose=True, + ) self.data_class = data_class self.model = model self.lens_model_class = lens_model_class self.kwargs_lens = kwargs_lens_true # test multi_patch with initial pixel grid - kwargs_pixel_grid = {'nx': numPix, 'ny': numPix, 'transform_pix2angle': kwargs_data['transform_pix2angle'], - 'ra_at_xy_0': kwargs_data['ra_at_xy_0'], 'dec_at_xy_0': kwargs_data['dec_at_xy_0']} - multiPatch = MultiPatchReconstruction(multi_band_list, kwargs_model, kwargs_params, - multi_band_type='joint-linear', kwargs_pixel_grid=kwargs_pixel_grid) + kwargs_pixel_grid = { + "nx": numPix, + "ny": numPix, + "transform_pix2angle": kwargs_data["transform_pix2angle"], + "ra_at_xy_0": kwargs_data["ra_at_xy_0"], + "dec_at_xy_0": kwargs_data["dec_at_xy_0"], + } + multiPatch = MultiPatchReconstruction( + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="joint-linear", + kwargs_pixel_grid=kwargs_pixel_grid, + ) def test_pixel_grid(self): pixel_grid = self.multiPatch.pixel_grid_joint @@ -137,23 +194,32 @@ def test_image_joint(self): x0, y0 = self.data_class.map_coord2pix(ra, dec) # cutout original data = self.data_class.data - data_cut = data[int(y0):int(y0+ny), int(x0):int(x0+nx)] - model_cut = self.model[int(y0):int(y0 + ny), int(x0):int(x0 + nx)] + data_cut = data[int(y0) : int(y0 + ny), int(x0) : int(x0 + nx)] + model_cut = self.model[int(y0) : int(y0 + ny), int(x0) : int(x0 + nx)] # compare with original - npt.assert_almost_equal(data_cut[image_joint > 0], image_joint[image_joint > 0], decimal=5) + npt.assert_almost_equal( + data_cut[image_joint > 0], image_joint[image_joint > 0], decimal=5 + ) model_cut[model_joint == 0] = 0 - print(np.sum(model_cut), np.sum(model_joint), 'test sum') - #import matplotlib.pyplot as plt - #plt.matshow((model_joint - model_cut)) - #plt.show() + print(np.sum(model_cut), np.sum(model_joint), "test sum") + # import matplotlib.pyplot as plt + # plt.matshow((model_joint - model_cut)) + # plt.show() - #plt.matshow(model_cut) - #plt.show() + # plt.matshow(model_cut) + # plt.show() # TODO make this test more precise (to do with narrower PSF convolution?) 
- npt.assert_almost_equal(model_cut[model_joint > 0], model_joint[model_joint > 0], decimal=1) + npt.assert_almost_equal( + model_cut[model_joint > 0], model_joint[model_joint > 0], decimal=1 + ) def test_lens_model_joint(self): - kappa_joint, magnification_joint, alpha_x_joint, alpha_y_joint = self.multiPatch.lens_model_joint() + ( + kappa_joint, + magnification_joint, + alpha_x_joint, + alpha_y_joint, + ) = self.multiPatch.lens_model_joint() # compute pixel shift from original pixel_grid = self.multiPatch.pixel_grid_joint @@ -163,22 +229,27 @@ def test_lens_model_joint(self): # cutout original x_grid, y_grid = self.data_class.pixel_coordinates kappa = self.lens_model_class.kappa(x_grid, y_grid, self.kwargs_lens) - kappa_cut = kappa[int(y0):int(y0+ny), int(x0):int(x0+nx)] + kappa_cut = kappa[int(y0) : int(y0 + ny), int(x0) : int(x0 + nx)] # compare with original - npt.assert_almost_equal(kappa_cut[kappa_joint > 0], kappa_joint[kappa_joint > 0], decimal=5) + npt.assert_almost_equal( + kappa_cut[kappa_joint > 0], kappa_joint[kappa_joint > 0], decimal=5 + ) alpha_x, alpha_y = self.lens_model_class.alpha(x_grid, y_grid, self.kwargs_lens) - alpha_x_cut = alpha_x[int(y0):int(y0 + ny), int(x0):int(x0 + nx)] + alpha_x_cut = alpha_x[int(y0) : int(y0 + ny), int(x0) : int(x0 + nx)] # compare with original - npt.assert_almost_equal(alpha_x_cut[alpha_x_joint > 0], alpha_x_joint[alpha_x_joint > 0], decimal=5) + npt.assert_almost_equal( + alpha_x_cut[alpha_x_joint > 0], alpha_x_joint[alpha_x_joint > 0], decimal=5 + ) def test_source(self): - source, coords = self.multiPatch.source(num_pix=50, delta_pix=0.01, center=None) nx, ny = np.shape(source) assert nx == 50 - source, coords = self.multiPatch.source(num_pix=50, delta_pix=0.01, center=[0, 0]) + source, coords = self.multiPatch.source( + num_pix=50, delta_pix=0.01, center=[0, 0] + ) nx, ny = np.shape(source) assert nx == 50 @@ -188,12 +259,17 @@ def test__update_frame_size(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - MultiPatchReconstruction(multi_band_list=[], kwargs_model={}, kwargs_params={}, - multi_band_type='multi-linear', kwargs_likelihood=None, verbose=True) + MultiPatchReconstruction( + multi_band_list=[], + kwargs_model={}, + kwargs_params={}, + multi_band_type="multi-linear", + kwargs_likelihood=None, + verbose=True, + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Analysis/test_td_cosmography.py b/test/test_Analysis/test_td_cosmography.py index c5c6d7b54..22401e27f 100644 --- a/test/test_Analysis/test_td_cosmography.py +++ b/test/test_Analysis/test_td_cosmography.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import pytest @@ -11,35 +11,56 @@ class TestTDCosmography(object): - def setup_method(self): - kwargs_model = {'lens_light_model_list': ['HERNQUIST'], - 'lens_model_list': ['SIE'], - 'point_source_model_list': ['LENSED_POSITION']} + kwargs_model = { + "lens_light_model_list": ["HERNQUIST"], + "lens_model_list": ["SIE"], + "point_source_model_list": ["LENSED_POSITION"], + } z_lens = 0.5 z_source = 2.5 R_slit = 3.8 - dR_slit = 1. 
- aperture_type = 'slit' - kwargs_aperture = {'aperture_type': aperture_type, 'center_ra': 0, 'width': dR_slit, 'length': R_slit, - 'angle': 0, 'center_dec': 0} + dR_slit = 1.0 + aperture_type = "slit" + kwargs_aperture = { + "aperture_type": aperture_type, + "center_ra": 0, + "width": dR_slit, + "length": R_slit, + "angle": 0, + "center_dec": 0, + } psf_fwhm = 0.7 - kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} + kwargs_seeing = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} TDCosmography(z_lens, z_source, kwargs_model) from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) - self.td_cosmo = TDCosmography(z_lens, z_source, kwargs_model, cosmo_fiducial=cosmo, lens_model_kinematics_bool=None, - kwargs_aperture = kwargs_aperture, kwargs_seeing = kwargs_seeing, - light_model_kinematics_bool=None) - self.lens = LensModel(lens_model_list=['SIE'], cosmo=cosmo, z_lens=z_lens, z_source=z_source) + self.td_cosmo = TDCosmography( + z_lens, + z_source, + kwargs_model, + cosmo_fiducial=cosmo, + lens_model_kinematics_bool=None, + kwargs_aperture=kwargs_aperture, + kwargs_seeing=kwargs_seeing, + light_model_kinematics_bool=None, + ) + self.lens = LensModel( + lens_model_list=["SIE"], cosmo=cosmo, z_lens=z_lens, z_source=z_source + ) self.solver = LensEquationSolver(lensModel=self.lens) - self.kwargs_lens = [{'theta_E': 1, 'e1': 0.1, 'e2': -0.2, 'center_x': 0, 'center_y': 0}] + self.kwargs_lens = [ + {"theta_E": 1, "e1": 0.1, "e2": -0.2, "center_x": 0, "center_y": 0} + ] source_x, source_y = 0, 0.05 - image_x, image_y = self.solver.image_position_from_source(source_x, source_y, self.kwargs_lens, min_distance=0.1, search_window=10) - self.kwargs_ps = [{'ra_image': image_x, 'dec_image': image_y}] + image_x, image_y = self.solver.image_position_from_source( + source_x, source_y, self.kwargs_lens, min_distance=0.1, search_window=10 + ) + self.kwargs_ps = [{"ra_image": image_x, "dec_image": image_y}] self.image_x, self.image_y = image_x, image_y def test_time_delays(self): @@ -49,18 +70,21 @@ def test_time_delays(self): def test_fermat_potential(self): fermat_pot = self.td_cosmo.fermat_potential(self.kwargs_lens, self.kwargs_ps) - fermat_pot_true = self.lens.fermat_potential(self.image_x, self.image_y, self.kwargs_lens) + fermat_pot_true = self.lens.fermat_potential( + self.image_x, self.image_y, self.kwargs_lens + ) npt.assert_almost_equal(fermat_pot, fermat_pot_true, decimal=6) diff = 0.1 - kwargs_ps = [{'ra_image': self.image_x+diff, 'dec_image': self.image_y}] + kwargs_ps = [{"ra_image": self.image_x + diff, "dec_image": self.image_y}] fermat_pot = self.td_cosmo.fermat_potential(self.kwargs_lens, kwargs_ps) - fermat_pot_true = self.lens.fermat_potential(self.image_x+diff, self.image_y, self.kwargs_lens) + fermat_pot_true = self.lens.fermat_potential( + self.image_x + diff, self.image_y, self.kwargs_lens + ) ratio = fermat_pot / fermat_pot_true assert np.max(np.abs(ratio)) > 1.05 def test_cosmo_inference(self): - # set up a cosmology # compute image postions # compute J and velocity dispersion @@ -68,48 +92,83 @@ def test_cosmo_inference(self): D_d = self.td_cosmo._lens_cosmo.dd D_s = self.td_cosmo._lens_cosmo.ds D_ds = self.td_cosmo._lens_cosmo.dds - fermat_potential_list = self.td_cosmo.fermat_potential(self.kwargs_lens, self.kwargs_ps) - dt_list = self.td_cosmo.time_delays(self.kwargs_lens, self.kwargs_ps, kappa_ext=0) + fermat_potential_list = self.td_cosmo.fermat_potential( + self.kwargs_lens, self.kwargs_ps + ) + dt_list = self.td_cosmo.time_delays( + 
self.kwargs_lens, self.kwargs_ps, kappa_ext=0 + ) dt = dt_list[0] - dt_list[1] d_fermat = fermat_potential_list[0] - fermat_potential_list[1] - D_dt_infered = self.td_cosmo.ddt_from_time_delay(d_fermat_model=d_fermat, dt_measured=dt) + D_dt_infered = self.td_cosmo.ddt_from_time_delay( + d_fermat_model=d_fermat, dt_measured=dt + ) npt.assert_almost_equal(D_dt_infered, D_dt, decimal=5) r_eff = 0.5 - kwargs_lens_light = [{'Rs': r_eff * 0.551, 'center_x': 0, 'center_y': 0}] - kwargs_anisotropy = {'r_ani': 1} - - anisotropy_model = 'OM' - kwargs_numerics_galkin = {'interpol_grid_num': 500, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001} - self.td_cosmo.kinematics_modeling_settings(anisotropy_model, kwargs_numerics_galkin, analytic_kinematics=True, - Hernquist_approx=False, MGE_light=False, MGE_mass=False) - - J = self.td_cosmo.velocity_dispersion_dimension_less(self.kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff, - theta_E=self.kwargs_lens[0]['theta_E'], gamma=2) - - J_map = self.td_cosmo.velocity_dispersion_map_dimension_less(self.kwargs_lens, kwargs_lens_light, - kwargs_anisotropy, r_eff=r_eff, - theta_E=self.kwargs_lens[0]['theta_E'], gamma=2) + kwargs_lens_light = [{"Rs": r_eff * 0.551, "center_x": 0, "center_y": 0}] + kwargs_anisotropy = {"r_ani": 1} + + anisotropy_model = "OM" + kwargs_numerics_galkin = { + "interpol_grid_num": 500, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + } + self.td_cosmo.kinematics_modeling_settings( + anisotropy_model, + kwargs_numerics_galkin, + analytic_kinematics=True, + Hernquist_approx=False, + MGE_light=False, + MGE_mass=False, + ) + + J = self.td_cosmo.velocity_dispersion_dimension_less( + self.kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=self.kwargs_lens[0]["theta_E"], + gamma=2, + ) + + J_map = self.td_cosmo.velocity_dispersion_map_dimension_less( + self.kwargs_lens, + kwargs_lens_light, + kwargs_anisotropy, + r_eff=r_eff, + theta_E=self.kwargs_lens[0]["theta_E"], + gamma=2, + ) assert len(J_map) == 1 - npt.assert_almost_equal(J_map[0]/J, 1, decimal=1) - sigma_v2 = J * D_s/D_ds * const.c ** 2 - sigma_v = np.sqrt(sigma_v2) / 1000. 
# convert to [km/s] - print(sigma_v, 'test sigma_v') + npt.assert_almost_equal(J_map[0] / J, 1, decimal=1) + sigma_v2 = J * D_s / D_ds * const.c**2 + sigma_v = np.sqrt(sigma_v2) / 1000.0 # convert to [km/s] + print(sigma_v, "test sigma_v") Ds_Dds = self.td_cosmo.ds_dds_from_kinematics(sigma_v, J, kappa_s=0, kappa_ds=0) - npt.assert_almost_equal(Ds_Dds, D_s/D_ds) + npt.assert_almost_equal(Ds_Dds, D_s / D_ds) # now we perform a mass-sheet transform in the observables but leave the models identical with a convergence correction kappa_s = 0.5 - dt_list = self.td_cosmo.time_delays(self.kwargs_lens, self.kwargs_ps, kappa_ext=kappa_s) - sigma_v_kappa = sigma_v * np.sqrt(1-kappa_s) + dt_list = self.td_cosmo.time_delays( + self.kwargs_lens, self.kwargs_ps, kappa_ext=kappa_s + ) + sigma_v_kappa = sigma_v * np.sqrt(1 - kappa_s) dt = dt_list[0] - dt_list[1] - D_dt_infered, D_d_infered = self.td_cosmo.ddt_dd_from_time_delay_and_kinematics(d_fermat_model=d_fermat, dt_measured=dt, - sigma_v_measured=sigma_v_kappa, J=J, kappa_s=kappa_s, - kappa_ds=0, kappa_d=0) + D_dt_infered, D_d_infered = self.td_cosmo.ddt_dd_from_time_delay_and_kinematics( + d_fermat_model=d_fermat, + dt_measured=dt, + sigma_v_measured=sigma_v_kappa, + J=J, + kappa_s=kappa_s, + kappa_ds=0, + kappa_d=0, + ) npt.assert_almost_equal(D_dt_infered, D_dt, decimal=6) npt.assert_almost_equal(D_d_infered, D_d, decimal=6) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Conf/test_config_loader.py b/test/test_Conf/test_config_loader.py index 79d9f4457..a4c12d0e5 100644 --- a/test/test_Conf/test_config_loader.py +++ b/test/test_Conf/test_config_loader.py @@ -1,22 +1,21 @@ - import pytest from lenstronomy.Conf import config_loader def test_numba_conf(): numba_conf = config_loader.numba_conf() - assert 'nopython' in numba_conf - assert 'cache' in numba_conf - assert 'parallel' in numba_conf - assert 'enable' in numba_conf - assert 'fastmath' in numba_conf - assert 'error_model' in numba_conf + assert "nopython" in numba_conf + assert "cache" in numba_conf + assert "parallel" in numba_conf + assert "enable" in numba_conf + assert "fastmath" in numba_conf + assert "error_model" in numba_conf def test_conventions_conf(): conf = config_loader.conventions_conf() - assert 'sersic_major_axis' in conf + assert "sersic_major_axis" in conf -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_background.py b/test/test_Cosmo/test_background.py index e1fc43db3..46f131fa1 100644 --- a/test/test_Cosmo/test_background.py +++ b/test/test_Cosmo/test_background.py @@ -5,23 +5,24 @@ class TestCosmoProp(object): - def setup_method(self): self.z_L = 0.8 self.z_S = 3.0 from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self.bkg = Background(cosmo=cosmo) def test_scale_factor(self): z = 0.7 - assert self.bkg.a_z(z) == 1./(1+z) + assert self.bkg.a_z(z) == 1.0 / (1 + z) def test_rho_crit(self): assert self.bkg.rho_crit == 135955133951.10692 def test_interpol(self): from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) bkg = Background(cosmo=cosmo) @@ -31,5 +32,5 @@ def test_interpol(self): npt.assert_almost_equal(d_xy_interp / d_xy, 1, decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_cosmo_interp.py b/test/test_Cosmo/test_cosmo_interp.py index 82bfa9549..71dd32a7a 100644 --- a/test/test_Cosmo/test_cosmo_interp.py +++ 
b/test/test_Cosmo/test_cosmo_interp.py @@ -5,26 +5,37 @@ class TestCosmoInterp(object): - """ + """""" - """ def setup_method(self): self.H0_true = 70 self.omega_m_true = 0.3 self._ok_true = 0.1 self.cosmo = FlatLambdaCDM(H0=self.H0_true, Om0=self.omega_m_true, Ob0=0.05) self.cosmo_interp = CosmoInterp(cosmo=self.cosmo, z_stop=3, num_interp=100) - self.cosmo_ok = LambdaCDM(H0=self.H0_true, Om0=self.omega_m_true, Ode0=1.0 - self.omega_m_true - self._ok_true) - self.cosmo_interp_ok = CosmoInterp(cosmo=self.cosmo_ok, z_stop=3, num_interp=100) + self.cosmo_ok = LambdaCDM( + H0=self.H0_true, + Om0=self.omega_m_true, + Ode0=1.0 - self.omega_m_true - self._ok_true, + ) + self.cosmo_interp_ok = CosmoInterp( + cosmo=self.cosmo_ok, z_stop=3, num_interp=100 + ) - self.cosmo_ok_neg = LambdaCDM(H0=self.H0_true, Om0=self.omega_m_true, Ode0=1.0 - self.omega_m_true + self._ok_true) - self.cosmo_interp_ok_neg = CosmoInterp(cosmo=self.cosmo_ok_neg, z_stop=3, num_interp=100) + self.cosmo_ok_neg = LambdaCDM( + H0=self.H0_true, + Om0=self.omega_m_true, + Ode0=1.0 - self.omega_m_true + self._ok_true, + ) + self.cosmo_interp_ok_neg = CosmoInterp( + cosmo=self.cosmo_ok_neg, z_stop=3, num_interp=100 + ) def test_angular_diameter_distance(self): - z = 1. + z = 1.0 da = self.cosmo.angular_diameter_distance(z=[z]) da_interp = self.cosmo_interp.angular_diameter_distance(z=[z]) - npt.assert_almost_equal(da_interp/da, 1, decimal=3) + npt.assert_almost_equal(da_interp / da, 1, decimal=3) assert da.unit == da_interp.unit da = self.cosmo_ok.angular_diameter_distance(z=z) @@ -39,8 +50,8 @@ def test_angular_diameter_distance(self): def test_angular_diameter_distance_array(self): # test for array input - z1 = 1. - z2 = 2. + z1 = 1.0 + z2 = 2.0 da_z1 = self.cosmo.angular_diameter_distance(z=[z1]) da_z2 = self.cosmo.angular_diameter_distance(z=[z2]) da_interp = self.cosmo_interp.angular_diameter_distance(z=[z1, z2]) @@ -54,18 +65,20 @@ def test_angular_diameter_distance_array(self): npt.assert_almost_equal(da_z12[1] / da_z2, 1, decimal=3) def test_angular_diameter_distance_z1z2(self): - z1 = .3 - z2 = 2. 
+ z1 = 0.3 + z2 = 2.0 delta_a = self.cosmo.angular_diameter_distance_z1z2(z1=z1, z2=z2) delta_a_interp = self.cosmo_interp.angular_diameter_distance_z1z2(z1=z1, z2=z2) - npt.assert_almost_equal(delta_a_interp/delta_a, 1, decimal=3) + npt.assert_almost_equal(delta_a_interp / delta_a, 1, decimal=3) assert delta_a.unit == delta_a_interp.unit delta_a = self.cosmo_ok.angular_diameter_distance_z1z2(z1=z1, z2=z2) - delta_a_interp = self.cosmo_interp_ok.angular_diameter_distance_z1z2(z1=z1, z2=z2) + delta_a_interp = self.cosmo_interp_ok.angular_diameter_distance_z1z2( + z1=z1, z2=z2 + ) npt.assert_almost_equal(delta_a_interp / delta_a, 1, decimal=3) assert delta_a.unit == delta_a_interp.unit -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_cosmo_solver.py b/test/test_Cosmo/test_cosmo_solver.py index a7376a9ca..e61dee831 100644 --- a/test/test_Cosmo/test_cosmo_solver.py +++ b/test/test_Cosmo/test_cosmo_solver.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.Cosmo.cosmo_solver import SolverFlatLCDM, InvertCosmo from lenstronomy.Cosmo.cosmo_solver import cosmo2angular_diameter_distances, ddt2h0 @@ -10,7 +10,6 @@ class TestCompare(object): - def setup_method(self): self.z_d, self.z_s = 0.295, 0.658 self.solver = SolverFlatLCDM(z_d=self.z_d, z_s=self.z_s) @@ -26,10 +25,10 @@ def test_ddt2h0(self): z_lens, z_source = 0.5, 2 omega_m = 0.3 h0_true = 73 - cosmo = FlatLambdaCDM(H0=h0_true, Om0=omega_m, Ob0=0.) + cosmo = FlatLambdaCDM(H0=h0_true, Om0=omega_m, Ob0=0.0) lensCosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) ddt_true = lensCosmo.ddt - cosmo_fiducial = FlatLambdaCDM(H0=60, Om0=omega_m, Ob0=0.) + cosmo_fiducial = FlatLambdaCDM(H0=60, Om0=omega_m, Ob0=0.0) h0_inferred = ddt2h0(ddt_true, z_lens, z_source, cosmo_fiducial) npt.assert_almost_equal(h0_inferred, h0_true, decimal=4) @@ -54,26 +53,32 @@ def test_solver(self): H0 = 30 omega_m = 0.1 Dd, Ds_Dds = cosmo2angular_diameter_distances(H0, omega_m, self.z_d, self.z_s) - print(Dd, Ds_Dds, 'Dd, Ds_Dds') + print(Dd, Ds_Dds, "Dd, Ds_Dds") x = self.solver.solve(init, Dd, Ds_Dds) - print(x, 'x') + print(x, "x") npt.assert_almost_equal(x[0], H0, decimal=5) npt.assert_almost_equal(x[1], omega_m, decimal=5) Dd, Ds_Dds = 4000, 0.4 x = self.solver.solve(init, Dd, Ds_Dds) - print(x, 'x') - Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances(x[0], abs(x[1]) % 1, self.z_d, self.z_s) + print(x, "x") + Dd_new, Ds_Dds_new = cosmo2angular_diameter_distances( + x[0], abs(x[1]) % 1, self.z_d, self.z_s + ) print(Dd, Ds_Dds, Dd_new, Ds_Dds_new) - #npt.assert_almost_equal(Dd, Dd_new, decimal=3) - #npt.assert_almost_equal(Ds_Dds, Ds_Dds_new, decimal=3) + # npt.assert_almost_equal(Dd, Dd_new, decimal=3) + # npt.assert_almost_equal(Ds_Dds, Ds_Dds_new, decimal=3) class TestInvertCosmo(object): def setup_method(self): self.z_d, self.z_s = 0.295, 0.658 - self.invertCosmo = InvertCosmo(z_d=self.z_d, z_s=self.z_s, H0_range=np.linspace(10, 100, 50), - omega_m_range=np.linspace(0.05, 1, 50)) + self.invertCosmo = InvertCosmo( + z_d=self.z_d, + z_s=self.z_s, + H0_range=np.linspace(10, 100, 50), + omega_m_range=np.linspace(0.05, 1, 50), + ) self.invertCosmo_default = InvertCosmo(z_d=self.z_d, z_s=self.z_s) def test_get_cosmo(self): @@ -92,5 +97,5 @@ def test_get_cosmo(self): assert H0_new == -1 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_kde_likelihood.py b/test/test_Cosmo/test_kde_likelihood.py index 
168f4236b..65ee4a06b 100644 --- a/test/test_Cosmo/test_kde_likelihood.py +++ b/test/test_Cosmo/test_kde_likelihood.py @@ -1,16 +1,18 @@ import numpy as np import numpy.testing as npt import pytest + # import lenstronomy module dealing with cosmological quantities in a lens system from lenstronomy.Cosmo.lens_cosmo import LensCosmo + # import KDELikelihood module from lenstronomy.Cosmo.kde_likelihood import KDELikelihood + # import astropy.cosmology class from astropy.cosmology import FlatLambdaCDM class TestKDELikelihood(object): - def setup_method(self): # set up seed np.random.seed(seed=41) @@ -41,7 +43,9 @@ def test_kde_likelihood(self): D_d_samples = np.random.normal(Dd_true, sigma_Dd, num_samples) # initialize a KDELikelihood class with the posterior sample - kdeLikelihood = KDELikelihood(D_d_samples, D_dt_samples, kde_type='scipy_gaussian', bandwidth=2) + kdeLikelihood = KDELikelihood( + D_d_samples, D_dt_samples, kde_type="scipy_gaussian", bandwidth=2 + ) # evaluate the maximum likelihood (arbitrary normalization!) logL_max = kdeLikelihood.logLikelihood(Dd_true, D_dt_true) # evaluate the likelihood 1-sigma away from Dd @@ -60,7 +64,9 @@ def test_kde_likelihood(self): npt.assert_almost_equal(delta_log, 0.5, decimal=2) # initialize a KDELikelihood class with the posterior sample - kdeLikelihood = KDELikelihood(D_d_samples, D_dt_samples, kde_type='gaussian', bandwidth=20) + kdeLikelihood = KDELikelihood( + D_d_samples, D_dt_samples, kde_type="gaussian", bandwidth=20 + ) # evaluate the maximum likelihood (arbitrary normalization!) logL_max = kdeLikelihood.logLikelihood(Dd_true, D_dt_true) # evaluate the likelihood 1-sigma away from Dd @@ -79,5 +85,5 @@ def test_kde_likelihood(self): npt.assert_almost_equal(delta_log, 0.5, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_lcdm.py b/test/test_Cosmo/test_lcdm.py index db2817f10..8d31eaa3a 100644 --- a/test/test_Cosmo/test_lcdm.py +++ b/test/test_Cosmo/test_lcdm.py @@ -1,9 +1,9 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import pytest -from lenstronomy.Cosmo.lcdm import LCDM +from lenstronomy.Cosmo.lcdm import LCDM class TestFlatLCDM(object): @@ -37,5 +37,5 @@ def test_D_dt(self): npt.assert_almost_equal(D_dt, D_dt_k, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_lens_cosmo.py b/test/test_Cosmo/test_lens_cosmo.py index 17c057706..e178fa8cf 100644 --- a/test/test_Cosmo/test_lens_cosmo.py +++ b/test/test_Cosmo/test_lens_cosmo.py @@ -1,21 +1,21 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt import pytest from lenstronomy.Cosmo.lens_cosmo import LensCosmo -from lenstronomy.Util import util +from lenstronomy.Util import util class TestLensCosmo(object): - """ - tests the UnitManager class routines - """ + """Tests the UnitManager class routines.""" + def setup_method(self): z_L = 0.8 z_S = 3.0 from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self.lensCosmo = LensCosmo(z_L, z_S, cosmo=cosmo) @@ -25,10 +25,10 @@ def test_ang_dist(self): npt.assert_almost_equal(self.lensCosmo.dds, 892.0038749095863, decimal=8) def test_epsilon_crit(self): - npt.assert_almost_equal(self.lensCosmo.sigma_crit / 1.9121e+15, 1, decimal=3) + npt.assert_almost_equal(self.lensCosmo.sigma_crit / 1.9121e15, 1, decimal=3) def test_arcsec2phys(self): - arcsec = np.array([1, 2]) # pixel coordinate from center + 
arcsec = np.array([1, 2]) # pixel coordinate from center physcoord = self.lensCosmo.arcsec2phys_lens(arcsec) npt.assert_almost_equal(physcoord[0], 0.0075083362428338641, decimal=8) npt.assert_almost_equal(physcoord[1], 0.015016672485667728, decimal=8) @@ -38,7 +38,7 @@ def test_arcsec2phys(self): npt.assert_almost_equal(physcoord[1], 0.01540661626172821, decimal=8) def test_phys2arcsec_lens(self): - phys = 1. + phys = 1.0 arc_sec = self.lensCosmo.phys2arcsec_lens(phys) phys_new = self.lensCosmo.arcsec2phys_lens(arc_sec) npt.assert_almost_equal(phys_new, phys, decimal=8) @@ -54,7 +54,7 @@ def test_kappa2proj_mass(self): npt.assert_almost_equal(mass, kappa * self.lensCosmo.sigma_crit, decimal=3) def test_mass_in_coin(self): - theta_E = 1. + theta_E = 1.0 m_coin = self.lensCosmo.mass_in_coin(theta_E) npt.assert_almost_equal(m_coin, 165279526936.52194, decimal=0) @@ -63,41 +63,40 @@ def test_D_dt_model(self): npt.assert_almost_equal(D_dt, 4965.660384441859, decimal=8) def test_nfw_angle2physical(self): - Rs_angle = 6. - alpha_Rs = 1. + Rs_angle = 6.0 + alpha_Rs = 1.0 rho0, Rs, c, r200, M200 = self.lensCosmo.nfw_angle2physical(Rs_angle, alpha_Rs) assert Rs * c == r200 def test_nfw_physical2angle(self): - M = 10.**13.5 + M = 10.0**13.5 c = 4 Rs_angle, alpha_Rs = self.lensCosmo.nfw_physical2angle(M, c) - rho0, Rs, c_out, r200, M200 = self.lensCosmo.nfw_angle2physical(Rs_angle, alpha_Rs) + rho0, Rs, c_out, r200, M200 = self.lensCosmo.nfw_angle2physical( + Rs_angle, alpha_Rs + ) npt.assert_almost_equal(c_out, c, decimal=3) npt.assert_almost_equal(np.log10(M200), np.log10(M), decimal=4) def test_sis_theta_E2sigma_v(self): - theta_E = 2. + theta_E = 2.0 sigma_v = self.lensCosmo.sis_theta_E2sigma_v(theta_E) theta_E_out = self.lensCosmo.sis_sigma_v2theta_E(sigma_v) npt.assert_almost_equal(theta_E_out, theta_E, decimal=5) def test_fermat2delays(self): - fermat_pot = 0.5 dt_days = self.lensCosmo.time_delay_units(fermat_pot) fermat_pot_out = self.lensCosmo.time_delay2fermat_pot(dt_days) npt.assert_almost_equal(fermat_pot, fermat_pot_out, decimal=10) def test_uldm_angular2phys(self): - kappa_0, theta_c = 0.1, 3 mlog10, Mlog10 = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c) npt.assert_almost_equal(mlog10, -24.3610006, decimal=5) npt.assert_almost_equal(Mlog10, 11.7195843, decimal=5) def test_uldm_mphys2angular(self): - m_log10, M_log10 = -24, 11 kappa_0, theta_c = self.lensCosmo.uldm_mphys2angular(m_log10, M_log10) mcheck, Mcheck = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c) @@ -105,7 +104,6 @@ def test_uldm_mphys2angular(self): npt.assert_almost_equal(Mcheck, M_log10, decimal=4) def test_a_z(self): - a = self.lensCosmo.background.a_z(z=1) npt.assert_almost_equal(a, 0.5) @@ -125,12 +123,14 @@ def test_hernquist_angular2phys(self): # test bijective transformation sigma0, rs_angle = self.lensCosmo.hernquist_phys2angular(mass=m_star, rs=rs) - m_star_new, rs_new = self.lensCosmo.hernquist_angular2phys(sigma0=sigma0, rs_angle=rs_angle) + m_star_new, rs_new = self.lensCosmo.hernquist_angular2phys( + sigma0=sigma0, rs_angle=rs_angle + ) npt.assert_almost_equal(m_star_new, m_star, decimal=1) npt.assert_almost_equal(rs_new, rs, decimal=8) def test_hernquist_mass_normalization(self): - m_star = 10 ** 10 # in M_sun + m_star = 10**10 # in M_sun rs = 0.01 # in Mpc # test bijective transformation @@ -138,21 +138,22 @@ def test_hernquist_mass_normalization(self): # test mass integrals # make large grid - delta_pix = rs_angle / 30. 
+ delta_pix = rs_angle / 30.0 x, y = util.make_grid(numPix=501, deltapix=delta_pix) # compute convergence from lenstronomy.LensModel.lens_model import LensModel - lens_model = LensModel(lens_model_list=['HERNQUIST']) - kwargs = [{'sigma0': sigma0, 'Rs': rs_angle, 'center_x': 0, 'center_y': 0}] + + lens_model = LensModel(lens_model_list=["HERNQUIST"]) + kwargs = [{"sigma0": sigma0, "Rs": rs_angle, "center_x": 0, "center_y": 0}] kappa = lens_model.kappa(x, y, kwargs) # sum up convergence - kappa_tot = np.sum(kappa) * delta_pix ** 2 + kappa_tot = np.sum(kappa) * delta_pix**2 # transform to mass mass_tot = kappa_tot * self.lensCosmo.sigma_crit_angle # compare - npt.assert_almost_equal(mass_tot/ m_star, 1, decimal=1) + npt.assert_almost_equal(mass_tot / m_star, 1, decimal=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Cosmo/test_micro_lensing.py b/test/test_Cosmo/test_micro_lensing.py index 0ac38d3b0..4ae60f130 100644 --- a/test/test_Cosmo/test_micro_lensing.py +++ b/test/test_Cosmo/test_micro_lensing.py @@ -4,7 +4,6 @@ def test_einstein_radius(): - # from Wikipedia, a 60 M_jupiter mass object at 4000 pc with a source at 8000pc results in an Einstein radius of # about 0.00024 arc seconds mass = 60 * constants.M_jupiter / constants.M_sun diff --git a/test/test_Cosmo/test_nfw_param.py b/test/test_Cosmo/test_nfw_param.py index da50d21e7..6d51f6fde 100644 --- a/test/test_Cosmo/test_nfw_param.py +++ b/test/test_Cosmo/test_nfw_param.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer', 'gilmanda' +__author__ = "sibirrer", "gilmanda" import numpy as np import numpy.testing as npt @@ -9,11 +9,9 @@ class TestLensCosmo(object): - """ - tests the UnitManager class routines - """ - def setup_method(self): + """Tests the UnitManager class routines.""" + def setup_method(self): cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self.nfwParam = NFWParam(cosmo=cosmo) self.z = 0.5 # needed fixed redshift for the inversion function @@ -23,20 +21,20 @@ def test_rho0_c(self): for c in c_list: rho0 = self.nfwParam.rho0_c(c, z=self.z) c_out = self.nfwParam.c_rho0(rho0, z=self.z) - print(c, 'c') + print(c, "c") npt.assert_almost_equal(c_out, c, decimal=3) def test_rhoc_z(self): z = 0 rho0_z = self.nfwParam.rhoc_z(z=z) - npt.assert_almost_equal(self.nfwParam.rhoc * (1+z)**3, rho0_z) + npt.assert_almost_equal(self.nfwParam.rhoc * (1 + z) ** 3, rho0_z) def test_M200(self): M200 = self.nfwParam.M200(rs=1, rho0=1, c=1) npt.assert_almost_equal(M200, 2.4271590540348216, decimal=5) def test_profileMain(self): - M = 10 ** 13.5 + M = 10**13.5 z = 0.5 r200, rho0, c, Rs = self.nfwParam.nfw_Mz(M, z) @@ -58,34 +56,46 @@ def test_against_colossus(self): from colossus.cosmology import cosmology as cosmology_colossus from colossus.halo.profile_nfw import NFWProfile - colossus_kwargs = {'H0': 70, 'Om0': 0.285, 'Ob0': 0.05, 'ns': 0.96, 'sigma8': 0.82, 'persistence': ''} - colossus = cosmology_colossus.setCosmology('custom', colossus_kwargs) - m200 = 10 ** 8 - c = 17. 
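The comment in test_einstein_radius above quotes a figure of roughly 0.00024 arcsec for a 60 M_Jupiter lens at 4000 pc with a source at 8000 pc. That number follows from the point-mass Einstein radius theta_E = sqrt(4 G M / c^2 * D_ds / (D_d D_s)); a quick standalone check (constants written out rather than taken from lenstronomy's constants module):

import numpy as np

G = 6.674e-11      # m^3 kg^-1 s^-2
c = 2.998e8        # m / s
pc = 3.086e16      # m
M_jup = 1.898e27   # kg

M = 60 * M_jup
D_d, D_s = 4000 * pc, 8000 * pc
D_ds = D_s - D_d   # Euclidean approximation, valid at these small distances

theta_E = np.sqrt(4 * G * M / c**2 * D_ds / (D_d * D_s))  # radians
print(theta_E * 206265)  # ~2.4e-4 arcsec, consistent with the quoted value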
+ colossus_kwargs = { + "H0": 70, + "Om0": 0.285, + "Ob0": 0.05, + "ns": 0.96, + "sigma8": 0.82, + "persistence": "", + } + colossus = cosmology_colossus.setCosmology("custom", colossus_kwargs) + + m200 = 10**8 + c = 17.0 zvals = np.linspace(0.0, 2, 50) h = 0.7 for z in zvals: - nfw_colossus = NFWProfile(M=m200 * h, z=z, c=c, mdef='200c') - rhos_colossus, rs_colossus = nfw_colossus.nativeParameters(m200 * h, c, z, mdef='200c') + nfw_colossus = NFWProfile(M=m200 * h, z=z, c=c, mdef="200c") + rhos_colossus, rs_colossus = nfw_colossus.nativeParameters( + m200 * h, c, z, mdef="200c" + ) r200_colossus = rs_colossus * c # according to colossus documentation the density is in physical units[M h^2/kpc^3] and distance [kpc/h] - rs_colossus *= h ** -1 - rhos_colossus *= h ** 2 + rs_colossus *= h**-1 + rhos_colossus *= h**2 r200_lenstronomy = nfw_param.r200_M(m200 * h, z) / h # physical radius r200 rs_lenstronomy = r200_lenstronomy / c - rhos_lenstronomy = nfw_param.rho0_c(c, z) * h ** 2 # physical density in M_sun/Mpc**3 + rhos_lenstronomy = ( + nfw_param.rho0_c(c, z) * h**2 + ) # physical density in M_sun/Mpc**3 # convert Mpc to kpc - rhos_lenstronomy *= 1000 ** -3 + rhos_lenstronomy *= 1000**-3 rs_lenstronomy *= 1000 - npt.assert_almost_equal(rs_lenstronomy/rs_colossus, 1, decimal=3) + npt.assert_almost_equal(rs_lenstronomy / rs_colossus, 1, decimal=3) npt.assert_almost_equal(rhos_lenstronomy / rhos_colossus, 1, decimal=3) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Data/test_coord_transforms.py b/test/test_Data/test_coord_transforms.py index 4fffe8924..ade0f8032 100644 --- a/test/test_Data/test_coord_transforms.py +++ b/test/test_Data/test_coord_transforms.py @@ -13,9 +13,11 @@ def setup_method(self): def test_init(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) ra, dec = coords.map_pix2coord(0, 0) assert ra == ra_0 assert dec == dec_0 @@ -28,9 +30,11 @@ def test_init(self): def test_map_coord2pix(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) x, y = coords.map_coord2pix(2, 1) assert x == 20 assert y == 0 @@ -38,9 +42,11 @@ def test_map_coord2pix(self): def test_map_pix2coord(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) x, y = coords.map_pix2coord(1, 0) assert x == deltaPix + ra_0 assert y == dec_0 @@ -48,25 +54,35 @@ def test_map_pix2coord(self): def test_pixel_size(self): deltaPix = -0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. 
- coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) deltaPix_out = coords.pixel_width assert deltaPix_out == -deltaPix def test_rescaled_grid(self): import lenstronomy.Util.util as util + numPix = 10 theta = 0.5 deltaPix = 0.05 subgrid_res = 3 - Mpix2a = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) * deltaPix + Mpix2a = ( + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + * deltaPix + ) x_grid, y_grid = util.make_grid_transformed(numPix, Mpix2a) - coords = Coordinates(Mpix2a, ra_at_xy_0=x_grid[0], - dec_at_xy_0=y_grid[0]) - x_grid_high_res, y_grid_high_res = util.make_subgrid(x_grid, y_grid, subgrid_res=subgrid_res) - coords_sub = Coordinates(Mpix2a/subgrid_res, ra_at_xy_0=x_grid_high_res[0], - dec_at_xy_0=y_grid_high_res[0]) + coords = Coordinates(Mpix2a, ra_at_xy_0=x_grid[0], dec_at_xy_0=y_grid[0]) + x_grid_high_res, y_grid_high_res = util.make_subgrid( + x_grid, y_grid, subgrid_res=subgrid_res + ) + coords_sub = Coordinates( + Mpix2a / subgrid_res, + ra_at_xy_0=x_grid_high_res[0], + dec_at_xy_0=y_grid_high_res[0], + ) x, y = coords_sub.map_coord2pix(x_grid[1], y_grid[1]) npt.assert_almost_equal(x, 4, decimal=10) @@ -79,14 +95,14 @@ def test_rescaled_grid(self): npt.assert_almost_equal(ra, x_grid[0], decimal=10) npt.assert_almost_equal(dec, y_grid[0], decimal=10) - ra, dec = coords_sub.map_pix2coord(1 + 2*subgrid_res, 1) + ra, dec = coords_sub.map_pix2coord(1 + 2 * subgrid_res, 1) npt.assert_almost_equal(ra, x_grid[2], decimal=10) npt.assert_almost_equal(dec, y_grid[2], decimal=10) x_2d = util.array2image(x_grid) y_2d = util.array2image(y_grid) - ra, dec = coords_sub.map_pix2coord(1 + 2*subgrid_res, 1 + 3*subgrid_res) + ra, dec = coords_sub.map_pix2coord(1 + 2 * subgrid_res, 1 + 3 * subgrid_res) npt.assert_almost_equal(ra, x_2d[3, 2], decimal=10) npt.assert_almost_equal(dec, y_2d[3, 2], decimal=10) @@ -97,9 +113,11 @@ def test_rescaled_grid(self): def test_coordinate_grid(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) ra_grid, dec_grid = coords.coordinate_grid(nx=10, ny=10) assert ra_grid[0, 0] == ra_0 @@ -112,9 +130,11 @@ def test_coordinate_grid(self): def test_xy_at_radec_0(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) x_at_radec_0, y_at_radec_0 = coords.xy_at_radec_0 npt.assert_almost_equal(x_at_radec_0, -20, decimal=8) npt.assert_almost_equal(x_at_radec_0, -20, decimal=8) @@ -125,21 +145,24 @@ def test_xy_at_radec_0(self): def test_shift_coordinate_system(self): deltaPix = 0.05 Mpix2a = np.array([[1, 0], [0, 1]]) * deltaPix - ra_0 = 1. - dec_0 = 1. 
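The Coordinates tests above pin down a simple affine convention: angles are obtained from pixel indices via the transform_pix2angle matrix plus the angle of pixel (0, 0). A minimal numpy sketch of that convention (illustrative only, not the Coordinates class itself):

import numpy as np

delta_pix = 0.05
M_pix2angle = np.array([[1.0, 0.0], [0.0, 1.0]]) * delta_pix
ra_0, dec_0 = 1.0, 1.0  # angle of pixel (0, 0)

def pix2coord(x, y):
    # (ra, dec) = M @ (x, y) + (ra_0, dec_0)
    return M_pix2angle @ np.array([x, y]) + np.array([ra_0, dec_0])

def coord2pix(ra, dec):
    # inverse mapping: (x, y) = M^-1 @ ((ra, dec) - (ra_0, dec_0))
    return np.linalg.solve(M_pix2angle, np.array([ra - ra_0, dec - dec_0]))

print(pix2coord(0, 0))  # [1. 1.]  -> matches map_pix2coord(0, 0) in test_init
print(coord2pix(2, 1))  # [20. 0.] -> matches map_coord2pix(2, 1)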
- coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + ra_0 = 1.0 + dec_0 = 1.0 + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) x0, y0 = coords.xy_at_radec_0 coords.shift_coordinate_system(x_shift=deltaPix, y_shift=0, pixel_unit=False) x0_new, y0_new = coords.xy_at_radec_0 assert x0_new == x0 - 1 - coords = Coordinates(transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0) + coords = Coordinates( + transform_pix2angle=Mpix2a, ra_at_xy_0=ra_0, dec_at_xy_0=dec_0 + ) x0, y0 = coords.xy_at_radec_0 coords.shift_coordinate_system(x_shift=1, y_shift=0, pixel_unit=True) x0_new, y0_new = coords.xy_at_radec_0 assert x0_new == x0 - 1 - -if __name__ == '__main__': - pytest.main() \ No newline at end of file +if __name__ == "__main__": + pytest.main() diff --git a/test/test_Data/test_image_noise.py b/test/test_Data/test_image_noise.py index 7d7355844..5ee99e87f 100644 --- a/test/test_Data/test_image_noise.py +++ b/test/test_Data/test_image_noise.py @@ -10,26 +10,41 @@ class TestData(object): def setup_method(self): self.numPix = 10 - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': 1, 'background_rms': 1, - 'noise_map': None, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": 1, + "background_rms": 1, + "noise_map": None, + "verbose": True, + } self.Noise = ImageNoise(**kwargs_noise) - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': np.ones((self.numPix, self.numPix)), 'background_rms': 1, - 'noise_map': None, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": np.ones((self.numPix, self.numPix)), + "background_rms": 1, + "noise_map": None, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) def test_get_covariance_matrix(self): d = np.array([1, 2, 3]) sigma_b = 1 - f = 10. 
+ f = 10.0 result = image_noise.covariance_matrix(d, sigma_b, f) assert result[0] == 1.1 assert result[1] == 1.2 def test_noise_map(self): noise_map = np.ones((self.numPix, self.numPix)) - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': 1, 'background_rms': 1, - 'noise_map': noise_map, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": 1, + "background_rms": 1, + "noise_map": noise_map, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) noise_map_out = noise.C_D npt.assert_almost_equal(noise_map_out, noise_map, decimal=8) @@ -37,22 +52,37 @@ def test_noise_map(self): noise_map_out = noise.C_D_model(model=np.ones((self.numPix, self.numPix))) npt.assert_almost_equal(noise_map_out, noise_map, decimal=8) - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': 1, - 'noise_map': noise_map, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": 1, + "noise_map": noise_map, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) bkg = noise.background_rms npt.assert_almost_equal(bkg, np.median(noise_map)) def test_exposure_time(self): - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': 0., 'background_rms': 1, - 'noise_map': None, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": 0.0, + "background_rms": 1, + "noise_map": None, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) exp_map = noise.exposure_map assert exp_map > 0 def test_gradient_boost(self): - kwargs_noise = {'image_data': np.zeros((self.numPix, self.numPix)), 'exposure_time': 1000000000000, 'background_rms': 1, - 'noise_map': None, 'gradient_boost_factor': 1., 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((self.numPix, self.numPix)), + "exposure_time": 1000000000000, + "background_rms": 1, + "noise_map": None, + "gradient_boost_factor": 1.0, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) model = np.zeros((self.numPix, self.numPix)) CD_zero = noise.C_D_model(model) @@ -61,11 +91,16 @@ def test_gradient_boost(self): CD_one = noise.C_D_model(model) npt.assert_almost_equal(CD_zero, CD_one) -class TestRaise(unittest.TestCase): +class TestRaise(unittest.TestCase): def test_raise(self): - kwargs_noise = {'image_data': np.zeros((10, 10)), 'exposure_time': None, - 'background_rms': None, 'noise_map': None, 'verbose': True} + kwargs_noise = { + "image_data": np.zeros((10, 10)), + "exposure_time": None, + "background_rms": None, + "noise_map": None, + "verbose": True, + } noise = ImageNoise(**kwargs_noise) with self.assertRaises(ValueError): @@ -74,5 +109,5 @@ def test_raise(self): out = noise.exposure_map -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Data/test_imaging_data.py b/test/test_Data/test_imaging_data.py index e0978b87f..2204f7b56 100644 --- a/test/test_Data/test_imaging_data.py +++ b/test/test_Data/test_imaging_data.py @@ -11,26 +11,44 @@ class TestData(object): def setup_method(self): self.numPix = 10 - kwargs_data = {'image_data': np.zeros((self.numPix, self.numPix)), 'noise_map': np.ones((self.numPix, self.numPix))} + kwargs_data = { + "image_data": np.zeros((self.numPix, self.numPix)), + "noise_map": np.ones((self.numPix, self.numPix)), + } self.Data = ImageData(**kwargs_data) def test_numData(self): - assert self.Data.num_pixel == self.numPix ** 2 + assert 
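The values asserted in test_get_covariance_matrix above (d = [1, 2, 3], sigma_b = 1, f = 10 giving 1.1 and 1.2) are consistent with the usual per-pixel variance model of Gaussian background plus Poisson shot noise, C_D = sigma_b**2 + d / f. A hedged sketch of that model (not lenstronomy's covariance_matrix function; the clipping of negative counts is an assumption of this sketch):

import numpy as np

def covariance_diagonal(data, background_rms, exposure_time):
    # Gaussian background variance plus a Poisson term from the counts;
    # negative pixel values are clipped so they cannot reduce the variance
    return background_rms**2 + np.maximum(data, 0) / exposure_time

d = np.array([1.0, 2.0, 3.0])
print(covariance_diagonal(d, background_rms=1.0, exposure_time=10.0))
# [1.1 1.2 1.3] -- matches the values asserted in the test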
self.Data.num_pixel == self.numPix**2 def test_shift_coords(self): numPix = 10 deltaPix = 0.05 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True) + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True + ) # mask (1= model this pixel, 0= leave blanck) - kwargs_data = {'ra_at_xy_0': ra_at_xy_0, 'dec_at_xy_0': dec_at_xy_0, - 'transform_pix2angle': Mpix2coord, 'image_data': np.ones((numPix, numPix))} + kwargs_data = { + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + "transform_pix2angle": Mpix2coord, + "image_data": np.ones((numPix, numPix)), + } data = ImageData(**kwargs_data) ra_shift = 0.05 - dec_shift = 0. - kwargs_data['ra_shift'] = ra_shift - kwargs_data['dec_shift'] = dec_shift + dec_shift = 0.0 + kwargs_data["ra_shift"] = ra_shift + kwargs_data["dec_shift"] = dec_shift data_shift = ImageData(**kwargs_data) ra, dec = data.map_pix2coord(1, 1) @@ -53,11 +71,25 @@ def test_shift_coordinate_system(self): numPix = 10 deltaPix = 0.05 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True) - - kwargs_data = {'ra_at_xy_0': ra_at_xy_0, 'dec_at_xy_0': dec_at_xy_0, - 'transform_pix2angle': Mpix2coord, 'image_data': np.ones((numPix, numPix))} + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=True + ) + + kwargs_data = { + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + "transform_pix2angle": Mpix2coord, + "image_data": np.ones((numPix, numPix)), + } data = ImageData(**kwargs_data) data_new = copy.deepcopy(data) @@ -70,37 +102,40 @@ def test_shift_coordinate_system(self): ra, dec = data.map_pix2coord(x, y) ra_new, dec_new = data_new.map_pix2coord(x, y) - npt.assert_almost_equal(ra, ra_new-x_shift, decimal=10) - npt.assert_almost_equal(dec, dec_new-y_shift, decimal=10) + npt.assert_almost_equal(ra, ra_new - x_shift, decimal=10) + npt.assert_almost_equal(dec, dec_new - y_shift, decimal=10) x_coords, y_coords = data.pixel_coordinates x_coords_new, y_coords_new = data_new.pixel_coordinates - npt.assert_almost_equal(x_coords[0], x_coords_new[0]-x_shift, decimal=10) - npt.assert_almost_equal(y_coords[0], y_coords_new[0]-y_shift, decimal=10) + npt.assert_almost_equal(x_coords[0], x_coords_new[0] - x_shift, decimal=10) + npt.assert_almost_equal(y_coords[0], y_coords_new[0] - y_shift, decimal=10) def test_update_data(self): - kwargs_data = {'image_data': np.zeros((self.numPix, self.numPix)), - 'noise_map': None, 'exposure_time': 1, 'background_rms': 1} + kwargs_data = { + "image_data": np.zeros((self.numPix, self.numPix)), + "noise_map": None, + "exposure_time": 1, + "background_rms": 1, + } data = ImageData(**kwargs_data) C_D = data.C_D data.update_data(image_data=np.ones((self.numPix, self.numPix))) C_D_new = data.C_D - assert C_D_new[0,0] > C_D[0, 0] + assert C_D_new[0, 0] > C_D[0, 0] data_new = data.data npt.assert_almost_equal(data_new, np.ones((self.numPix, self.numPix))) class TestRaise(unittest.TestCase): - def test_raise(self): - kwargs_data = {'image_data': np.zeros((10, 10))} 
+ kwargs_data = {"image_data": np.zeros((10, 10))} Data = ImageData(**kwargs_data) image_data_new = np.zeros((5, 5)) with self.assertRaises(ValueError): out = Data.update_data(image_data_new) with self.assertRaises(ValueError): - ImageData(**kwargs_data, likelihood_method = 'WRONG') + ImageData(**kwargs_data, likelihood_method="WRONG") -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Data/test_imaging_data_with_interferometric_changes.py b/test/test_Data/test_imaging_data_with_interferometric_changes.py index bb181b341..659648da5 100644 --- a/test/test_Data/test_imaging_data_with_interferometric_changes.py +++ b/test/test_Data/test_imaging_data_with_interferometric_changes.py @@ -2,26 +2,32 @@ import numpy.testing as npt from lenstronomy.Data.imaging_data import ImageData + def test_interferometry_likelihood(): - """ - - test interferometry natural weighting likelihood function, test likelihood_method function output + """Test interferometry natural weighting likelihood function, test likelihood_method + function output.""" + + test_data = np.zeros((5, 5)) + test_data[0, :] = 1 + test_data[:, 4] = 2 + + mask = np.ones((5, 5)) + model_unconvolved = np.zeros((5, 5)) + model_convolved = np.zeros((5, 5)) + model_unconvolved[0, 4] = 1.2 + model_convolved[0, :] = 1 + model_convolved[:, 4] = 1 + + data_class = ImageData( + image_data=test_data, + background_rms=2.5, + log_likelihood_constant=-1.0, + likelihood_method="interferometry_natwt", + ) - """ - - test_data = np.zeros((5,5)) - test_data[0,:] = 1 - test_data[:,4] = 2 - - mask = np.ones((5,5)) - model_unconvolved = np.zeros((5,5)) - model_convolved = np.zeros((5,5)) - model_unconvolved[0,4] = 1.2 - model_convolved[0,:] = 1 - model_convolved[:,4] = 1 - - data_class = ImageData(image_data = test_data, background_rms = 2.5, log_likelihood_constant = -1.0, likelihood_method = 'interferometry_natwt') - - assert data_class.likelihood_method() == 'interferometry_natwt' - npt.assert_almost_equal(data_class.log_likelihood([model_unconvolved,model_convolved],mask), -0.712, decimal=8) - \ No newline at end of file + assert data_class.likelihood_method() == "interferometry_natwt" + npt.assert_almost_equal( + data_class.log_likelihood([model_unconvolved, model_convolved], mask), + -0.712, + decimal=8, + ) diff --git a/test/test_Data/test_psf.py b/test/test_Data/test_psf.py index 9c2ad8511..0d585f651 100644 --- a/test/test_Data/test_psf.py +++ b/test/test_Data/test_psf.py @@ -10,14 +10,20 @@ class TestData(object): - def setup_method(self): self.deltaPix = 0.05 fwhm = 0.2 - kwargs_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': self.deltaPix} + kwargs_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": self.deltaPix, + } self.psf_gaussian = PSF(**kwargs_gaussian) - kernel_point_source = kernel_util.kernel_gaussian(num_pix=21, delta_pix=self.deltaPix, fwhm=fwhm) - kwargs_pixel = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source} + kernel_point_source = kernel_util.kernel_gaussian( + num_pix=21, delta_pix=self.deltaPix, fwhm=fwhm + ) + kwargs_pixel = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} self.psf_pixel = PSF(**kwargs_pixel) def test_kernel_point_source(self): @@ -26,12 +32,21 @@ def test_kernel_point_source(self): assert len(kernel_gaussian) == 21 assert len(kernel_pixel) == 21 - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 0.2, 'truncation': 3, 'pixel_size': 0.05} + kwargs_psf = { + "psf_type": 
"GAUSSIAN", + "fwhm": 0.2, + "truncation": 3, + "pixel_size": 0.05, + } psf_class = PSF(**kwargs_psf) kernel_point_source = psf_class.kernel_point_source assert len(kernel_point_source) == 13 - kernel_super = psf_class.kernel_point_source_supersampled(supersampling_factor=3) - npt.assert_almost_equal(np.sum(kernel_point_source), np.sum(kernel_super), decimal=9) + kernel_super = psf_class.kernel_point_source_supersampled( + supersampling_factor=3 + ) + npt.assert_almost_equal( + np.sum(kernel_point_source), np.sum(kernel_super), decimal=9 + ) npt.assert_almost_equal(np.sum(kernel_point_source), 1, decimal=9) def test_kernel_subsampled(self): @@ -43,22 +58,36 @@ def test_kernel_subsampled(self): # create Gaussian/Pixelized kernels # first we create the sub-sampled kernel - kernel_point_source_subsampled = kernel_util.kernel_gaussian(num_pix=11 * subsampling_res, delta_pix=deltaPix / subsampling_res, fwhm=fwhm) + kernel_point_source_subsampled = kernel_util.kernel_gaussian( + num_pix=11 * subsampling_res, + delta_pix=deltaPix / subsampling_res, + fwhm=fwhm, + ) # to have the same consistent kernel, we re-size (average over the sub-sampled pixels) the sub-sampled kernel - kernel_point_source = image_util.re_size(kernel_point_source_subsampled, subsampling_res) + kernel_point_source = image_util.re_size( + kernel_point_source_subsampled, subsampling_res + ) # here we create the two PSF() classes - kwargs_pixel_subsampled = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source_subsampled, - 'point_source_supersampling_factor': subsampling_res} + kwargs_pixel_subsampled = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source_subsampled, + "point_source_supersampling_factor": subsampling_res, + } psf_pixel_subsampled = PSF(**kwargs_pixel_subsampled) - psf_pixel_subsampled.kernel_point_source_supersampled(supersampling_factor=subsampling_res+1) + psf_pixel_subsampled.kernel_point_source_supersampled( + supersampling_factor=subsampling_res + 1 + ) kernel_point_source /= np.sum(kernel_point_source) - kwargs_pixel = {'psf_type': 'PIXEL', - 'kernel_point_source': kernel_point_source} + kwargs_pixel = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} psf_pixel = PSF(**kwargs_pixel) kernel_point_source = psf_pixel.kernel_point_source - kernel_super = psf_pixel.kernel_point_source_supersampled(supersampling_factor=3) - npt.assert_almost_equal(np.sum(kernel_point_source), np.sum(kernel_super), decimal=8) + kernel_super = psf_pixel.kernel_point_source_supersampled( + supersampling_factor=3 + ) + npt.assert_almost_equal( + np.sum(kernel_point_source), np.sum(kernel_super), decimal=8 + ) npt.assert_almost_equal(np.sum(kernel_point_source), 1, decimal=8) deltaPix = 0.05 # pixel size of image @@ -69,57 +98,91 @@ def test_kernel_subsampled(self): # create Gaussian/Pixelized kernels # first we create the sub-sampled kernel - kernel_point_source_subsampled = kernel_util.kernel_gaussian(num_pix=11 * subsampling_res + 1, - delta_pix=deltaPix / subsampling_res, fwhm=fwhm) + kernel_point_source_subsampled = kernel_util.kernel_gaussian( + num_pix=11 * subsampling_res + 1, + delta_pix=deltaPix / subsampling_res, + fwhm=fwhm, + ) - kwargs_pixel_subsampled = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source_subsampled, - 'point_source_supersampling_factor': subsampling_res} + kwargs_pixel_subsampled = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source_subsampled, + "point_source_supersampling_factor": subsampling_res, + } psf_pixel_subsampled = 
PSF(**kwargs_pixel_subsampled) kernel_point_source /= np.sum(kernel_point_source) - kwargs_pixel = {'psf_type': 'PIXEL', - 'kernel_point_source': kernel_point_source} + kwargs_pixel = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} psf_pixel = PSF(**kwargs_pixel) kernel_point_source = psf_pixel.kernel_point_source kernel_point_source_new = psf_pixel_subsampled.kernel_point_source - npt.assert_almost_equal(np.sum(kernel_point_source), np.sum(kernel_point_source_new), decimal=8) + npt.assert_almost_equal( + np.sum(kernel_point_source), np.sum(kernel_point_source_new), decimal=8 + ) npt.assert_almost_equal(np.sum(kernel_point_source), 1, decimal=8) - psf_none = PSF(psf_type='NONE') + psf_none = PSF(psf_type="NONE") kernel_super = psf_none.kernel_point_source_supersampled(supersampling_factor=5) npt.assert_almost_equal(kernel_super, psf_none.kernel_point_source, decimal=9) def test_fwhm(self): - deltaPix = 1. + deltaPix = 1.0 fwhm = 5.6 - kwargs = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_kernel = PSF(**kwargs) fwhm_compute = psf_kernel.fwhm assert fwhm_compute == fwhm kernel = kernel_util.kernel_gaussian(num_pix=31, delta_pix=deltaPix, fwhm=fwhm) - kwargs = {'psf_type': 'PIXEL', 'truncation': 5, 'pixel_size': deltaPix, 'kernel_point_source': kernel} + kwargs = { + "psf_type": "PIXEL", + "truncation": 5, + "pixel_size": deltaPix, + "kernel_point_source": kernel, + } psf_kernel = PSF(**kwargs) fwhm_compute = psf_kernel.fwhm npt.assert_almost_equal(fwhm_compute, fwhm, decimal=1) - kwargs = {'psf_type': 'PIXEL', 'truncation': 5, 'pixel_size': deltaPix, 'kernel_point_source': kernel, - 'point_source_supersampling_factor': 1} + kwargs = { + "psf_type": "PIXEL", + "truncation": 5, + "pixel_size": deltaPix, + "kernel_point_source": kernel, + "point_source_supersampling_factor": 1, + } psf_kernel = PSF(**kwargs) fwhm_compute = psf_kernel.fwhm npt.assert_almost_equal(fwhm_compute, fwhm, decimal=1) def test_kernel_pixel(self): - deltaPix = 1. + deltaPix = 1.0 fwhm = 5.6 - kwargs = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_kernel = PSF(**kwargs) kernel_pixel = psf_kernel.kernel_pixel - npt.assert_almost_equal(np.sum(kernel_pixel), np.sum(psf_kernel.kernel_point_source), decimal=9) + npt.assert_almost_equal( + np.sum(kernel_pixel), np.sum(psf_kernel.kernel_point_source), decimal=9 + ) def test_psf_error_map(self): - deltaPix = 1. 
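test_fwhm above checks that a pixelized Gaussian kernel recovers the FWHM of the GAUSSIAN psf_type to about one decimal. The underlying relation is the standard FWHM = 2 sqrt(2 ln 2) sigma of a Gaussian; a small standalone check:

import numpy as np

fwhm = 5.6
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))  # ~2.378 for fwhm = 5.6

# a normalized 1D Gaussian evaluated at +/- fwhm/2 is half its peak value
peak = 1 / (np.sqrt(2 * np.pi) * sigma)
half = peak * np.exp(-0.5 * (fwhm / 2) ** 2 / sigma**2)
print(half / peak)  # 0.5 by construction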
+ deltaPix = 1.0 fwhm = 5.6 - kwargs = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_kernel = PSF(**kwargs) error_map = psf_kernel.psf_error_map assert error_map.all() == 0 @@ -131,42 +194,49 @@ def test_warning(self): # create Gaussian/Pixelized kernels # first we create the sub-sampled kernel - kernel_point_source_subsampled = kernel_util.kernel_gaussian(num_pix=11 * subsampling_res + 1, - delta_pix=deltaPix / subsampling_res, fwhm=fwhm) - print(len(kernel_point_source_subsampled), 'test') - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source_subsampled, - 'point_source_supersampling_factor': subsampling_res, - 'psf_error_map': np.ones_like(kernel_point_source_subsampled)} + kernel_point_source_subsampled = kernel_util.kernel_gaussian( + num_pix=11 * subsampling_res + 1, + delta_pix=deltaPix / subsampling_res, + fwhm=fwhm, + ) + print(len(kernel_point_source_subsampled), "test") + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source_subsampled, + "point_source_supersampling_factor": subsampling_res, + "psf_error_map": np.ones_like(kernel_point_source_subsampled), + } psf_kernel = PSF(**kwargs_psf) n = len(psf_kernel.kernel_point_source) error_map = psf_kernel.psf_error_map assert len(error_map) == n - - class TestRaise(unittest.TestCase): - def test_raise(self): - psf = PSF(psf_type='PIXEL', kernel_point_source=np.ones((3, 3))) - psf.psf_type = 'WRONG' + psf = PSF(psf_type="PIXEL", kernel_point_source=np.ones((3, 3))) + psf.psf_type = "WRONG" with self.assertRaises(ValueError): - PSF(psf_type='GAUSSIAN') + PSF(psf_type="GAUSSIAN") with self.assertRaises(ValueError): - PSF(psf_type='PIXEL') + PSF(psf_type="PIXEL") with self.assertRaises(ValueError): - PSF(psf_type='PIXEL', kernel_point_source=np.ones((2, 2))) + PSF(psf_type="PIXEL", kernel_point_source=np.ones((2, 2))) with self.assertRaises(ValueError): - PSF(psf_type='WRONG') + PSF(psf_type="WRONG") with self.assertRaises(ValueError): - PSF(psf_type='PIXEL', kernel_point_source=np.ones((3, 3)), psf_error_map=np.ones((5, 5))) + PSF( + psf_type="PIXEL", + kernel_point_source=np.ones((3, 3)), + psf_error_map=np.ones((5, 5)), + ) psf.kernel_point_source_supersampled(supersampling_factor=3) with self.assertRaises(ValueError): - psf = PSF(psf_type='PIXEL', kernel_point_source=np.ones((3, 3))) - psf.psf_type = 'WRONG' + psf = PSF(psf_type="PIXEL", kernel_point_source=np.ones((3, 3))) + psf.psf_type = "WRONG" psf.kernel_point_source_supersampled(supersampling_factor=3) with self.assertRaises(ValueError): - psf = PSF(psf_type='GAUSSIAN', fwhm=100, pixel_size=0.0001) + psf = PSF(psf_type="GAUSSIAN", fwhm=100, pixel_size=0.0001) psf.kernel_point_source_supersampled(supersampling_factor=3) with warnings.catch_warnings(record=True) as w: @@ -175,15 +245,20 @@ def test_raise(self): # Trigger a warning. 
kernel_point_source_subsampled = np.ones((9, 9)) subsampling_res = 3 - kwargs_pixel_subsampled = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source_subsampled, - 'point_source_supersampling_factor': subsampling_res} + kwargs_pixel_subsampled = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source_subsampled, + "point_source_supersampling_factor": subsampling_res, + } psf_pixel_subsampled = PSF(**kwargs_pixel_subsampled) - psf_pixel_subsampled.kernel_point_source_supersampled(supersampling_factor=subsampling_res + 4) + psf_pixel_subsampled.kernel_point_source_supersampled( + supersampling_factor=subsampling_res + 4 + ) # Verify some things assert 1 == 1 - #assert len(w) == 1 - #assert issubclass(w[-1].category, Warning) + # assert len(w) == 1 + # assert issubclass(w[-1].category, Warning) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_analytic_kinematics.py b/test/test_GalKin/test_analytic_kinematics.py index 9f94bafdb..eda699b08 100644 --- a/test/test_GalKin/test_analytic_kinematics.py +++ b/test/test_GalKin/test_analytic_kinematics.py @@ -3,22 +3,31 @@ class TestAnalyticKinematics(object): - def setup_method(self): pass def test_sigma_s2(self): - kwargs_aperture = {'center_ra': 0, 'width': 1, 'length': 1, 'angle': 0, 'center_dec': 0, - 'aperture_type': 'slit'} - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 1} + kwargs_aperture = { + "center_ra": 0, + "width": 1, + "length": 1, + "angle": 0, + "center_dec": 0, + "aperture_type": "slit", + } + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1} kin = AnalyticKinematics(kwargs_cosmo) - kwargs_light = {'r_eff': 1} - sigma_s2 = kin.sigma_s2(r=1, R=0.1, kwargs_mass={'theta_E': 1, 'gamma': 2}, kwargs_light=kwargs_light, - kwargs_anisotropy={'r_ani': 1}) - assert 'a' in kwargs_light - + kwargs_light = {"r_eff": 1} + sigma_s2 = kin.sigma_s2( + r=1, + R=0.1, + kwargs_mass={"theta_E": 1, "gamma": 2}, + kwargs_light=kwargs_light, + kwargs_anisotropy={"r_ani": 1}, + ) + assert "a" in kwargs_light -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_anisotropy.py b/test/test_GalKin/test_anisotropy.py index acf0512a6..6a7375231 100644 --- a/test/test_GalKin/test_anisotropy.py +++ b/test/test_GalKin/test_anisotropy.py @@ -1,6 +1,4 @@ -""" -Tests for `galkin` module. -""" +"""Tests for `galkin` module.""" import pytest import numpy.testing as npt import numpy as np @@ -10,43 +8,41 @@ class TestAnisotropy(object): - def setup_method(self): - self._r_array = np.array([2., 3.]) - self._R_array = 1. 
+ self._r_array = np.array([2.0, 3.0]) + self._R_array = 1.0 def test_K(self): - - anisoClass = Anisotropy(anisotropy_type='const') - kwargs = {'beta': 1.0} + anisoClass = Anisotropy(anisotropy_type="const") + kwargs = {"beta": 1.0} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.61418484930437822, decimal=5) - kwargs = {'beta': -0.49} + kwargs = {"beta": -0.49} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.7645553632433857, decimal=5) - anisoClass = Anisotropy(anisotropy_type='Colin') - kwargs = {'r_ani': 1} + anisoClass = Anisotropy(anisotropy_type="Colin") + kwargs = {"r_ani": 1} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.91696135187291117, decimal=5) - k = anisoClass.K(self._r_array, self._R_array-0.001, **kwargs) + k = anisoClass.K(self._r_array, self._R_array - 0.001, **kwargs) npt.assert_almost_equal(k[0], 0.91696135187291117, decimal=2) k = anisoClass.K(self._r_array, self._R_array + 0.001, **kwargs) npt.assert_almost_equal(k[0], 0.91696135187291117, decimal=2) - anisoClass = Anisotropy(anisotropy_type='radial') + anisoClass = Anisotropy(anisotropy_type="radial") kwargs = {} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.61418484930437856, decimal=5) - anisoClass = Anisotropy(anisotropy_type='isotropic') + anisoClass = Anisotropy(anisotropy_type="isotropic") kwargs = {} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.8660254037844386, decimal=5) - anisoClass = Anisotropy(anisotropy_type='OM') - kwargs = {'r_ani': 1} + anisoClass = Anisotropy(anisotropy_type="OM") + kwargs = {"r_ani": 1} k = anisoClass.K(self._r_array, self._R_array, **kwargs) npt.assert_almost_equal(k[0], 0.95827704196894481, decimal=5) @@ -55,78 +51,76 @@ def test_K(self): npt.assert_almost_equal(k, 0, decimal=5) def test_beta(self): - r = 2. + r = 2.0 - anisoClass = Anisotropy(anisotropy_type='const') - kwargs = {'beta': 1.} + anisoClass = Anisotropy(anisotropy_type="const") + kwargs = {"beta": 1.0} beta = anisoClass.beta_r(r, **kwargs) npt.assert_almost_equal(beta, 1, decimal=5) - anisoClass = Anisotropy(anisotropy_type='Colin') - kwargs = {'r_ani': 1} + anisoClass = Anisotropy(anisotropy_type="Colin") + kwargs = {"r_ani": 1} beta = anisoClass.beta_r(r, **kwargs) - npt.assert_almost_equal(beta, 1./3, decimal=5) + npt.assert_almost_equal(beta, 1.0 / 3, decimal=5) - anisoClass = Anisotropy(anisotropy_type='radial') + anisoClass = Anisotropy(anisotropy_type="radial") kwargs = {} beta = anisoClass.beta_r(r, **kwargs) npt.assert_almost_equal(beta, 1, decimal=5) - anisoClass = Anisotropy(anisotropy_type='isotropic') + anisoClass = Anisotropy(anisotropy_type="isotropic") kwargs = {} beta = anisoClass.beta_r(r, **kwargs) npt.assert_almost_equal(beta, 0, decimal=5) - anisoClass = Anisotropy(anisotropy_type='OM') - kwargs = {'r_ani': 1} + anisoClass = Anisotropy(anisotropy_type="OM") + kwargs = {"r_ani": 1} beta = anisoClass.beta_r(r, **kwargs) npt.assert_almost_equal(beta, 0.8, decimal=5) def test_radial_anisotropy(self): - # radial - r = 2. - R = 1. 
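The numbers asserted in test_beta above follow from the closed-form anisotropy profiles: the Osipkov-Merritt value 0.8 at r = 2, r_ani = 1 matches beta(r) = r^2 / (r^2 + r_ani^2) = 4/5, and the Colin value 1/3 is consistent with beta(r) = r / (2 (r + r_ani)). A quick standalone check of these two formulas (illustrative, not the Anisotropy class):

def beta_om(r, r_ani):
    # Osipkov-Merritt anisotropy profile
    return r**2 / (r**2 + r_ani**2)

def beta_colin(r, r_ani):
    # "Colin" anisotropy profile as parameterized in Mamon & Lokas (2005)
    return 0.5 * r / (r + r_ani)

print(beta_om(2.0, 1.0))     # 0.8, as asserted for anisotropy_type='OM'
print(beta_colin(2.0, 1.0))  # 0.333..., as asserted for 'Colin'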
- radial = Anisotropy(anisotropy_type='radial') + r = 2.0 + R = 1.0 + radial = Anisotropy(anisotropy_type="radial") kwargs_rad = {} beta = radial.beta_r(r, **kwargs_rad) k = radial.K(r, R, **kwargs_rad) f = radial.anisotropy_solution(r, **kwargs_rad) assert f == r**2 - const = Anisotropy(anisotropy_type='const') - kwargs = {'beta': beta} - print(beta, 'beta') - #kwargs = {'beta': 1} + const = Anisotropy(anisotropy_type="const") + kwargs = {"beta": beta} + print(beta, "beta") + # kwargs = {'beta': 1} k_mamon = const.K(r, R, **kwargs) print(k, k_mamon) npt.assert_almost_equal(k, k_mamon, decimal=5) def test_isotropic_anisotropy(self): - # radial - r = 2. - R = 1. - isotropic = Anisotropy(anisotropy_type='isotropic') + r = 2.0 + R = 1.0 + isotropic = Anisotropy(anisotropy_type="isotropic") kwargs_iso = {} beta = isotropic.beta_r(r, **kwargs_iso) k = isotropic.K(r, R, **kwargs_iso) f = isotropic.anisotropy_solution(r, **kwargs_iso) assert f == 1 - print(beta, 'test') - const = Anisotropy(anisotropy_type='const') - kwargs = {'beta': beta} + print(beta, "test") + const = Anisotropy(anisotropy_type="const") + kwargs = {"beta": beta} k_const = const.K(r, R, **kwargs) print(k, k_const) npt.assert_almost_equal(k, k_const, decimal=5) def test_generalizedOM(self): # generalized OM model - gom = Anisotropy(anisotropy_type='GOM') + gom = Anisotropy(anisotropy_type="GOM") r = self._r_array R = 2 - om = Anisotropy(anisotropy_type='OM') - kwargs_om = {'r_ani': 1.} - kwargs_gom = {'r_ani': 1., 'beta_inf': 1.} + om = Anisotropy(anisotropy_type="OM") + kwargs_om = {"r_ani": 1.0} + kwargs_gom = {"r_ani": 1.0, "beta_inf": 1.0} beta_gom = gom.beta_r(r, **kwargs_gom) beta_om = om.beta_r(r, **kwargs_om) npt.assert_almost_equal(beta_gom, beta_om, decimal=5) @@ -138,39 +132,39 @@ def test_generalizedOM(self): K_gom = gom.K(r, R, **kwargs_gom) K_om = om.K(r, R, **kwargs_om) npt.assert_almost_equal(K_gom, K_om, decimal=3) - assert hasattr(gom._model, '_f_12_interp') - assert hasattr(gom._model, '_f_32_interp') + assert hasattr(gom._model, "_f_12_interp") + assert hasattr(gom._model, "_f_32_interp") gom.delete_anisotropy_cache() - if hasattr(gom._model, '_f_12_interp'): + if hasattr(gom._model, "_f_12_interp"): assert False - if hasattr(gom._model, '_f_32_interp'): + if hasattr(gom._model, "_f_32_interp"): assert False from lenstronomy.GalKin.anisotropy import GeneralizedOM + gom_class = GeneralizedOM() - _F = gom_class._F(a=3/2., z=0.5, beta_inf=1) - _F_array = gom_class._F(a=3 / 2., z=np.array([0.5]), beta_inf=1) + _F = gom_class._F(a=3 / 2.0, z=0.5, beta_inf=1) + _F_array = gom_class._F(a=3 / 2.0, z=np.array([0.5]), beta_inf=1) npt.assert_almost_equal(_F_array[0], _F, decimal=5) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - Anisotropy(anisotropy_type='wrong') + Anisotropy(anisotropy_type="wrong") with self.assertRaises(ValueError): - ani = Anisotropy(anisotropy_type='Colin') + ani = Anisotropy(anisotropy_type="Colin") ani.K(r=1, R=2, r_ani=1) with self.assertRaises(ValueError): - ani = Anisotropy(anisotropy_type='const') + ani = Anisotropy(anisotropy_type="const") ani.anisotropy_solution(r=1) with self.assertRaises(ValueError): - const = Anisotropy(anisotropy_type='const') - kwargs = {'beta': 1} + const = Anisotropy(anisotropy_type="const") + kwargs = {"beta": 1} f_const = const.anisotropy_solution(r=1, **kwargs) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_aperture.py b/test/test_GalKin/test_aperture.py 
index a58ed88ec..f13155078 100644 --- a/test/test_GalKin/test_aperture.py +++ b/test/test_GalKin/test_aperture.py @@ -5,21 +5,26 @@ class TestAperture(object): - def setup_method(self): pass def test_aperture_select(self): - kwargs_slit = {'length': 2, 'width': 0.5, 'center_ra': 0, 'center_dec': 0, 'angle': 0} - slit = Aperture(aperture_type='slit', **kwargs_slit) + kwargs_slit = { + "length": 2, + "width": 0.5, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } + slit = Aperture(aperture_type="slit", **kwargs_slit) bool, i = slit.aperture_select(ra=0.9, dec=0.2) assert bool is True bool, i = slit.aperture_select(ra=1.1, dec=0.2) assert bool is False assert slit.num_segments == 1 - kwargs_shell = {'r_in': 0.2, 'r_out': 1., 'center_ra': 0, 'center_dec': 0} - shell = Aperture(aperture_type='shell', **kwargs_shell) + kwargs_shell = {"r_in": 0.2, "r_out": 1.0, "center_ra": 0, "center_dec": 0} + shell = Aperture(aperture_type="shell", **kwargs_shell) bool, i = shell.aperture_select(ra=0.9, dec=0) assert bool is True bool, i = shell.aperture_select(ra=1.1, dec=0) @@ -28,8 +33,13 @@ def test_aperture_select(self): assert bool is False assert shell.num_segments == 1 - kwargs_boxhole = {'width_outer': 1, 'width_inner': 0.5, 'center_ra': 0, 'center_dec': 0} - frame = Aperture(aperture_type='frame', **kwargs_boxhole) + kwargs_boxhole = { + "width_outer": 1, + "width_inner": 0.5, + "center_ra": 0, + "center_dec": 0, + } + frame = Aperture(aperture_type="frame", **kwargs_boxhole) bool, i = frame.aperture_select(ra=0.4, dec=0) assert bool is True bool, i = frame.aperture_select(ra=0.2, dec=0) @@ -38,11 +48,10 @@ def test_aperture_select(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - Aperture(aperture_type='wrong', kwargs_aperture={}) + Aperture(aperture_type="wrong", kwargs_aperture={}) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_aperture_types.py b/test/test_GalKin/test_aperture_types.py index 1aa73d52a..dc6ba699b 100644 --- a/test/test_GalKin/test_aperture_types.py +++ b/test/test_GalKin/test_aperture_types.py @@ -4,32 +4,47 @@ class TestApertureTypes(object): - def setup_method(self): pass def test_shell_select(self): - #aperture = Aperture() + # aperture = Aperture() ra, dec = 1, 1 r_in = 2 r_out = 4 - bool_select = aperture_types.shell_select(ra, dec, r_in, r_out, center_ra=0, center_dec=0) + bool_select = aperture_types.shell_select( + ra, dec, r_in, r_out, center_ra=0, center_dec=0 + ) assert bool_select is False - bool_select = aperture_types.shell_select(3, 0, r_in, r_out, center_ra=0, center_dec=0) + bool_select = aperture_types.shell_select( + 3, 0, r_in, r_out, center_ra=0, center_dec=0 + ) assert bool_select is True def test_slit_select(self): - bool_select = aperture_types.slit_select(ra=0.9, dec=0, length=2, width=0.5, center_ra=0, center_dec=0, angle=0) + bool_select = aperture_types.slit_select( + ra=0.9, dec=0, length=2, width=0.5, center_ra=0, center_dec=0, angle=0 + ) assert bool_select is True - bool_select = aperture_types.slit_select(ra=0.9, dec=0, length=2, width=0.5, center_ra=0, center_dec=0, angle=np.pi/2) + bool_select = aperture_types.slit_select( + ra=0.9, + dec=0, + length=2, + width=0.5, + center_ra=0, + center_dec=0, + angle=np.pi / 2, + ) assert bool_select is False def test_ifu_shell_select(self): ra, dec = 1, 1 r_bin = np.linspace(0, 10, 11) - bool_select, i = aperture_types.shell_ifu_select(ra, dec, r_bin, center_ra=0, center_dec=0) + 
bool_select, i = aperture_types.shell_ifu_select( + ra, dec, r_bin, center_ra=0, center_dec=0 + ) assert bool_select is True assert i == 1 @@ -38,18 +53,39 @@ def test_frame(self): width_outer = 1.2 width_inner = 0.6 ra, dec = 0, 0 - bool_select = aperture_types.frame_select(ra, dec, width_inner=width_inner, width_outer=width_outer, center_ra=center_ra, center_dec=center_dec, angle=0) + bool_select = aperture_types.frame_select( + ra, + dec, + width_inner=width_inner, + width_outer=width_outer, + center_ra=center_ra, + center_dec=center_dec, + angle=0, + ) assert bool_select is False ra, dec = 0.5, 0 - bool_select = aperture_types.frame_select(ra, dec, width_inner=width_inner, width_outer=width_outer, - center_ra=center_ra, center_dec=center_dec, angle=0) + bool_select = aperture_types.frame_select( + ra, + dec, + width_inner=width_inner, + width_outer=width_outer, + center_ra=center_ra, + center_dec=center_dec, + angle=0, + ) assert bool_select is True ra, dec = 5, 5 - bool_select = aperture_types.frame_select(ra, dec, width_inner=width_inner, width_outer=width_outer, - center_ra=center_ra, center_dec=center_dec, angle=0) + bool_select = aperture_types.frame_select( + ra, + dec, + width_inner=width_inner, + width_outer=width_outer, + center_ra=center_ra, + center_dec=center_dec, + angle=0, + ) assert bool_select is False - -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_cosmo.py b/test/test_GalKin/test_cosmo.py index d2ddad6a6..bfaa6fba8 100644 --- a/test/test_GalKin/test_cosmo.py +++ b/test/test_GalKin/test_cosmo.py @@ -5,11 +5,10 @@ class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): Cosmo(d_d=-1, d_s=1, d_ds=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_galkin.py b/test/test_GalKin/test_galkin.py index 8d9028f3d..a9feccba4 100755 --- a/test/test_GalKin/test_galkin.py +++ b/test/test_GalKin/test_galkin.py @@ -1,6 +1,4 @@ -""" -Tests for `galkin` module. -""" +"""Tests for `galkin` module.""" import pytest import unittest import copy @@ -14,401 +12,671 @@ class TestGalkin(object): - def setup_method(self): np.random.seed(42) def test_compare_power_law(self): - """ - compare power-law profiles analytical vs. numerical + """Compare power-law profiles analytical vs. + + numerical :return: """ # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.5 - kwargs_light = [{'Rs': 0.551 * r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": 0.551 * r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' - length = 1. + aperture_type = "slit" + length = 1.0 width = 0.3 - kwargs_aperture = {'aperture_type': aperture_type, 'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0} - - psf_fwhm = 1. 
# Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 1000, 'max_integrate': 1000, 'min_integrate': 0.001} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - - galkin_analytic = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics, analytic_kinematics=True) - sigma_v_analytic = galkin_analytic.dispersion(kwargs_mass={'gamma': gamma, 'theta_E': theta_E}, kwargs_light={'r_eff': r_eff}, - kwargs_anisotropy={'r_ani':r_ani}, sampling_number=1000) - kwargs_numerics['lum_weight_int_method'] = False - galkin_num_3d = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics, analytic_kinematics=False) - sigma_v_num_3d = galkin_num_3d.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) - - print(sigma_v_analytic, sigma_v_num_3d, 'sigma_v Galkin 3d numerics, sigma_v analytic') - npt.assert_almost_equal(sigma_v_num_3d/sigma_v_analytic, 1, decimal=2) + kwargs_aperture = { + "aperture_type": aperture_type, + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } + + psf_fwhm = 1.0 # Gaussian FWHM psf + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 1000, + "max_integrate": 1000, + "min_integrate": 0.001, + } + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + + galkin_analytic = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=True, + ) + sigma_v_analytic = galkin_analytic.dispersion( + kwargs_mass={"gamma": gamma, "theta_E": theta_E}, + kwargs_light={"r_eff": r_eff}, + kwargs_anisotropy={"r_ani": r_ani}, + sampling_number=1000, + ) + kwargs_numerics["lum_weight_int_method"] = False + galkin_num_3d = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=False, + ) + sigma_v_num_3d = galkin_num_3d.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + + print( + sigma_v_analytic, + sigma_v_num_3d, + "sigma_v Galkin 3d numerics, sigma_v analytic", + ) + npt.assert_almost_equal(sigma_v_num_3d / sigma_v_analytic, 1, decimal=2) # 2d projected integral calculation - kwargs_numerics = {'interpol_grid_num': 1000, 'max_integrate': 1000, 'min_integrate': 0.000001, - 'lum_weight_int_method': True, 'log_integration': True} - galkin_num_log_proj = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics, analytic_kinematics=False) - sigma_v_num_log_proj = galkin_num_log_proj.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) - - kwargs_numerics = {'interpol_grid_num': 10000, 'max_integrate': 1000, 'min_integrate': 0.0001, - 'lum_weight_int_method': True, 'log_integration': False} - galkin_num_lin_proj = 
Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics, - analytic_kinematics=False) - sigma_v_num_lin_proj = galkin_num_lin_proj.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, - sampling_number=1000) - - print(sigma_v_num_log_proj / sigma_v_analytic, sigma_v_num_lin_proj / sigma_v_analytic, 'log proj, lin proj') + kwargs_numerics = { + "interpol_grid_num": 1000, + "max_integrate": 1000, + "min_integrate": 0.000001, + "lum_weight_int_method": True, + "log_integration": True, + } + galkin_num_log_proj = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=False, + ) + sigma_v_num_log_proj = galkin_num_log_proj.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + + kwargs_numerics = { + "interpol_grid_num": 10000, + "max_integrate": 1000, + "min_integrate": 0.0001, + "lum_weight_int_method": True, + "log_integration": False, + } + galkin_num_lin_proj = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=False, + ) + sigma_v_num_lin_proj = galkin_num_lin_proj.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + + print( + sigma_v_num_log_proj / sigma_v_analytic, + sigma_v_num_lin_proj / sigma_v_analytic, + "log proj, lin proj", + ) npt.assert_almost_equal(sigma_v_num_log_proj / sigma_v_analytic, 1, decimal=2) npt.assert_almost_equal(sigma_v_num_lin_proj / sigma_v_analytic, 1, decimal=2) def test_log_vs_linear_integral(self): - """ - here we test logarithmic vs linear integral in an end-to-end fashion. - We do not demand the highest level of precisions here!!! - We are using the luminosity-weighted velocity dispersion integration calculation in this test. + """Here we test logarithmic vs linear integral in an end-to-end fashion. + + We do not demand the highest level of precisions here!!! We are using the + luminosity-weighted velocity dispersion integration calculation in this test. """ # light profile - light_profile_list = ['HERNQUIST'] - Rs = .5 - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["HERNQUIST"] + Rs = 0.5 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. 
- kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' + aperture_type = "slit" length = 3.8 width = 0.9 - kwargs_aperture = {'aperture_type': aperture_type, 'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0} + kwargs_aperture = { + "aperture_type": aperture_type, + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics_log = {'interpol_grid_num': 1000, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001, - 'lum_weight_int_method': True} - kwargs_numerics_linear = {'interpol_grid_num': 1000, 'log_integration': False, - 'max_integrate': 10, 'min_integrate': 0.001, - 'lum_weight_int_method': True} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - galkin_linear = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_linear) - - sigma_v_lin = galkin_linear.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) - galkin_log = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_log) - sigma_v_log = galkin_log.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) - print(sigma_v_lin, sigma_v_log, 'sigma_v linear, sigma_v log') - print((sigma_v_lin/sigma_v_log)**2) - - npt.assert_almost_equal(sigma_v_lin/sigma_v_log, 1, decimal=2) + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics_log = { + "interpol_grid_num": 1000, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + "lum_weight_int_method": True, + } + kwargs_numerics_linear = { + "interpol_grid_num": 1000, + "log_integration": False, + "max_integrate": 10, + "min_integrate": 0.001, + "lum_weight_int_method": True, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + galkin_linear = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_linear, + ) + + sigma_v_lin = galkin_linear.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + galkin_log = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_log, + ) + sigma_v_log = galkin_log.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + print(sigma_v_lin, sigma_v_log, "sigma_v linear, sigma_v log") + print((sigma_v_lin / sigma_v_log) ** 2) + + npt.assert_almost_equal(sigma_v_lin / sigma_v_log, 1, decimal=2) def test_projected_light_integral_hernquist(self): """ :return: """ - light_profile_list = ['HERNQUIST'] - Rs = 1. 
- kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["HERNQUIST"] + Rs = 1.0 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec lightProfile = LightProfile(light_profile_list) R = 2 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 100) - npt.assert_almost_equal(light2d, out[0]*2, decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 100, + ) + npt.assert_almost_equal(light2d, out[0] * 2, decimal=3) def test_projected_light_integral_hernquist_ellipse(self): """ :return: """ - light_profile_list = ['HERNQUIST_ELLIPSE'] - Rs = 1. + light_profile_list = ["HERNQUIST_ELLIPSE"] + Rs = 1.0 phi, q = 1, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_light = [{'Rs': Rs, 'amp': 1.,'e1': e1, 'e2': e2}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": Rs, "amp": 1.0, "e1": e1, "e2": e2} + ] # effective half light radius (2d projected) in arcsec lightProfile = LightProfile(light_profile_list) R = 2 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 10) - npt.assert_almost_equal(light2d, out[0]*2, decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 10, + ) + npt.assert_almost_equal(light2d, out[0] * 2, decimal=3) def test_projected_light_integral_pjaffe(self): """ :return: """ - light_profile_list = ['PJAFFE'] - kwargs_light = [{'Rs': .5, 'Ra': 0.01, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["PJAFFE"] + kwargs_light = [ + {"Rs": 0.5, "Ra": 0.01, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec lightProfile = LightProfile(light_profile_list) R = 0.01 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 100) - print(out, 'out') - npt.assert_almost_equal(light2d/(out[0]*2), 1., decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 100, + ) + print(out, "out") + npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3) def test_realistic_0(self): - """ - realistic test example - :return: - """ - light_profile_list = ['HERNQUIST'] - kwargs_light = [{'Rs': 0.10535462602138289, 'center_x': -0.02678473951679429, 'center_y': 0.88691126347462712, - 'amp': 3.7114695634960109}] + """Realistic test example :return:""" + light_profile_list = ["HERNQUIST"] + kwargs_light = [ + { + "Rs": 0.10535462602138289, + "center_x": -0.02678473951679429, + "center_y": 0.88691126347462712, + "amp": 3.7114695634960109, + } + ] lightProfile = LightProfile(light_profile_list) R = 0.01 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 100) - print(out, 'out') - npt.assert_almost_equal(light2d/(out[0]*2), 1., decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 100, + ) + print(out, "out") + npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3) def test_realistic_1(self): - """ - realistic test example - :return: - """ - light_profile_list = 
['HERNQUIST_ELLIPSE'] + """Realistic test example :return:""" + light_profile_list = ["HERNQUIST_ELLIPSE"] phi, q = 0.74260706384506325, 0.46728323131925864 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_light = [{'Rs': 0.10535462602138289, 'e1': e1, 'e2': e2, 'center_x': -0.02678473951679429, - 'center_y': 0.88691126347462712, 'amp': 3.7114695634960109}] + kwargs_light = [ + { + "Rs": 0.10535462602138289, + "e1": e1, + "e2": e2, + "center_x": -0.02678473951679429, + "center_y": 0.88691126347462712, + "amp": 3.7114695634960109, + } + ] lightProfile = LightProfile(light_profile_list) R = 0.01 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 100) - print(out, 'out') - npt.assert_almost_equal(light2d/(out[0]*2), 1., decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 100, + ) + print(out, "out") + npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3) def test_realistic(self): - """ - realistic test example - :return: - """ - light_profile_list = ['HERNQUIST_ELLIPSE', 'PJAFFE_ELLIPSE'] + """Realistic test example :return:""" + light_profile_list = ["HERNQUIST_ELLIPSE", "PJAFFE_ELLIPSE"] phi, q = 0.74260706384506325, 0.46728323131925864 e1, e2 = param_util.phi_q2_ellipticity(phi, q) phi2, q2 = -0.33379268413794494, 0.66582356813012267 e12, e22 = param_util.phi_q2_ellipticity(phi2, q2) - kwargs_light = [{'Rs': 0.10535462602138289, 'e1': e1, 'e2': e2, 'center_x': -0.02678473951679429, 'center_y': 0.88691126347462712, 'amp': 3.7114695634960109}, - {'Rs': 0.44955054610388684, 'e1': e12, 'e2': e22, 'center_x': 0.019536801118136753, 'center_y': 0.0218888643537157, 'Ra': 0.0010000053334891974, 'amp': 967.00280526319796}] + kwargs_light = [ + { + "Rs": 0.10535462602138289, + "e1": e1, + "e2": e2, + "center_x": -0.02678473951679429, + "center_y": 0.88691126347462712, + "amp": 3.7114695634960109, + }, + { + "Rs": 0.44955054610388684, + "e1": e12, + "e2": e22, + "center_x": 0.019536801118136753, + "center_y": 0.0218888643537157, + "Ra": 0.0010000053334891974, + "amp": 967.00280526319796, + }, + ] lightProfile = LightProfile(light_profile_list) R = 0.01 light2d = lightProfile.light_2d(R=R, kwargs_list=kwargs_light) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R**2+x**2), kwargs_light), 0, 100) - print(out, 'out') - npt.assert_almost_equal(light2d/(out[0]*2), 1., decimal=3) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_light), + 0, + 100, + ) + print(out, "out") + npt.assert_almost_equal(light2d / (out[0] * 2), 1.0, decimal=3) def test_dispersion_map(self): - """ - tests whether the old and new version provide the same answer - """ + """Tests whether the old and new version provide the same answer.""" # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.5 - kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. 
- kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_mass = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as shell - #aperture_type = 'shell' - #kwargs_aperture_inner = {'r_in': 0., 'r_out': 0.2, 'center_dec': 0, 'center_ra': 0} + # aperture_type = 'shell' + # kwargs_aperture_inner = {'r_in': 0., 'r_out': 0.2, 'center_dec': 0, 'center_ra': 0} - #kwargs_aperture_outer = {'r_in': 0., 'r_out': 1.5, 'center_dec': 0, 'center_ra': 0} + # kwargs_aperture_outer = {'r_in': 0., 'r_out': 1.5, 'center_dec': 0, 'center_ra': 0} # aperture as slit r_bins = np.linspace(0, 2, 3) - kwargs_ifu = {'r_bins': r_bins, 'center_ra': 0, 'center_dec': 0, 'aperture_type': 'IFU_shells'} - kwargs_aperture = {'aperture_type': 'shell', 'r_in': r_bins[0], 'r_out': r_bins[1], 'center_ra': 0, - 'center_dec': 0} - - psf_fwhm = 1. # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True, - 'max_integrate': 100} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - - galkinIFU = Galkin(kwargs_aperture=kwargs_ifu, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_model=kwargs_model, kwargs_numerics=kwargs_numerics, analytic_kinematics=True) - sigma_v_ifu = galkinIFU.dispersion_map(kwargs_mass={'theta_E': theta_E, 'gamma': gamma}, kwargs_light={'r_eff': r_eff}, - kwargs_anisotropy=kwargs_anisotropy, num_kin_sampling=1000) - galkin = Galkin(kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwargs_numerics, - analytic_kinematics=True) - sigma_v = galkin.dispersion(kwargs_mass={'theta_E': theta_E, 'gamma': gamma}, kwargs_light={'r_eff': r_eff}, - kwargs_anisotropy=kwargs_anisotropy, sampling_number=1000) + kwargs_ifu = { + "r_bins": r_bins, + "center_ra": 0, + "center_dec": 0, + "aperture_type": "IFU_shells", + } + kwargs_aperture = { + "aperture_type": "shell", + "r_in": r_bins[0], + "r_out": r_bins[1], + "center_ra": 0, + "center_dec": 0, + } + + psf_fwhm = 1.0 # Gaussian FWHM psf + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 500, + "log_integration": True, + "max_integrate": 100, + } + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + + galkinIFU = Galkin( + kwargs_aperture=kwargs_ifu, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_model=kwargs_model, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=True, + ) + sigma_v_ifu = galkinIFU.dispersion_map( + kwargs_mass={"theta_E": theta_E, "gamma": gamma}, + kwargs_light={"r_eff": r_eff}, + kwargs_anisotropy=kwargs_anisotropy, + num_kin_sampling=1000, + ) + galkin = Galkin( + kwargs_model, + kwargs_aperture, + kwargs_psf, + kwargs_cosmo, + kwargs_numerics, + analytic_kinematics=True, + ) + sigma_v = galkin.dispersion( + kwargs_mass={"theta_E": theta_E, "gamma": gamma}, + kwargs_light={"r_eff": r_eff}, + kwargs_anisotropy=kwargs_anisotropy, + 
sampling_number=1000, + ) npt.assert_almost_equal(sigma_v, sigma_v_ifu[0], decimal=-1) def test_projected_integral_vs_3d_rendering(self): - lum_weight_int_method = True # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.5 - kwargs_light = [{'Rs': 0.551 * r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": 0.551 * r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' - length = 1. + aperture_type = "slit" + length = 1.0 width = 0.3 - kwargs_aperture = {'aperture_type': aperture_type, 'length': length, 'width': width, 'center_ra': 0, - 'center_dec': 0, 'angle': 0} - - psf_fwhm = 1. # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics_3d = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.00001, 'lum_weight_int_method': False - } - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_3d) - sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) - - kwargs_numerics_2d = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.00001, 'lum_weight_int_method': True - } - - galkin = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_2d, analytic_kinematics=False) - sigma_v_int_method = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000) + kwargs_aperture = { + "aperture_type": aperture_type, + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } + + psf_fwhm = 1.0 # Gaussian FWHM psf + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics_3d = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.00001, + "lum_weight_int_method": False, + } + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_3d, + ) + sigma_v = galkin.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + + kwargs_numerics_2d = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.00001, + 
"lum_weight_int_method": True, + } + + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_2d, + analytic_kinematics=False, + ) + sigma_v_int_method = galkin.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) npt.assert_almost_equal(sigma_v_int_method / sigma_v, 1, decimal=2) def test_2d_vs_3d_power_law(self): # set up power-law light profile - light_model = ['POWER_LAW'] - kwargs_light = [{'gamma': 2, 'amp': 1, 'e1': 0, 'e2': 0}] + light_model = ["POWER_LAW"] + kwargs_light = [{"gamma": 2, "amp": 1, "e1": 0, "e2": 0}] - lens_model = ['SIS'] - kwargs_mass = [{'theta_E': 1}] + lens_model = ["SIS"] + kwargs_mass = [{"theta_E": 1}] - anisotropy_type = 'isotropic' + anisotropy_type = "isotropic" kwargs_anisotropy = {} - kwargs_model = {'mass_profile_list': lens_model, - 'light_profile_list': light_model, - 'anisotropy_model': anisotropy_type} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 50, 'min_integrate': 0.0001} + kwargs_model = { + "mass_profile_list": lens_model, + "light_profile_list": light_model, + "anisotropy_model": anisotropy_type, + } + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 50, + "min_integrate": 0.0001, + } kwargs_numerics_3d = copy.deepcopy(kwargs_numerics) - kwargs_numerics_3d['lum_weight_int_method'] = False + kwargs_numerics_3d["lum_weight_int_method"] = False kwargs_numerics_2d = copy.deepcopy(kwargs_numerics) - kwargs_numerics_2d['lum_weight_int_method'] = True + kwargs_numerics_2d["lum_weight_int_method"] = True - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # compute analytic velocity dispersion of SIS profile - v_sigma_c2 = kwargs_mass[0]['theta_E'] * const.arcsec / (4 * np.pi) * kwargs_cosmo['d_s'] / kwargs_cosmo['d_ds'] + v_sigma_c2 = ( + kwargs_mass[0]["theta_E"] + * const.arcsec + / (4 * np.pi) + * kwargs_cosmo["d_s"] + / kwargs_cosmo["d_ds"] + ) v_sigma_true = np.sqrt(v_sigma_c2) * const.c / 1000 # aperture as slit - aperture_type = 'slit' - length = 1. 
+ aperture_type = "slit" + length = 1.0 width = 0.3 - kwargs_aperture = {'aperture_type': aperture_type, 'length': length, 'width': width, 'center_ra': 0, - 'center_dec': 0, 'angle': 0} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 0.5} - - galkin3d = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_3d) - - galkin2d = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_2d) + kwargs_aperture = { + "aperture_type": aperture_type, + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 0.5} + + galkin3d = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_3d, + ) + + galkin2d = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_2d, + ) sigma_draw_list = [] for i in range(100): - sigma_v_draw = galkin3d._draw_one_sigma2(kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma_v_draw = galkin3d._draw_one_sigma2( + kwargs_mass, kwargs_light, kwargs_anisotropy + ) sigma_draw_list.append(sigma_v_draw) # print(np.sqrt(sigma_v_draw)/ 1000) - #import matplotlib.pyplot as plt - #plt.plot(np.sqrt(sigma_draw_list) / 1000 / v_sigma_true) - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(np.sqrt(sigma_draw_list) / 1000 / v_sigma_true) + # plt.show() - print(np.sqrt(np.mean(sigma_draw_list)) / 1000, 'mean draw') - print('truth = ', v_sigma_true) - #assert 1 == 0 + print(np.sqrt(np.mean(sigma_draw_list)) / 1000, "mean draw") + print("truth = ", v_sigma_true) + # assert 1 == 0 - sigma_v_2d = galkin2d.dispersion(kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000) - sigma_v_3d = galkin3d.dispersion(kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000) + sigma_v_2d = galkin2d.dispersion( + kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) + sigma_v_3d = galkin3d.dispersion( + kwargs_mass, kwargs_light, kwargs_anisotropy, sampling_number=1000 + ) npt.assert_almost_equal(sigma_v_2d / v_sigma_true, 1, decimal=2) npt.assert_almost_equal(sigma_v_3d / v_sigma_true, 1, decimal=2) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - kwargs_model = {'anisotropy_model': 'const'} - kwargs_aperture = {'center_ra': 0, 'width': 1, 'length': 1, 'angle': 0, 'center_dec': 0, - 'aperture_type': 'slit'} - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 1} - Galkin(kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwargs_numerics={}, - analytic_kinematics=True) - - -if __name__ == '__main__': + kwargs_model = {"anisotropy_model": "const"} + kwargs_aperture = { + "center_ra": 0, + "width": 1, + "length": 1, + "angle": 0, + "center_dec": 0, + "aperture_type": "slit", + } + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1} + Galkin( + kwargs_model, + kwargs_aperture, + kwargs_psf, + kwargs_cosmo, + kwargs_numerics={}, + analytic_kinematics=True, + ) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_galkin_model.py b/test/test_GalKin/test_galkin_model.py index b50f8e42b..0e27aa068 100644 
--- a/test/test_GalKin/test_galkin_model.py +++ b/test/test_GalKin/test_galkin_model.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.GalKin.galkin_model import GalkinModel @@ -7,34 +7,60 @@ class TestGalkinModel(object): - def setup_method(self): pass def test_radius_slope_anisotropy(self): - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_model = {'anisotropy_model': 'OM', 'mass_profile_list': ['SPP'], 'light_profile_list': ['HERNQUIST']} - kwargs_numerics = {'interpol_grid_num': 1000, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.001} - kin_analytic = GalkinModel(kwargs_model, kwargs_cosmo, analytic_kinematics=True, kwargs_numerics=kwargs_numerics) + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_model = { + "anisotropy_model": "OM", + "mass_profile_list": ["SPP"], + "light_profile_list": ["HERNQUIST"], + } + kwargs_numerics = { + "interpol_grid_num": 1000, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.001, + } + kin_analytic = GalkinModel( + kwargs_model, + kwargs_cosmo, + analytic_kinematics=True, + kwargs_numerics=kwargs_numerics, + ) r = 1 - theta_E, gamma = 1, 2. + theta_E, gamma = 1, 2.0 a_ani = 10 r_eff = 0.1 - out = kin_analytic.check_df(r, kwargs_mass={'theta_E': theta_E, 'gamma': gamma}, kwargs_light={'r_eff': r_eff}, - kwargs_anisotropy={'r_ani': a_ani*r_eff}) + out = kin_analytic.check_df( + r, + kwargs_mass={"theta_E": theta_E, "gamma": gamma}, + kwargs_light={"r_eff": r_eff}, + kwargs_anisotropy={"r_ani": a_ani * r_eff}, + ) assert out > 0 print(out) - kin_numeric = GalkinModel(kwargs_model, kwargs_cosmo, analytic_kinematics=False, kwargs_numerics=kwargs_numerics) - out_num = kin_numeric.check_df(r, kwargs_mass=[{'theta_E': theta_E, 'gamma': gamma}], - kwargs_light=[{'Rs': r_eff * 0.551, 'amp': 1}], kwargs_anisotropy={'r_ani': a_ani*r_eff}) + kin_numeric = GalkinModel( + kwargs_model, + kwargs_cosmo, + analytic_kinematics=False, + kwargs_numerics=kwargs_numerics, + ) + out_num = kin_numeric.check_df( + r, + kwargs_mass=[{"theta_E": theta_E, "gamma": gamma}], + kwargs_light=[{"Rs": r_eff * 0.551, "amp": 1}], + kwargs_anisotropy={"r_ani": a_ani * r_eff}, + ) assert out_num > 1 - npt.assert_almost_equal(out_num/out, 1, decimal=2) + npt.assert_almost_equal(out_num / out, 1, decimal=2) - kin_numeric_default = GalkinModel(kwargs_model, kwargs_cosmo, analytic_kinematics=False, kwargs_numerics=None) + kin_numeric_default = GalkinModel( + kwargs_model, kwargs_cosmo, analytic_kinematics=False, kwargs_numerics=None + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_galkin_multiobservation.py b/test/test_GalKin/test_galkin_multiobservation.py index bc5232bba..9a87c1dbc 100644 --- a/test/test_GalKin/test_galkin_multiobservation.py +++ b/test/test_GalKin/test_galkin_multiobservation.py @@ -3,50 +3,70 @@ class TestGalkinMultiObservation(object): - def setup_method(self): pass def test_dispersion(self): - light_profile_list = ['HERNQUIST'] - Rs = .5 - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["HERNQUIST"] + Rs = 0.5 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. 
- kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_mass = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001} + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 500, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + } # aperture as slit - aperture_type = 'slit' - kwargs_aperture_1 = {'width': 1, 'length': 1., 'aperture_type': aperture_type} - kwargs_psf_1 = {'psf_type': 'GAUSSIAN', 'fwhm': 0.7} + aperture_type = "slit" + kwargs_aperture_1 = {"width": 1, "length": 1.0, "aperture_type": aperture_type} + kwargs_psf_1 = {"psf_type": "GAUSSIAN", "fwhm": 0.7} - kwargs_aperture_2 = {'width': 3, 'length': 3., 'aperture_type': aperture_type} - kwargs_psf_2 = {'psf_type': 'GAUSSIAN', 'fwhm': 1.5} + kwargs_aperture_2 = {"width": 3, "length": 3.0, "aperture_type": aperture_type} + kwargs_psf_2 = {"psf_type": "GAUSSIAN", "fwhm": 1.5} kwargs_aperture_list = [kwargs_aperture_1, kwargs_aperture_2] kwargs_psf_list = [kwargs_psf_1, kwargs_psf_2] - galkin_multiobs = GalkinMultiObservation(kwargs_model, kwargs_aperture_list, kwargs_psf_list, kwargs_cosmo, - kwargs_numerics=kwargs_numerics, analytic_kinematics=False) + galkin_multiobs = GalkinMultiObservation( + kwargs_model, + kwargs_aperture_list, + kwargs_psf_list, + kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + analytic_kinematics=False, + ) - sigma_v_list = galkin_multiobs.dispersion_map(kwargs_mass=kwargs_mass, kwargs_light=kwargs_light, - kwargs_anisotropy=kwargs_anisotropy, num_kin_sampling=1000, num_psf_sampling=100) + sigma_v_list = galkin_multiobs.dispersion_map( + kwargs_mass=kwargs_mass, + kwargs_light=kwargs_light, + kwargs_anisotropy=kwargs_anisotropy, + num_kin_sampling=1000, + num_psf_sampling=100, + ) assert len(sigma_v_list) == 2 assert sigma_v_list[0] > sigma_v_list[1] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_galkin_shells.py b/test/test_GalKin/test_galkin_shells.py index 731cec0a0..87b4b495c 100644 --- a/test/test_GalKin/test_galkin_shells.py +++ b/test/test_GalKin/test_galkin_shells.py @@ -5,67 +5,105 @@ import pytest import unittest -class TestGalkinShells(object): +class TestGalkinShells(object): def test_vel_disp(self): - # light profile - light_profile_list = ['HERNQUIST'] - Rs = .5 - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["HERNQUIST"] + Rs = 0.5 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. 
- kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'IFU_shells' + aperture_type = "IFU_shells" r_bins = np.linspace(start=0, stop=2, num=5) - kwargs_aperture = {'aperture_type': aperture_type, 'r_bins': r_bins, 'center_ra': 0, 'center_dec': 0} + kwargs_aperture = { + "aperture_type": aperture_type, + "r_bins": r_bins, + "center_ra": 0, + "center_dec": 0, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics_log = {'interpol_grid_num': 1000, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001, - 'lum_weight_int_method': True} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics_log = { + "interpol_grid_num": 1000, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + "lum_weight_int_method": True, + } - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_log) - galkin_shells = GalkinShells(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics_log) + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_log, + ) + galkin_shells = GalkinShells( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics_log, + ) - vel_disp_bins = galkin_shells.dispersion_map(kwargs_mass=kwargs_profile, kwargs_light=kwargs_light, - kwargs_anisotropy=kwargs_anisotropy) - disp_map = galkin.dispersion_map(kwargs_mass=kwargs_profile, kwargs_light=kwargs_light, - kwargs_anisotropy=kwargs_anisotropy, - num_kin_sampling=1000, num_psf_sampling=100) + vel_disp_bins = galkin_shells.dispersion_map( + kwargs_mass=kwargs_profile, + kwargs_light=kwargs_light, + kwargs_anisotropy=kwargs_anisotropy, + ) + disp_map = galkin.dispersion_map( + kwargs_mass=kwargs_profile, + kwargs_light=kwargs_light, + kwargs_anisotropy=kwargs_anisotropy, + num_kin_sampling=1000, + num_psf_sampling=100, + ) npt.assert_almost_equal(vel_disp_bins / disp_map, 1, decimal=2) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - kwargs_model = {'mass_profile_list': ['SPP'], - 'light_profile_list': ['HERNQUIST'], - 'anisotropy_model': 'const'} - kwargs_aperture = {'center_ra': 0, 'width': 1, 'length': 1, 'angle': 0, 'center_dec': 0, - 'aperture_type': 'slit'} - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_psf = {'psf_type': 
'GAUSSIAN', 'fwhm': 1} + kwargs_model = { + "mass_profile_list": ["SPP"], + "light_profile_list": ["HERNQUIST"], + "anisotropy_model": "const", + } + kwargs_aperture = { + "center_ra": 0, + "width": 1, + "length": 1, + "angle": 0, + "center_dec": 0, + "aperture_type": "slit", + } + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": 1} GalkinShells(kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_gom.py b/test/test_GalKin/test_gom.py index e1b9089d8..4862ca5d5 100644 --- a/test/test_GalKin/test_gom.py +++ b/test/test_GalKin/test_gom.py @@ -8,57 +8,89 @@ def setup_method(self): np.random.seed(2) def test_OMvsGOM(self): - """ - test OsivkopMerrit vs generalized OM model - :return: - """ - light_profile_list = ['HERNQUIST'] + """Test OsivkopMerrit vs generalized OM model :return:""" + light_profile_list = ["HERNQUIST"] r_eff = 1.5 - kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # aperture as slit - aperture_type = 'slit' - length = 1. + aperture_type = "slit" + length = 1.0 width = 0.3 - kwargs_aperture = {'aperture_type': aperture_type, 'length': length, 'width': width, 'center_ra': 0, - 'center_dec': 0, 'angle': 0} + kwargs_aperture = { + "aperture_type": aperture_type, + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + } - psf_fwhm = 1. 
# Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.00001} + psf_fwhm = 1.0 # Gaussian FWHM psf + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.00001, + } # anisotropy profile - anisotropy_type = 'OM' + anisotropy_type = "OM" r_ani = 0.2 - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics) - sigma_v_om = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=5000) + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + ) + sigma_v_om = galkin.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=5000 + ) # anisotropy profile - anisotropy_type = 'GOM' + anisotropy_type = "GOM" - kwargs_anisotropy = {'r_ani': r_ani, 'beta_inf': 1} # anisotropy radius [arcsec] + kwargs_anisotropy = { + "r_ani": r_ani, + "beta_inf": 1, + } # anisotropy radius [arcsec] - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - galkin_gom = Galkin(kwargs_model=kwargs_model, kwargs_aperture=kwargs_aperture, kwargs_psf=kwargs_psf, - kwargs_cosmo=kwargs_cosmo, kwargs_numerics=kwargs_numerics) - sigma_v_gom = galkin_gom.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=5000) + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + galkin_gom = Galkin( + kwargs_model=kwargs_model, + kwargs_aperture=kwargs_aperture, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_numerics=kwargs_numerics, + ) + sigma_v_gom = galkin_gom.dispersion( + kwargs_profile, kwargs_light, kwargs_anisotropy, sampling_number=5000 + ) # warning: this tests does not work to this precision for every random seed. To increase precision, increase # sampling_number npt.assert_almost_equal(sigma_v_gom / sigma_v_om, 1, decimal=2) diff --git a/test/test_GalKin/test_light_profile.py b/test/test_GalKin/test_light_profile.py index fe764590a..ec6d58760 100644 --- a/test/test_GalKin/test_light_profile.py +++ b/test/test_GalKin/test_light_profile.py @@ -1,6 +1,4 @@ -""" -Tests for `galkin` module. 
-""" +"""Tests for `galkin` module.""" import pytest import numpy.testing as npt import numpy as np @@ -12,19 +10,20 @@ class TestLightProfile(object): - def setup_method(self): pass def test_draw_light(self): np.random.seed(41) - lightProfile = LightProfile(profile_list=['HERNQUIST']) - kwargs_profile = [{'amp': 1., 'Rs': 0.8}] + lightProfile = LightProfile(profile_list=["HERNQUIST"]) + kwargs_profile = [{"amp": 1.0, "Rs": 0.8}] r_list = lightProfile.draw_light_2d(kwargs_profile, n=500000) - bins = np.linspace(0., 1, 20) + bins = np.linspace(0.0, 1, 20) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - light2d = lightProfile.light_2d(R=(bins_hist[1:] + bins_hist[:-1])/2., kwargs_list=kwargs_profile) - light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2. + light2d = lightProfile.light_2d( + R=(bins_hist[1:] + bins_hist[:-1]) / 2.0, kwargs_list=kwargs_profile + ) + light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2.0 light2d /= np.sum(light2d) hist /= np.sum(hist) print(light2d / hist) @@ -34,16 +33,29 @@ def test_draw_light(self): def test_draw_light_2d_linear(self): np.random.seed(41) - lightProfile = LightProfile(profile_list=['HERNQUIST'], interpol_grid_num=1000, max_interpolate=10, min_interpolate=0.01) - kwargs_profile = [{'amp': 1., 'Rs': 0.8}] + lightProfile = LightProfile( + profile_list=["HERNQUIST"], + interpol_grid_num=1000, + max_interpolate=10, + min_interpolate=0.01, + ) + kwargs_profile = [{"amp": 1.0, "Rs": 0.8}] r_list = lightProfile.draw_light_2d_linear(kwargs_profile, n=100000) - bins = np.linspace(0., 1, 20) + bins = np.linspace(0.0, 1, 20) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - light2d = lightProfile.light_2d(R=(bins_hist[1:] + bins_hist[:-1])/2., kwargs_list=kwargs_profile) - light2d_upper = lightProfile.light_2d(R=bins_hist[1:], kwargs_list=kwargs_profile) * bins_hist[1:] - light2d_lower = lightProfile.light_2d(R=bins_hist[:-1], kwargs_list=kwargs_profile) * bins_hist[:-1] - light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2. - print((light2d_upper - light2d_lower)/(light2d_upper + light2d_lower) * 2) + light2d = lightProfile.light_2d( + R=(bins_hist[1:] + bins_hist[:-1]) / 2.0, kwargs_list=kwargs_profile + ) + light2d_upper = ( + lightProfile.light_2d(R=bins_hist[1:], kwargs_list=kwargs_profile) + * bins_hist[1:] + ) + light2d_lower = ( + lightProfile.light_2d(R=bins_hist[:-1], kwargs_list=kwargs_profile) + * bins_hist[:-1] + ) + light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2.0 + print((light2d_upper - light2d_lower) / (light2d_upper + light2d_lower) * 2) light2d /= np.sum(light2d) hist /= np.sum(hist) print(light2d / hist) @@ -53,93 +65,109 @@ def test_draw_light_2d_linear(self): def test_draw_light_PJaffe(self): np.random.seed(41) - lightProfile = LightProfile(profile_list=['PJAFFE']) - kwargs_profile = [{'amp': 1., 'Rs': 0.5, 'Ra': 0.2}] + lightProfile = LightProfile(profile_list=["PJAFFE"]) + kwargs_profile = [{"amp": 1.0, "Rs": 0.5, "Ra": 0.2}] r_list = lightProfile.draw_light_2d(kwargs_profile, n=100000) bins = np.linspace(0, 2, 10) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - light2d = lightProfile.light_2d(R=(bins_hist[1:] + bins_hist[:-1])/2., kwargs_list=kwargs_profile) - light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2. 
+ light2d = lightProfile.light_2d( + R=(bins_hist[1:] + bins_hist[:-1]) / 2.0, kwargs_list=kwargs_profile + ) + light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2.0 light2d /= np.sum(light2d) hist /= np.sum(hist) print(light2d / hist) - npt.assert_almost_equal(light2d[8]/hist[8], 1, decimal=1) + npt.assert_almost_equal(light2d[8] / hist[8], 1, decimal=1) - lightProfile = LightProfile(profile_list=['PJAFFE'], min_interpolate=0.0001, max_interpolate=20.) - kwargs_profile = [{'amp': 1., 'Rs': 0.04, 'Ra': 0.02}] + lightProfile = LightProfile( + profile_list=["PJAFFE"], min_interpolate=0.0001, max_interpolate=20.0 + ) + kwargs_profile = [{"amp": 1.0, "Rs": 0.04, "Ra": 0.02}] r_list = lightProfile.draw_light_2d(kwargs_profile, n=100000) - bins = np.linspace(0., 0.1, 10) + bins = np.linspace(0.0, 0.1, 10) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - light2d = lightProfile.light_2d(R=(bins_hist[1:] + bins_hist[:-1])/2., kwargs_list=kwargs_profile) - light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2. + light2d = lightProfile.light_2d( + R=(bins_hist[1:] + bins_hist[:-1]) / 2.0, kwargs_list=kwargs_profile + ) + light2d *= (bins_hist[1:] + bins_hist[:-1]) / 2.0 light2d /= np.sum(light2d) hist /= np.sum(hist) print(light2d / hist) npt.assert_almost_equal(light2d[5] / hist[5], 1, decimal=1) - assert hasattr(lightProfile, '_kwargs_light_circularized') + assert hasattr(lightProfile, "_kwargs_light_circularized") lightProfile.delete_cache() - if hasattr(lightProfile, '_kwargs_light_circularized'): + if hasattr(lightProfile, "_kwargs_light_circularized"): assert False def test_draw_light_3d_hernquist(self): - lightProfile = LightProfile(profile_list=['HERNQUIST'], min_interpolate=0.0001, max_interpolate=1000.) - kwargs_profile = [{'amp': 1., 'Rs': 0.5}] - r_list = lightProfile.draw_light_3d(kwargs_profile, n=1000000, new_compute=False) - print(r_list, 'r_list') + lightProfile = LightProfile( + profile_list=["HERNQUIST"], min_interpolate=0.0001, max_interpolate=1000.0 + ) + kwargs_profile = [{"amp": 1.0, "Rs": 0.5}] + r_list = lightProfile.draw_light_3d( + kwargs_profile, n=1000000, new_compute=False + ) + print(r_list, "r_list") # project it # test with draw light 2d profile routine # compare with 3d analytical solution vs histogram binned bins = np.linspace(0.0, 10, 20) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. + bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2.0 light3d = lightProfile.light_3d(r=bins_plot, kwargs_list=kwargs_profile) - light3d *= bins_plot ** 2 + light3d *= bins_plot**2 light3d /= np.sum(light3d) hist /= np.sum(hist) - #import matplotlib.pyplot as plt - #plt.plot(bins_plot , light3d/light3d[5], label='3d reference Hernquist') - #plt.plot(bins_plot, hist / hist[5], label='hist') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(bins_plot , light3d/light3d[5], label='3d reference Hernquist') + # plt.plot(bins_plot, hist / hist[5], label='hist') + # plt.legend() + # plt.show() print(light3d / hist) - #npt.assert_almost_equal(light3d / hist, 1, decimal=1) + # npt.assert_almost_equal(light3d / hist, 1, decimal=1) # compare with 2d analytical solution vs histogram binned - #bins = np.linspace(0.1, 1, 10) + # bins = np.linspace(0.1, 1, 10) R, x, y = velocity_util.project2d_random(np.array(r_list)) hist_2d, bins_hist = np.histogram(R, bins=bins, density=True) hist_2d /= np.sum(hist_2d) - bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. 
+ bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2.0 light2d = lightProfile.light_2d(R=bins_plot, kwargs_list=kwargs_profile) - light2d *= bins_plot ** 1 + light2d *= bins_plot**1 light2d /= np.sum(light2d) - light2d_finite = lightProfile.light_2d_finite(R=bins_plot, kwargs_list=kwargs_profile) - light2d_finite *= bins_plot ** 1 + light2d_finite = lightProfile.light_2d_finite( + R=bins_plot, kwargs_list=kwargs_profile + ) + light2d_finite *= bins_plot**1 light2d_finite /= np.sum(light2d_finite) hist /= np.sum(hist) - #import matplotlib.pyplot as plt - #plt.plot(bins_plot, light2d/light2d[5], '--', label='2d reference Hernquist') - #plt.plot(bins_plot, light2d_finite / light2d_finite[5], '-.', label='2d reference Hernquist finite') - #plt.plot(bins_plot, hist_2d / hist_2d[5], label='hist') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(bins_plot, light2d/light2d[5], '--', label='2d reference Hernquist') + # plt.plot(bins_plot, light2d_finite / light2d_finite[5], '-.', label='2d reference Hernquist finite') + # plt.plot(bins_plot, hist_2d / hist_2d[5], label='hist') + # plt.legend() + # plt.show() print(light2d / hist_2d) - #plt.plot(R, r_list, '.', label='test') - #plt.legend() - #plt.xlim([0, 0.2]) - #plt.ylim([0, 0.2]) - #plt.show() + # plt.plot(R, r_list, '.', label='test') + # plt.legend() + # plt.xlim([0, 0.2]) + # plt.ylim([0, 0.2]) + # plt.show() npt.assert_almost_equal(light2d / hist_2d, 1, decimal=1) def test_draw_light_3d_power_law(self): - lightProfile = LightProfile(profile_list=['POWER_LAW'], min_interpolate=0.0001, max_interpolate=1000.) - kwargs_profile = [{'amp': 1., 'gamma': 2, 'e1': 0, 'e2': 0}] - r_list = lightProfile.draw_light_3d(kwargs_profile, n=1000000, new_compute=False) - print(r_list, 'r_list') + lightProfile = LightProfile( + profile_list=["POWER_LAW"], min_interpolate=0.0001, max_interpolate=1000.0 + ) + kwargs_profile = [{"amp": 1.0, "gamma": 2, "e1": 0, "e2": 0}] + r_list = lightProfile.draw_light_3d( + kwargs_profile, n=1000000, new_compute=False + ) + print(r_list, "r_list") # project it R, x, y = velocity_util.project2d_random(r_list) # test with draw light 2d profile routine @@ -147,39 +175,40 @@ def test_draw_light_3d_power_law(self): # compare with 3d analytical solution vs histogram binned bins = np.linspace(0.1, 10, 10) hist, bins_hist = np.histogram(r_list, bins=bins, density=True) - bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. + bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2.0 light3d = lightProfile.light_3d(r=bins_plot, kwargs_list=kwargs_profile) - light3d *= bins_plot ** 2 + light3d *= bins_plot**2 light3d /= np.sum(light3d) hist /= np.sum(hist) - #import matplotlib.pyplot as plt - #plt.plot(bins_plot , light3d/light3d[5], label='3d reference power-law') - #plt.plot(bins_plot, hist / hist[5], label='hist') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(bins_plot , light3d/light3d[5], label='3d reference power-law') + # plt.plot(bins_plot, hist / hist[5], label='hist') + # plt.legend() + # plt.show() print(light3d / hist) npt.assert_almost_equal(light3d / hist, 1, decimal=1) # compare with 2d analytical solution vs histogram binned - #bins = np.linspace(0.1, 1, 10) + # bins = np.linspace(0.1, 1, 10) hist, bins_hist = np.histogram(R, bins=bins, density=True) - bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. 
+ bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2.0 light2d = lightProfile.light_2d_finite(R=bins_plot, kwargs_list=kwargs_profile) - light2d *= bins_plot ** 1 + light2d *= bins_plot**1 light2d /= np.sum(light2d) hist /= np.sum(hist) - #import matplotlib.pyplot as plt - #plt.plot(bins_plot , light2d/light2d[5], label='2d reference power-law') - #plt.plot(bins_plot, hist / hist[5], label='hist') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(bins_plot , light2d/light2d[5], label='2d reference power-law') + # plt.plot(bins_plot, hist / hist[5], label='hist') + # plt.legend() + # plt.show() print(light2d / hist) npt.assert_almost_equal(light2d / hist, 1, decimal=1) def test_ellipticity_in_profiles(self): np.random.seed(41) - lightProfile = ['HERNQUIST_ELLIPSE', 'PJAFFE_ELLIPSE'] + lightProfile = ["HERNQUIST_ELLIPSE", "PJAFFE_ELLIPSE"] import lenstronomy.Util.param_util as param_util + phi, q = 0.14944144075912402, 0.4105628122365978 e1, e2 = param_util.phi_q2_ellipticity(phi, q) @@ -187,45 +216,79 @@ def test_ellipticity_in_profiles(self): e12, e22 = param_util.phi_q2_ellipticity(phi2, q2) center_x = -0.019983826426838536 center_y = 0.90000011282957304 - kwargs_profile = [{'Rs': 0.16350224766074103, 'e1': e1, 'e2': e2, 'center_x': center_x, - 'center_y': center_y, 'amp': 1.3168943578511678}, - {'Rs': 0.29187068596715743, 'e1': e12, 'e2': e22, 'center_x': center_x, - 'center_y': center_y, 'Ra': 0.020000382843298824, - 'amp': 85.948773973262391}] - kwargs_options = {'lens_model_list': ['SPEP'], 'lens_light_model_list': lightProfile} + kwargs_profile = [ + { + "Rs": 0.16350224766074103, + "e1": e1, + "e2": e2, + "center_x": center_x, + "center_y": center_y, + "amp": 1.3168943578511678, + }, + { + "Rs": 0.29187068596715743, + "e1": e12, + "e2": e22, + "center_x": center_x, + "center_y": center_y, + "Ra": 0.020000382843298824, + "amp": 85.948773973262391, + }, + ] + kwargs_options = { + "lens_model_list": ["SPEP"], + "lens_light_model_list": lightProfile, + } lensAnalysis = LightProfileAnalysis(LightModel(light_model_list=lightProfile)) - r_eff = lensAnalysis.half_light_radius(kwargs_profile, center_x=center_x, center_y=center_y, grid_spacing=0.1, - grid_num=100) - kwargs_profile[0]['e1'], kwargs_profile[0]['e2'] = 0, 0 - kwargs_profile[1]['e1'], kwargs_profile[1]['e2'] = 0, 0 - r_eff_spherical = lensAnalysis.half_light_radius(kwargs_profile, center_x=center_x, center_y=center_y, - grid_spacing=0.1, grid_num=100) + r_eff = lensAnalysis.half_light_radius( + kwargs_profile, + center_x=center_x, + center_y=center_y, + grid_spacing=0.1, + grid_num=100, + ) + kwargs_profile[0]["e1"], kwargs_profile[0]["e2"] = 0, 0 + kwargs_profile[1]["e1"], kwargs_profile[1]["e2"] = 0, 0 + r_eff_spherical = lensAnalysis.half_light_radius( + kwargs_profile, + center_x=center_x, + center_y=center_y, + grid_spacing=0.1, + grid_num=100, + ) npt.assert_almost_equal(r_eff / r_eff_spherical, 1, decimal=2) def test_light_3d(self): np.random.seed(41) - lightProfile = LightProfile(profile_list=['HERNQUIST']) + lightProfile = LightProfile(profile_list=["HERNQUIST"]) r = np.logspace(-2, 2, 100) - kwargs_profile = [{'amp': 1., 'Rs': 0.5}] + kwargs_profile = [{"amp": 1.0, "Rs": 0.5}] light_3d = lightProfile.light_3d_interp(r, kwargs_profile) light_3d_exact = lightProfile.light_3d(r, kwargs_profile) for i in range(len(r)): - npt.assert_almost_equal(light_3d[i]/light_3d_exact[i], 1, decimal=3) + npt.assert_almost_equal(light_3d[i] / light_3d_exact[i], 1, decimal=3) def test_light_2d_finite(self): 
interpol_grid_num = 5000 max_interpolate = 10 min_interpolate = 0.0001 - lightProfile = LightProfile(profile_list=['HERNQUIST'], interpol_grid_num=interpol_grid_num, - max_interpolate=max_interpolate, min_interpolate=min_interpolate) - kwargs_profile = [{'amp': 1., 'Rs': 1.}] + lightProfile = LightProfile( + profile_list=["HERNQUIST"], + interpol_grid_num=interpol_grid_num, + max_interpolate=max_interpolate, + min_interpolate=min_interpolate, + ) + kwargs_profile = [{"amp": 1.0, "Rs": 1.0}] # check whether projected light integral is the same as analytic expression - R = 1. + R = 1.0 I_R = lightProfile.light_2d_finite(R, kwargs_profile) - out = integrate.quad(lambda x: lightProfile.light_3d(np.sqrt(R ** 2 + x ** 2), kwargs_profile), - min_interpolate, np.sqrt(max_interpolate ** 2 - R ** 2)) + out = integrate.quad( + lambda x: lightProfile.light_3d(np.sqrt(R**2 + x**2), kwargs_profile), + min_interpolate, + np.sqrt(max_interpolate**2 - R**2), + ) l_R_quad = out[0] * 2 npt.assert_almost_equal(l_R_quad / I_R, 1, decimal=2) @@ -234,15 +297,15 @@ def test_light_2d_finite(self): npt.assert_almost_equal(l_R / I_R, 1, decimal=2) def test_del_cache(self): - lightProfile = LightProfile(profile_list=['HERNQUIST']) + lightProfile = LightProfile(profile_list=["HERNQUIST"]) lightProfile._light_cdf = 1 lightProfile._light_cdf_log = 2 lightProfile._f_light_3d = 3 lightProfile.delete_cache() - assert hasattr(lightProfile, '_light_cdf') is False - assert hasattr(lightProfile, '_light_cdf_log') is False - assert hasattr(lightProfile, '_f_light_3d') is False + assert hasattr(lightProfile, "_light_cdf") is False + assert hasattr(lightProfile, "_light_cdf_log") is False + assert hasattr(lightProfile, "_f_light_3d") is False -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_multi_gauss_expansion.py b/test/test_GalKin/test_multi_gauss_expansion.py index f26301ce8..4fd082300 100644 --- a/test/test_GalKin/test_multi_gauss_expansion.py +++ b/test/test_GalKin/test_multi_gauss_expansion.py @@ -1,6 +1,4 @@ -""" -Tests for `galkin` module. -""" +"""Tests for `galkin` module.""" import pytest import numpy.testing as npt import lenstronomy.Util.multi_gauss_expansion as mge @@ -12,192 +10,286 @@ class TestGalkin(object): - def setup_method(self): pass def test_mge_hernquist_light(self): - """ - compare power-law profiles analytical vs. numerical + """Compare power-law profiles analytical vs. + + numerical :return: """ # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. 
- kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' + aperture_type = "slit" length = 3.8 width = 0.9 - kwargs_aperture = {'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0, 'aperture_type': aperture_type} + kwargs_aperture = { + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + "aperture_type": aperture_type, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.8 - kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # mge of light profile lightModel = LightModel(light_profile_list) r_array = np.logspace(-2, 2, 100) flux_r = lightModel.surface_brightness(r_array, 0, kwargs_light) amps, sigmas, norm = mge.mge_1d(r_array, flux_r, N=20) - light_profile_list_mge = ['MULTI_GAUSSIAN'] - kwargs_light_mge = [{'amp': amps, 'sigma': sigmas}] - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} + light_profile_list_mge = ["MULTI_GAUSSIAN"] + kwargs_light_mge = [{"amp": amps, "sigma": sigmas}] + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_numerics = {'interpol_grid_num': 100, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.01} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_numerics = { + "interpol_grid_num": 100, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.01, + } + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy) - kwargs_model_mge = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list_mge, - 'anisotropy_model': anisotropy_type} - galkin = Galkin(kwargs_model=kwargs_model_mge, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) - sigma_v2 = galkin.dispersion(kwargs_profile, kwargs_light_mge, kwargs_anisotropy) - - print(sigma_v, sigma_v2, 'sigma_v Galkin, sigma_v MGEn') - print((sigma_v/sigma_v2)**2) - - npt.assert_almost_equal((sigma_v-sigma_v2)/sigma_v2, 0, decimal=2) + kwargs_model_mge = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list_mge, + "anisotropy_model": anisotropy_type, + } + 
galkin = Galkin( + kwargs_model=kwargs_model_mge, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) + sigma_v2 = galkin.dispersion( + kwargs_profile, kwargs_light_mge, kwargs_anisotropy + ) + + print(sigma_v, sigma_v2, "sigma_v Galkin, sigma_v MGEn") + print((sigma_v / sigma_v2) ** 2) + + npt.assert_almost_equal((sigma_v - sigma_v2) / sigma_v2, 0, decimal=2) def test_mge_power_law_lens(self): - """ - compare power-law profiles analytical vs. numerical + """Compare power-law profiles analytical vs. + + numerical :return: """ # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' + aperture_type = "slit" length = 3.8 width = 0.9 - kwargs_aperture = {'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0, 'aperture_type': aperture_type} + kwargs_aperture = { + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + "aperture_type": aperture_type, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.8 - kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # mge of lens profile lensModel = LensModel(mass_profile_list) - r_array = np.logspace(-2, 2, 100)*theta_E + r_array = np.logspace(-2, 2, 100) * theta_E kappa_r = lensModel.kappa(r_array, 0, kwargs_profile) amps, sigmas, norm = mge.mge_1d(r_array, kappa_r, N=20) - mass_profile_list_mge = ['MULTI_GAUSSIAN_KAPPA'] - kwargs_profile_mge = [{'amp': amps, 'sigma': sigmas}] - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - kwargs_numerics = {'interpol_grid_num': 100, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.01} - - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) + mass_profile_list_mge = ["MULTI_GAUSSIAN_KAPPA"] + kwargs_profile_mge = [{"amp": amps, "sigma": sigmas}] + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + kwargs_numerics = { + "interpol_grid_num": 100, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.01, + } + + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, 
kwargs_anisotropy) - kwargs_model = {'mass_profile_list': mass_profile_list_mge, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) - sigma_v2 = galkin.dispersion(kwargs_profile_mge, kwargs_light, kwargs_anisotropy) - - print(sigma_v, sigma_v2, 'sigma_v Galkin, sigma_v MGEn') - print((sigma_v/sigma_v2)**2) - - npt.assert_almost_equal((sigma_v-sigma_v2)/sigma_v2, 0, decimal=2) + kwargs_model = { + "mass_profile_list": mass_profile_list_mge, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) + sigma_v2 = galkin.dispersion( + kwargs_profile_mge, kwargs_light, kwargs_anisotropy + ) + + print(sigma_v, sigma_v2, "sigma_v Galkin, sigma_v MGEn") + print((sigma_v / sigma_v2) ** 2) + + npt.assert_almost_equal((sigma_v - sigma_v2) / sigma_v2, 0, decimal=2) def test_mge_light_and_mass(self): # anisotropy profile - anisotropy_model = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_model = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' + aperture_type = "slit" length = 3.8 width = 0.9 - kwargs_aperture = {'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0, 'aperture_type': aperture_type} + kwargs_aperture = { + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + "aperture_type": aperture_type, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # light profile - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1.8 - kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": r_eff, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. 
- kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # mge of light profile lightModel = LightModel(light_profile_list) r_array = np.logspace(-2, 2, 200) * r_eff * 2 flux_r = lightModel.surface_brightness(r_array, 0, kwargs_light) amps, sigmas, norm = mge.mge_1d(r_array, flux_r, N=20) - light_profile_list_mge = ['MULTI_GAUSSIAN'] - kwargs_light_mge = [{'amp': amps, 'sigma': sigmas}] + light_profile_list_mge = ["MULTI_GAUSSIAN"] + kwargs_light_mge = [{"amp": amps, "sigma": sigmas}] # mge of lens profile lensModel = LensModel(mass_profile_list) r_array = np.logspace(-2, 2, 200) kappa_r = lensModel.kappa(r_array, 0, kwargs_profile) amps, sigmas, norm = mge.mge_1d(r_array, kappa_r, N=20) - mass_profile_list_mge = ['MULTI_GAUSSIAN_KAPPA'] - kwargs_profile_mge = [{'amp': amps, 'sigma': sigmas}] - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_model} - kwargs_numerics = {'interpol_grid_num': 100, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.01} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) + mass_profile_list_mge = ["MULTI_GAUSSIAN_KAPPA"] + kwargs_profile_mge = [{"amp": amps, "sigma": sigmas}] + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_model, + } + kwargs_numerics = { + "interpol_grid_num": 100, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.01, + } + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) sigma_v = galkin.dispersion(kwargs_profile, kwargs_light, kwargs_anisotropy) - kwargs_model_mge = {'mass_profile_list': mass_profile_list_mge, - 'light_profile_list': light_profile_list_mge, - 'anisotropy_model': anisotropy_model} - - galkin = Galkin(kwargs_model=kwargs_model_mge, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics=kwargs_numerics) - sigma_v2 = galkin.dispersion(kwargs_profile_mge, kwargs_light_mge, kwargs_anisotropy) - - print(sigma_v, sigma_v2, 'sigma_v Galkin, sigma_v MGEn') - print((sigma_v/sigma_v2)**2) - npt.assert_almost_equal((sigma_v-sigma_v2)/sigma_v2, 0, decimal=2) + kwargs_model_mge = { + "mass_profile_list": mass_profile_list_mge, + "light_profile_list": light_profile_list_mge, + "anisotropy_model": anisotropy_model, + } + + galkin = Galkin( + kwargs_model=kwargs_model_mge, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics=kwargs_numerics, + ) + sigma_v2 = galkin.dispersion( + kwargs_profile_mge, kwargs_light_mge, kwargs_anisotropy + ) + + print(sigma_v, sigma_v2, "sigma_v Galkin, sigma_v MGEn") + print((sigma_v / sigma_v2) ** 2) + npt.assert_almost_equal((sigma_v - sigma_v2) / sigma_v2, 0, decimal=2) def test_sersic_vs_hernquist_kinematics(self): """ @@ -207,69 +299,106 @@ def test_sersic_vs_hernquist_kinematics(self): :return: """ # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. 
- kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] # aperture as slit - aperture_type = 'slit' + aperture_type = "slit" length = 3.8 width = 0.9 - kwargs_aperture = {'length': length, 'width': width, 'center_ra': 0, 'center_dec': 0, 'angle': 0, 'aperture_type': aperture_type} + kwargs_aperture = { + "length": length, + "width": width, + "center_ra": 0, + "center_dec": 0, + "angle": 0, + "aperture_type": aperture_type, + } psf_fwhm = 0.7 # Gaussian FWHM psf - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # light profile - light_profile_list = ['SERSIC'] - r_sersic = .3 + light_profile_list = ["SERSIC"] + r_sersic = 0.3 n_sersic = 2.8 - kwargs_light = [{'amp': 1., 'R_sersic': r_sersic, 'n_sersic': n_sersic, 'center_x': 0, 'center_y': 0}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + { + "amp": 1.0, + "R_sersic": r_sersic, + "n_sersic": n_sersic, + "center_x": 0, + "center_y": 0, + } + ] # effective half light radius (2d projected) in arcsec # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # Hernquist fit to Sersic profile - profile_analysis = LightProfileAnalysis(LightModel(['SERSIC'])) - r_eff = profile_analysis.half_light_radius(kwargs_light, grid_spacing=0.1, grid_num=100) + profile_analysis = LightProfileAnalysis(LightModel(["SERSIC"])) + r_eff = profile_analysis.half_light_radius( + kwargs_light, grid_spacing=0.1, grid_num=100 + ) print(r_eff) - light_profile_list_hernquist = ['HERNQUIST'] - kwargs_light_hernquist = [{'Rs': r_eff*0.551, 'amp': 1.}] + light_profile_list_hernquist = ["HERNQUIST"] + kwargs_light_hernquist = [{"Rs": r_eff * 0.551, "amp": 1.0}] # mge of light profile lightModel = LightModel(light_profile_list) r_array = np.logspace(-3, 2, 100) * r_eff * 2 - print(r_sersic/r_eff, 'r_sersic/r_eff') + print(r_sersic / r_eff, "r_sersic/r_eff") flux_r = lightModel.surface_brightness(r_array, 0, kwargs_light) amps, sigmas, norm = mge.mge_1d(r_array, flux_r, N=20) - light_profile_list_mge = ['MULTI_GAUSSIAN'] - kwargs_light_mge = [{'amp': amps, 'sigma': sigmas}] - print(amps, sigmas, 'amp', 'sigma') - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm} - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list_hernquist, - 'anisotropy_model': anisotropy_type} - - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics={}) - sigma_v = galkin.dispersion(kwargs_profile, kwargs_light_hernquist, kwargs_anisotropy) - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list_mge, - 'anisotropy_model': anisotropy_type} - galkin = Galkin(kwargs_model=kwargs_model, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo, - kwargs_aperture=kwargs_aperture, kwargs_numerics={}) - sigma_v2 = galkin.dispersion(kwargs_profile, kwargs_light_mge, kwargs_anisotropy) - - print(sigma_v, sigma_v2, 'sigma_v Galkin, sigma_v MGEn') - print((sigma_v/sigma_v2)**2) - - npt.assert_almost_equal((sigma_v-sigma_v2)/sigma_v2, 0, decimal=1) - - -if 
__name__ == '__main__': + light_profile_list_mge = ["MULTI_GAUSSIAN"] + kwargs_light_mge = [{"amp": amps, "sigma": sigmas}] + print(amps, sigmas, "amp", "sigma") + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": psf_fwhm} + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list_hernquist, + "anisotropy_model": anisotropy_type, + } + + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics={}, + ) + sigma_v = galkin.dispersion( + kwargs_profile, kwargs_light_hernquist, kwargs_anisotropy + ) + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list_mge, + "anisotropy_model": anisotropy_type, + } + galkin = Galkin( + kwargs_model=kwargs_model, + kwargs_psf=kwargs_psf, + kwargs_cosmo=kwargs_cosmo, + kwargs_aperture=kwargs_aperture, + kwargs_numerics={}, + ) + sigma_v2 = galkin.dispersion( + kwargs_profile, kwargs_light_mge, kwargs_anisotropy + ) + + print(sigma_v, sigma_v2, "sigma_v Galkin, sigma_v MGEn") + print((sigma_v / sigma_v2) ** 2) + + npt.assert_almost_equal((sigma_v - sigma_v2) / sigma_v2, 0, decimal=1) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_numeric_kinematics.py b/test/test_GalKin/test_numeric_kinematics.py index b0bcabced..794337b1f 100644 --- a/test/test_GalKin/test_numeric_kinematics.py +++ b/test/test_GalKin/test_numeric_kinematics.py @@ -1,6 +1,4 @@ -""" -Tests for `Galkin` module. -""" +"""Tests for `Galkin` module.""" import pytest import numpy as np import numpy.testing as npt @@ -12,50 +10,65 @@ class TestMassProfile(object): - def setup_method(self): pass def test_mass_3d(self): - kwargs_model = {'mass_profile_list': ['HERNQUIST'], 'light_profile_list': ['HERNQUIST'], - 'anisotropy_model': 'isotropic'} - massProfile = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo={'d_d': 1., 'd_s': 2., 'd_ds': 1.}) + kwargs_model = { + "mass_profile_list": ["HERNQUIST"], + "light_profile_list": ["HERNQUIST"], + "anisotropy_model": "isotropic", + } + massProfile = NumericKinematics( + kwargs_model=kwargs_model, + kwargs_cosmo={"d_d": 1.0, "d_s": 2.0, "d_ds": 1.0}, + ) r = 0.3 - kwargs_profile = [{'sigma0': 1., 'Rs': 0.5}] + kwargs_profile = [{"sigma0": 1.0, "Rs": 0.5}] mass_3d = massProfile._mass_3d_interp(r, kwargs_profile) mass_3d_exact = massProfile.mass_3d(r, kwargs_profile) - npt.assert_almost_equal(mass_3d/mass_3d_exact, 1., decimal=3) + npt.assert_almost_equal(mass_3d / mass_3d_exact, 1.0, decimal=3) def test_sigma_r2(self): - """ - tests the solution of the Jeans equation for sigma**2(r), where r is the 3d radius. - Test is compared to analytic OM solution with power-law and Hernquist light profile + """Tests the solution of the Jeans equation for sigma**2(r), where r is the 3d + radius. Test is compared to analytic OM solution with power-law and Hernquist + light profile. 
:return: """ - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 0.5 Rs = 0.551 * r_eff - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 gamma = 1.95 - kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + kwargs_mass = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' + anisotropy_type = "OM" r_ani = 0.5 - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 4000, 'min_integrate': 0.001} - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] + + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 4000, + "min_integrate": 0.001, + } + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } analytic_kin = AnalyticKinematics(kwargs_cosmo, **kwargs_numerics) numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, **kwargs_numerics) rho0_r0_gamma = analytic_kin._rho0_r0_gamma(theta_E, gamma) @@ -63,84 +76,129 @@ def test_sigma_r2(self): sigma_r_analytic_array = [] sigma_r_num_array = [] for r in r_array: - sigma_r2_analytic = analytic_kin._sigma_r2(r=r, a=Rs, gamma=gamma, r_ani=r_ani, rho0_r0_gamma=rho0_r0_gamma) - sigma_r2_num = numeric_kin.sigma_r2(r, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma_r2_analytic = analytic_kin._sigma_r2( + r=r, a=Rs, gamma=gamma, r_ani=r_ani, rho0_r0_gamma=rho0_r0_gamma + ) + sigma_r2_num = numeric_kin.sigma_r2( + r, kwargs_mass, kwargs_light, kwargs_anisotropy + ) sigma_r_analytic = np.sqrt(sigma_r2_analytic) / 1000 sigma_r_num = np.sqrt(sigma_r2_num) / 1000 sigma_r_num_array.append(sigma_r_num) sigma_r_analytic_array.append(sigma_r_analytic) npt.assert_almost_equal(sigma_r_num_array, sigma_r_analytic_array, decimal=-2) - npt.assert_almost_equal(np.array(sigma_r_num_array) / np.array(sigma_r_analytic_array), 1, decimal=-2) + npt.assert_almost_equal( + np.array(sigma_r_num_array) / np.array(sigma_r_analytic_array), + 1, + decimal=-2, + ) print(np.array(sigma_r_num_array) / np.array(sigma_r_analytic_array)) def test_sigma_s2(self): - """ - test LOS projected velocity dispersion at 3d ratios (numerical Jeans equation solution vs analytic one) - """ - light_profile_list = ['HERNQUIST'] + """Test LOS projected velocity dispersion at 3d ratios (numerical Jeans equation + solution vs analytic one)""" + light_profile_list = ["HERNQUIST"] r_eff = 0.5 Rs = 0.551 * r_eff - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 gamma = 1.95 - kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius 
(arcsec) and power-law slope + kwargs_mass = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' + anisotropy_type = "OM" r_ani = 0.5 - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 4000, 'min_integrate': 0.001} - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] + + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 4000, + "min_integrate": 0.001, + } + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } analytic_kin = AnalyticKinematics(kwargs_cosmo, **kwargs_numerics) numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, **kwargs_numerics) r_list = np.logspace(-2, 1, 10) for r in r_list: for R in np.linspace(start=0, stop=r, num=5): - sigma_s2_analytic, I_R = analytic_kin.sigma_s2(r, R, {'theta_E': theta_E, 'gamma': gamma}, {'r_eff': r_eff}, kwargs_anisotropy) - sigma_s2_full_num = numeric_kin.sigma_s2_r(r, R, kwargs_mass, kwargs_light, kwargs_anisotropy) - npt.assert_almost_equal(sigma_s2_full_num/sigma_s2_analytic, 1, decimal=2) + sigma_s2_analytic, I_R = analytic_kin.sigma_s2( + r, + R, + {"theta_E": theta_E, "gamma": gamma}, + {"r_eff": r_eff}, + kwargs_anisotropy, + ) + sigma_s2_full_num = numeric_kin.sigma_s2_r( + r, R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) + npt.assert_almost_equal( + sigma_s2_full_num / sigma_s2_analytic, 1, decimal=2 + ) def test_I_R_sigma_s2(self): - light_profile_list = ['HERNQUIST'] + light_profile_list = ["HERNQUIST"] r_eff = 1 Rs = 0.551 * r_eff - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 gamma = 1.95 - kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + kwargs_mass = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' + anisotropy_type = "OM" r_ani = 0.5 - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 4000, 'log_integration': True, - 'max_integrate': 100, 'min_integrate': 0.0001, 'max_light_draw': 50} - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] + + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 4000, + "log_integration": True, + "max_integrate": 100, + "min_integrate": 0.0001, + "max_light_draw": 50, + } + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, 
**kwargs_numerics) # check whether projected light integral is the same as analytic expression R = 1 - I_R_sigma2, I_R = numeric_kin._I_R_sigma2(R, kwargs_mass, kwargs_light, kwargs_anisotropy) - out = integrate.quad(lambda x: numeric_kin.lightProfile.light_3d(np.sqrt(R ** 2 + x ** 2), kwargs_light), kwargs_numerics['min_integrate'], - np.sqrt(kwargs_numerics['max_integrate']**2 - R**2)) + I_R_sigma2, I_R = numeric_kin._I_R_sigma2( + R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) + out = integrate.quad( + lambda x: numeric_kin.lightProfile.light_3d( + np.sqrt(R**2 + x**2), kwargs_light + ), + kwargs_numerics["min_integrate"], + np.sqrt(kwargs_numerics["max_integrate"] ** 2 - R**2), + ) l_R_quad = out[0] * 2 npt.assert_almost_equal(l_R_quad / I_R, 1, decimal=2) @@ -149,108 +207,165 @@ def test_I_R_sigma_s2(self): def test_log_linear_integral(self): # light profile - light_profile_list = ['HERNQUIST'] - Rs = .5 - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + light_profile_list = ["HERNQUIST"] + Rs = 0.5 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. - kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics_linear = {'interpol_grid_num': 2000, 'log_integration': False, - 'max_integrate': 10, 'min_integrate': 0.001} - kwargs_numerics_log = {'interpol_grid_num': 1000, 'log_integration': True, - 'max_integrate': 10, 'min_integrate': 0.001} - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - - numerics_linear = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics_linear) - numerics_log = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics_log) + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] + + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics_linear = { + "interpol_grid_num": 2000, + "log_integration": False, + "max_integrate": 10, + "min_integrate": 0.001, + } + kwargs_numerics_log = { + "interpol_grid_num": 1000, + "log_integration": True, + "max_integrate": 10, + "min_integrate": 0.001, + } + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + + numerics_linear = NumericKinematics( + kwargs_model=kwargs_model, + kwargs_cosmo=kwargs_cosmo, + **kwargs_numerics_linear + ) + numerics_log = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics_log + ) R = np.logspace(-2, 0, 100) lin_I_R = np.zeros_like(R) log_I_R = np.zeros_like(R) for i in range(len(R)): - lin_I_R[i], _ = numerics_linear._I_R_sigma2(R[i], kwargs_profile, kwargs_light, kwargs_anisotropy) - log_I_R[i], _ = numerics_log._I_R_sigma2(R[i], kwargs_profile, kwargs_light, kwargs_anisotropy) - - #import matplotlib.pyplot as plt - #plt.semilogx(R, lin_I_R / log_I_R, 'r', 
label='lin /log integrate') - #plt.legend() - #plt.show() + lin_I_R[i], _ = numerics_linear._I_R_sigma2( + R[i], kwargs_profile, kwargs_light, kwargs_anisotropy + ) + log_I_R[i], _ = numerics_log._I_R_sigma2( + R[i], kwargs_profile, kwargs_light, kwargs_anisotropy + ) + + # import matplotlib.pyplot as plt + # plt.semilogx(R, lin_I_R / log_I_R, 'r', label='lin /log integrate') + # plt.legend() + # plt.show() R_ = 1 - r_array = np.logspace(start=np.log10(R_+0.001), stop=1, num=100) - integrad_a15 = numerics_linear._integrand_A15(r_array, R_, kwargs_profile, kwargs_light, kwargs_anisotropy) - #plt.loglog(r_array, integrad_a15) - #plt.show() + r_array = np.logspace(start=np.log10(R_ + 0.001), stop=1, num=100) + integrad_a15 = numerics_linear._integrand_A15( + r_array, R_, kwargs_profile, kwargs_light, kwargs_anisotropy + ) + # plt.loglog(r_array, integrad_a15) + # plt.show() for i in range(len(R)): npt.assert_almost_equal(log_I_R[i] / lin_I_R[i], 1, decimal=2) - #assert 1 == 0 + # assert 1 == 0 def test_I_R_sigma(self): - """ - test numerical integral against quad integrator - :return: - """ - light_profile_list = ['HERNQUIST'] - Rs = .5 - kwargs_light = [{'Rs': Rs, 'amp': 1.}] # effective half light radius (2d projected) in arcsec + """Test numerical integral against quad integrator :return:""" + light_profile_list = ["HERNQUIST"] + Rs = 0.5 + kwargs_light = [ + {"Rs": Rs, "amp": 1.0} + ] # effective half light radius (2d projected) in arcsec # 0.551 * # mass profile - mass_profile_list = ['SPP'] + mass_profile_list = ["SPP"] theta_E = 1.2 - gamma = 2. - kwargs_profile = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope + gamma = 2.0 + kwargs_profile = [ + {"theta_E": theta_E, "gamma": gamma} + ] # Einstein radius (arcsec) and power-law slope # anisotropy profile - anisotropy_type = 'OM' - r_ani = 2. 
- kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec] - - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.0001} - - kwargs_model = {'mass_profile_list': mass_profile_list, - 'light_profile_list': light_profile_list, - 'anisotropy_model': anisotropy_type} - - numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics) + anisotropy_type = "OM" + r_ani = 2.0 + kwargs_anisotropy = {"r_ani": r_ani} # anisotropy radius [arcsec] + + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.0001, + } + + kwargs_model = { + "mass_profile_list": mass_profile_list, + "light_profile_list": light_profile_list, + "anisotropy_model": anisotropy_type, + } + + numerics = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics + ) R = 0.1 - out = integrate.quad(lambda x: numerics._integrand_A15(x, R, kwargs_profile, kwargs_light, kwargs_anisotropy), - R, kwargs_numerics['max_integrate']) - - I_R_sigma_quad = out[0] * 2 * const.G / (const.arcsec * kwargs_cosmo['d_d'] * const.Mpc) - I_R_sigma_numerics_log, _ = numerics._I_R_sigma2(R, kwargs_profile, kwargs_light, kwargs_anisotropy) - - kwargs_numerics_lin = {'interpol_grid_num': 2000, 'log_integration': False, - 'max_integrate': 1000, 'min_integrate': 0.0001} - numerics_lin = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics_lin) - I_R_simga_numerics_lin, _ = numerics_lin._I_R_sigma2(R, kwargs_profile, kwargs_light, kwargs_anisotropy) + out = integrate.quad( + lambda x: numerics._integrand_A15( + x, R, kwargs_profile, kwargs_light, kwargs_anisotropy + ), + R, + kwargs_numerics["max_integrate"], + ) + + I_R_sigma_quad = ( + out[0] * 2 * const.G / (const.arcsec * kwargs_cosmo["d_d"] * const.Mpc) + ) + I_R_sigma_numerics_log, _ = numerics._I_R_sigma2( + R, kwargs_profile, kwargs_light, kwargs_anisotropy + ) + + kwargs_numerics_lin = { + "interpol_grid_num": 2000, + "log_integration": False, + "max_integrate": 1000, + "min_integrate": 0.0001, + } + numerics_lin = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics_lin + ) + I_R_simga_numerics_lin, _ = numerics_lin._I_R_sigma2( + R, kwargs_profile, kwargs_light, kwargs_anisotropy + ) npt.assert_almost_equal(I_R_sigma_numerics_log / I_R_sigma_quad, 1, decimal=2) # We do not test the linear integral as it is not as accurate!!! 
- #npt.assert_almost_equal(I_R_simga_numerics_lin / I_R_simga_quad, 1, decimal=2) - + # npt.assert_almost_equal(I_R_simga_numerics_lin / I_R_simga_quad, 1, decimal=2) # here we test the interpolation - numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics) + numerics = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics + ) R = 1 - I_R_sigma2_interp, I_R_interp = numerics._I_R_sigma2_interp(R, kwargs_profile, kwargs_light, kwargs_anisotropy) - I_R_sigma2, I_R = numerics._I_R_sigma2(R, kwargs_profile, kwargs_light, kwargs_anisotropy) + I_R_sigma2_interp, I_R_interp = numerics._I_R_sigma2_interp( + R, kwargs_profile, kwargs_light, kwargs_anisotropy + ) + I_R_sigma2, I_R = numerics._I_R_sigma2( + R, kwargs_profile, kwargs_light, kwargs_anisotropy + ) npt.assert_almost_equal(I_R_sigma2_interp / I_R_sigma2, 1, decimal=3) npt.assert_almost_equal(I_R_interp / I_R, 1, decimal=3) @@ -259,29 +374,45 @@ def test_power_law_test(self): # This should result in a constant velocity dispersion as a function of radius, analytically known # set up power-law light profile - light_model = ['POWER_LAW'] - kwargs_light = [{'gamma': 2, 'amp': 1, 'e1': 0, 'e2': 0}] + light_model = ["POWER_LAW"] + kwargs_light = [{"gamma": 2, "amp": 1, "e1": 0, "e2": 0}] - lens_model = ['SIS'] - kwargs_mass = [{'theta_E': 1}] + lens_model = ["SIS"] + kwargs_mass = [{"theta_E": 1}] - anisotropy_type = 'isotropic' + anisotropy_type = "isotropic" kwargs_anisotropy = {} - kwargs_model = {'mass_profile_list': lens_model, - 'light_profile_list': light_model, - 'anisotropy_model': anisotropy_type} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.0001} - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} + kwargs_model = { + "mass_profile_list": lens_model, + "light_profile_list": light_model, + "anisotropy_model": anisotropy_type, + } + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.0001, + } + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} # compute analytic velocity dispersion of SIS profile - v_sigma_c2 = kwargs_mass[0]['theta_E'] * const.arcsec / (4 * np.pi) * kwargs_cosmo['d_s'] / kwargs_cosmo['d_ds'] + v_sigma_c2 = ( + kwargs_mass[0]["theta_E"] + * const.arcsec + / (4 * np.pi) + * kwargs_cosmo["d_s"] + / kwargs_cosmo["d_ds"] + ) v_sigma_true = np.sqrt(v_sigma_c2) * const.c / 1000 - numerics = NumericKinematics(kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics) + numerics = NumericKinematics( + kwargs_model=kwargs_model, kwargs_cosmo=kwargs_cosmo, **kwargs_numerics + ) R = 2 - I_R_sigma2, I_R = numerics._I_R_sigma2(R, kwargs_mass, kwargs_light, kwargs_anisotropy={}) + I_R_sigma2, I_R = numerics._I_R_sigma2( + R, kwargs_mass, kwargs_light, kwargs_anisotropy={} + ) sigma_v = np.sqrt(I_R_sigma2 / I_R) / 1000 print(sigma_v, v_sigma_true) npt.assert_almost_equal(sigma_v / v_sigma_true, 1, decimal=2) @@ -291,13 +422,15 @@ def test_power_law_test(self): sigma_array = np.zeros_like(r_array) for i, R in enumerate(r_array): - I_R_sigma2, I_R = numerics._I_R_sigma2(R, kwargs_mass, kwargs_light, kwargs_anisotropy) + I_R_sigma2, I_R = numerics._I_R_sigma2( + R, kwargs_mass, kwargs_light, kwargs_anisotropy + ) sigma_array[i] = np.sqrt(I_R_sigma2 / I_R) / 1000 - #import matplotlib.pyplot as plt - #plt.semilogx(r_array, sigma_array) - #plt.hlines(v_sigma_true, xmin=r_array[0], 
xmax=r_array[-1]) - #plt.show() + # import matplotlib.pyplot as plt + # plt.semilogx(r_array, sigma_array) + # plt.hlines(v_sigma_true, xmin=r_array[0], xmax=r_array[-1]) + # plt.show() npt.assert_almost_equal(sigma_array / v_sigma_true, 1, decimal=2) @@ -305,24 +438,32 @@ def test_power_law_test(self): sigma_array = np.zeros_like(r_array) R_test = 0 for i, r in enumerate(r_array): - sigma_s2 = numerics.sigma_s2_r(r, R_test, kwargs_mass, kwargs_light, kwargs_anisotropy) + sigma_s2 = numerics.sigma_s2_r( + r, R_test, kwargs_mass, kwargs_light, kwargs_anisotropy + ) sigma_array[i] = np.sqrt(sigma_s2) / 1000 npt.assert_almost_equal(sigma_array / v_sigma_true, 1, decimal=2) def test_delete_cache(self): - kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800} - kwargs_numerics = {'interpol_grid_num': 2000, 'log_integration': True, - 'max_integrate': 1000, 'min_integrate': 0.0001} - kwargs_model = {'mass_profile_list': [], - 'light_profile_list': [], - 'anisotropy_model': 'const'} + kwargs_cosmo = {"d_d": 1000, "d_s": 1500, "d_ds": 800} + kwargs_numerics = { + "interpol_grid_num": 2000, + "log_integration": True, + "max_integrate": 1000, + "min_integrate": 0.0001, + } + kwargs_model = { + "mass_profile_list": [], + "light_profile_list": [], + "anisotropy_model": "const", + } numeric_kin = NumericKinematics(kwargs_model, kwargs_cosmo, **kwargs_numerics) numeric_kin._interp_jeans_integral = 1 numeric_kin._log_mass_3d = 2 numeric_kin.delete_cache() - assert hasattr(numeric_kin, '_log_mass_3d') is False - assert hasattr(numeric_kin, '_interp_jeans_integral') is False + assert hasattr(numeric_kin, "_log_mass_3d") is False + assert hasattr(numeric_kin, "_interp_jeans_integral") is False -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_GalKin/test_psf.py b/test/test_GalKin/test_psf.py index 1038ee1c7..a0536b4d6 100644 --- a/test/test_GalKin/test_psf.py +++ b/test/test_GalKin/test_psf.py @@ -5,37 +5,35 @@ class TestPSF(object): - def setup_method(self): pass def test_displace_psf(self): - psf = PSF(psf_type='GAUSSIAN', fwhm=1) + psf = PSF(psf_type="GAUSSIAN", fwhm=1) np.random.seed(41) x, y = psf.displace_psf(0, 0) assert x != 0 assert y != 0 - psf = PSF(psf_type='MOFFAT', fwhm=1, moffat_beta=2.6) + psf = PSF(psf_type="MOFFAT", fwhm=1, moffat_beta=2.6) np.random.seed(41) x, y = psf.displace_psf(0, 0) assert x != 0 assert y != 0 def test_kernel(self): - psf = PSF(psf_type='GAUSSIAN', fwhm=1) + psf = PSF(psf_type="GAUSSIAN", fwhm=1) kernel = psf.convolution_kernel(delta_pix=0.3, num_pix=21) npt.assert_almost_equal(np.sum(kernel), 1, decimal=5) - psf = PSF(psf_type='MOFFAT', fwhm=1, moffat_beta=2.6) + psf = PSF(psf_type="MOFFAT", fwhm=1, moffat_beta=2.6) kernel = psf.convolution_kernel(delta_pix=0.3, num_pix=21) npt.assert_almost_equal(np.sum(kernel), 1, decimal=5) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - psf = PSF(psf_type='BRRR', fwhm=1, moffat_beta=2.6) - #psf.displace_psf(0, 0) + psf = PSF(psf_type="BRRR", fwhm=1, moffat_beta=2.6) + # psf.displace_psf(0, 0) diff --git a/test/test_GalKin/test_velocity_util.py b/test/test_GalKin/test_velocity_util.py index f2aa18954..ecd6eb0df 100644 --- a/test/test_GalKin/test_velocity_util.py +++ b/test/test_GalKin/test_velocity_util.py @@ -5,9 +5,7 @@ class TestVelocityUtil(object): - """ - - """ + """""" def setup_method(self): pass @@ -41,11 +39,11 @@ def test_sample_moffat(self): x_ = x_array[1:] - x_array[1] + x_array[0] f_moffat = velocity_util.moffat_r(x_, 
alpha=alpha, beta=beta) * x_ - #import matplotlib.pyplot as plt - #plt.plot(x_, f_moffat, label='moffat') - #plt.plot(x_, r_hist, label='sampled') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # plt.plot(x_, f_moffat, label='moffat') + # plt.plot(x_, r_hist, label='sampled') + # plt.legend() + # plt.show() npt.assert_almost_equal(r_hist, f_moffat, decimal=1) def test_displace_PSF_moffat(self): @@ -66,14 +64,14 @@ def test_project_2d_random(self): r = np.ones(num) R, x, y = velocity_util.project2d_random(r=r) assert len(R) == num - #import matplotlib.pyplot as plt - #bins = np.linspace(0., 1, 100) - #hist, bins_hist = np.histogram(R, bins=bins, density=True) - #bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. - #plt.plot(bins_plot, hist, label='regular') - #plt.legend() - #plt.show() + # import matplotlib.pyplot as plt + # bins = np.linspace(0., 1, 100) + # hist, bins_hist = np.histogram(R, bins=bins, density=True) + # bins_plot = (bins_hist[1:] + bins_hist[:-1]) / 2. + # plt.plot(bins_plot, hist, label='regular') + # plt.legend() + # plt.show() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_MultiBand/test_joint_linear.py b/test/test_ImSim/test_MultiBand/test_joint_linear.py index 3ca241c6f..20c9a4204 100644 --- a/test/test_ImSim/test_MultiBand/test_joint_linear.py +++ b/test/test_ImSim/test_MultiBand/test_joint_linear.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -17,13 +17,11 @@ class TestJointLinear(object): - """ - tests the source model routines - """ - def setup_method(self): + """Tests the source model routines.""" + def setup_method(self): # data specifics - sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -31,68 +29,135 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": fwhm, "truncation": 5} psf_class = PSF(**kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile 
phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0001, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': True} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0001, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics = {"supersampling_factor": 2, "supersampling_convolution": True} + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim self.solver = LensEquationSolver(lensModel=lens_model_class) - multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics], [kwargs_data, kwargs_psf, kwargs_numerics]] - kwargs_model = {'lens_model_list': lens_model_list, 'source_light_model_list': source_model_list, - 'point_source_model_list': ['SOURCE_POSITION'], 'fixed_magnification_list': [True], - 'lens_light_model_list': lens_light_model_list} + multi_band_list = [ + [kwargs_data, kwargs_psf, kwargs_numerics], + [kwargs_data, kwargs_psf, kwargs_numerics], + ] + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "point_source_model_list": ["SOURCE_POSITION"], + "fixed_magnification_list": [True], + "lens_light_model_list": lens_light_model_list, + } self.imageModel = JointLinear(multi_band_list, kwargs_model) def test_linear_response(self): - A = self.imageModel.linear_response_matrix(kwargs_lens=self.kwargs_lens, kwargs_source=self.kwargs_source, - kwargs_lens_light=self.kwargs_lens_light, kwargs_ps=self.kwargs_ps) + A = self.imageModel.linear_response_matrix( + kwargs_lens=self.kwargs_lens, + kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + kwargs_ps=self.kwargs_ps, + ) nx, ny = np.shape(A) assert nx == 3 assert ny == 100**2 * 2 def test_image_linear_solve(self): - wls_list, model_error_list, cov_param, param = 
self.imageModel.image_linear_solve(self.kwargs_lens, - self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, inv_bool=False) + ( + wls_list, + model_error_list, + cov_param, + param, + ) = self.imageModel.image_linear_solve( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + inv_bool=False, + ) assert len(wls_list) == 2 def test_likelihood_data_given_model(self): - logL = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, source_marg=False) + logL = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=False, + ) chi2_reduced = logL * 2 / self.imageModel.num_data_evaluate npt.assert_almost_equal(chi2_reduced, -1, 1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_MultiBand/test_multi_linear.py b/test/test_ImSim/test_MultiBand/test_multi_linear.py index b0285a983..401e934ac 100644 --- a/test/test_ImSim/test_MultiBand/test_multi_linear.py +++ b/test/test_ImSim/test_MultiBand/test_multi_linear.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import pytest @@ -17,13 +17,11 @@ class TestImageModel(object): - """ - tests the source model routines - """ - def setup_method(self): + """Tests the source model routines.""" + def setup_method(self): # data specifics - sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -31,63 +29,137 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**kwargs_psf) # 'EXTERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} + 
kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0001, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': True} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0001, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics = {"supersampling_factor": 2, "supersampling_convolution": True} + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim self.solver = LensEquationSolver(lensModel=lens_model_class) multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]] - kwargs_model = {'lens_model_list': lens_model_list, 'source_light_model_list': source_model_list, - 'point_source_model_list': ['SOURCE_POSITION'], 'fixed_magnification_list': [True]} - self.imageModel = MultiLinear(multi_band_list, kwargs_model, likelihood_mask_list=None, compute_bool=None) + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "point_source_model_list": ["SOURCE_POSITION"], + "fixed_magnification_list": [True], + } + self.imageModel = MultiLinear( + multi_band_list, kwargs_model, likelihood_mask_list=None, compute_bool=None + ) def test_image_linear_solve(self): - model, error_map, cov_param, param = self.imageModel.image_linear_solve(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, inv_bool=False) - chi2_reduced = self.imageModel._imageModel_list[0].reduced_chi2(model[0], error_map[0]) + model, error_map, cov_param, param = self.imageModel.image_linear_solve( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + inv_bool=False, + ) + chi2_reduced = self.imageModel._imageModel_list[0].reduced_chi2( + model[0], error_map[0] + ) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) - chi2_reduced_list = self.imageModel.reduced_residuals(model_list=model, error_map_list=error_map) - npt.assert_almost_equal(np.sum(chi2_reduced_list[0]**2)/(100**2), 1, decimal=1) + chi2_reduced_list = 
self.imageModel.reduced_residuals( + model_list=model, error_map_list=error_map + ) + npt.assert_almost_equal( + np.sum(chi2_reduced_list[0] ** 2) / (100**2), 1, decimal=1 + ) def test_likelihood_data_given_model(self): - logL = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, source_marg=False) + logL = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=False, + ) npt.assert_almost_equal(logL, -5100, decimal=-3) - logLmarg = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, - self.kwargs_ps, source_marg=True) + logLmarg = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=True, + ) npt.assert_almost_equal(logL - logLmarg, 0, decimal=-2) def test_numData_evaluate(self): @@ -95,5 +167,5 @@ def test_numData_evaluate(self): assert numData == 10000 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_MultiBand/test_single_band_multi_model.py b/test/test_ImSim/test_MultiBand/test_single_band_multi_model.py index 298fb4341..98369f062 100644 --- a/test/test_ImSim/test_MultiBand/test_single_band_multi_model.py +++ b/test/test_ImSim/test_MultiBand/test_single_band_multi_model.py @@ -14,7 +14,6 @@ class TestSingleBandMultiModel(object): - def setup_method(self): # data specifics sigma_bkg = 0.05 # background noise per pixel @@ -25,69 +24,113 @@ def setup_method(self): # PSF specification - kwargs_data = simulation_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = simulation_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": fwhm, "pixel_size": deltaPix} psf_class = PSF(**kwargs_psf) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} - - lens_model_list = ['SPEP'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } + + lens_model_list = ["SPEP"] kwargs_lens = [kwargs_spemd] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, - 'e1': 0.1, 'e2': 0.1} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } + + lens_light_model_list = ["SERSIC"] kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) # Point Source - point_source_model_list = ['UNLENSED'] - kwargs_ps = [{'ra_image': [0.4], 'dec_image': [-0.2], 'point_amp': [2]}] + point_source_model_list = ["UNLENSED"] + kwargs_ps = [{"ra_image": [0.4], 
"dec_image": [-0.2], "point_amp": [2]}] point_source_class = PointSource(point_source_type_list=point_source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False, 'compute_mode': 'regular'} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, point_source_class=point_source_class, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + "compute_mode": "regular", + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class=point_source_class, + kwargs_numerics=kwargs_numerics, + ) self.imageModel = imageModel - image_sim = simulation_util.simulate_simple(imageModel, kwargs_lens, kwargs_source, kwargs_lens_light, - kwargs_ps) + image_sim = simulation_util.simulate_simple( + imageModel, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim self.multi_band_list = [[kwargs_data, kwargs_psf, kwargs_numerics]] - self.kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': point_source_model_list, - 'fixed_magnification_list': [False], - 'index_lens_model_list': [[0]], - 'index_lens_light_model_list': [[0]], - 'index_source_light_model_list': [[0]], - 'index_point_source_model_list': [[0]], - 'point_source_frame_list': [[0]] - } - - self.kwargs_params = {'kwargs_lens': kwargs_lens, 'kwargs_source': kwargs_source, - 'kwargs_lens_light': kwargs_lens_light, 'kwargs_ps': kwargs_ps} - - self.single_band = SingleBandMultiModel(multi_band_list=self.multi_band_list, kwargs_model=self.kwargs_model, - linear_solver=True) - self.single_band_no_linear = SingleBandMultiModel(multi_band_list=self.multi_band_list, - kwargs_model=self.kwargs_model, - linear_solver=False) + self.kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "point_source_model_list": point_source_model_list, + "fixed_magnification_list": [False], + "index_lens_model_list": [[0]], + "index_lens_light_model_list": [[0]], + "index_source_light_model_list": [[0]], + "index_point_source_model_list": [[0]], + "point_source_frame_list": [[0]], + } + + self.kwargs_params = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + "kwargs_ps": kwargs_ps, + } + + self.single_band = SingleBandMultiModel( + multi_band_list=self.multi_band_list, + kwargs_model=self.kwargs_model, + linear_solver=True, + ) + self.single_band_no_linear = SingleBandMultiModel( + multi_band_list=self.multi_band_list, + kwargs_model=self.kwargs_model, + linear_solver=False, + ) def test_likelihood_data_given_model(self): - logl = self.single_band.likelihood_data_given_model(**self.kwargs_params) - logl_no_linear = self.single_band_no_linear.likelihood_data_given_model(**self.kwargs_params) + logl_no_linear = self.single_band_no_linear.likelihood_data_given_model( + **self.kwargs_params + ) npt.assert_almost_equal(logl / logl_no_linear, 1, decimal=4) def test_num_param_linear(self): @@ -102,40 +145,64 @@ def test_image(self): npt.assert_almost_equal(image, image_) def test_source_surface_brightness(self): - image = 
self.single_band.source_surface_brightness(kwargs_source=self.kwargs_params['kwargs_source'], - kwargs_lens=self.kwargs_params['kwargs_lens']) - image_ = self.imageModel.source_surface_brightness(kwargs_source=self.kwargs_params['kwargs_source'], - kwargs_lens=self.kwargs_params['kwargs_lens']) + image = self.single_band.source_surface_brightness( + kwargs_source=self.kwargs_params["kwargs_source"], + kwargs_lens=self.kwargs_params["kwargs_lens"], + ) + image_ = self.imageModel.source_surface_brightness( + kwargs_source=self.kwargs_params["kwargs_source"], + kwargs_lens=self.kwargs_params["kwargs_lens"], + ) npt.assert_almost_equal(image, image_) def test_lens_surface_brightness(self): - image = self.single_band.lens_surface_brightness(kwargs_lens_light=self.kwargs_params['kwargs_lens_light']) - image_ = self.imageModel.lens_surface_brightness(kwargs_lens_light=self.kwargs_params['kwargs_lens_light']) + image = self.single_band.lens_surface_brightness( + kwargs_lens_light=self.kwargs_params["kwargs_lens_light"] + ) + image_ = self.imageModel.lens_surface_brightness( + kwargs_lens_light=self.kwargs_params["kwargs_lens_light"] + ) npt.assert_almost_equal(image, image_) def test_point_source(self): - image = self.single_band.point_source(kwargs_lens=self.kwargs_params['kwargs_lens'], - kwargs_ps=self.kwargs_params['kwargs_ps']) - image_ = self.imageModel.point_source(kwargs_lens=self.kwargs_params['kwargs_lens'], - kwargs_ps=self.kwargs_params['kwargs_ps']) + image = self.single_band.point_source( + kwargs_lens=self.kwargs_params["kwargs_lens"], + kwargs_ps=self.kwargs_params["kwargs_ps"], + ) + image_ = self.imageModel.point_source( + kwargs_lens=self.kwargs_params["kwargs_lens"], + kwargs_ps=self.kwargs_params["kwargs_ps"], + ) npt.assert_almost_equal(image, image_) def test_update_linear_kwargs(self): - num = self.single_band.num_param_linear(self.kwargs_params['kwargs_lens'], self.kwargs_params['kwargs_source'], - self.kwargs_params['kwargs_lens_light'], - self.kwargs_params['kwargs_ps']) + num = self.single_band.num_param_linear( + self.kwargs_params["kwargs_lens"], + self.kwargs_params["kwargs_source"], + self.kwargs_params["kwargs_lens_light"], + self.kwargs_params["kwargs_ps"], + ) param = np.ones(num) * 10 - kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps = self.single_band.update_linear_kwargs(param, - kwargs_lens=self.kwargs_params['kwargs_lens'], - kwargs_source=self.kwargs_params['kwargs_ps'], - kwargs_lens_light=self.kwargs_params['kwargs_lens_light'], - kwargs_ps=self.kwargs_params['kwargs_ps']) - assert kwargs_source[0]['amp'] == 10 + ( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + ) = self.single_band.update_linear_kwargs( + param, + kwargs_lens=self.kwargs_params["kwargs_lens"], + kwargs_source=self.kwargs_params["kwargs_ps"], + kwargs_lens_light=self.kwargs_params["kwargs_lens_light"], + kwargs_ps=self.kwargs_params["kwargs_ps"], + ) + assert kwargs_source[0]["amp"] == 10 def test_extinction_map(self): - extinction_map = self.single_band.extinction_map(kwargs_extinction=None, kwargs_special=None) + extinction_map = self.single_band.extinction_map( + kwargs_extinction=None, kwargs_special=None + ) npt.assert_almost_equal(extinction_map, 1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_adaptive_numerics.py b/test/test_ImSim/test_Numerics/test_adaptive_numerics.py index a0d7126b0..d5084e17f 100644 --- a/test/test_ImSim/test_Numerics/test_adaptive_numerics.py +++ 
b/test/test_ImSim/test_Numerics/test_adaptive_numerics.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -10,53 +10,78 @@ class TestAdaptiveConvolution(object): - def setup_method(self): self.supersampling_factor = 3 - lightModel = LightModel(light_model_list=['GAUSSIAN']) - self.delta_pix = 1. + lightModel = LightModel(light_model_list=["GAUSSIAN"]) + self.delta_pix = 1.0 x, y = util.make_grid(20, deltapix=self.delta_pix) - x_sub, y_sub = util.make_grid(20*self.supersampling_factor, deltapix=self.delta_pix/self.supersampling_factor) - kwargs = [{'amp': 1, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + x_sub, y_sub = util.make_grid( + 20 * self.supersampling_factor, + deltapix=self.delta_pix / self.supersampling_factor, + ) + kwargs = [{"amp": 1, "sigma": 2, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) flux_sub = lightModel.surface_brightness(x_sub, y_sub, kwargs) self.model_sub = util.array2image(flux_sub) x, y = util.make_grid(5, deltapix=self.delta_pix) - kwargs_kernel = [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}] + kwargs_kernel = [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}] kernel = lightModel.surface_brightness(x, y, kwargs_kernel) self.kernel = util.array2image(kernel) / np.sum(kernel) - x_sub, y_sub = util.make_grid(5*self.supersampling_factor, deltapix=self.delta_pix/self.supersampling_factor) + x_sub, y_sub = util.make_grid( + 5 * self.supersampling_factor, + deltapix=self.delta_pix / self.supersampling_factor, + ) kernel_sub = lightModel.surface_brightness(x_sub, y_sub, kwargs_kernel) self.kernel_sub = util.array2image(kernel_sub) / np.sum(kernel_sub) def test_convolve2d(self): - #kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) - subgrid_conv = SubgridKernelConvolution(self.kernel_sub, self.supersampling_factor, supersampling_kernel_size=None, convolution_type='fft') + # kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) + subgrid_conv = SubgridKernelConvolution( + self.kernel_sub, + self.supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft", + ) model_subgrid_conv = subgrid_conv.convolution2d(self.model_sub) conv_supersample_pixels = np.zeros_like(self.model) conv_supersample_pixels = np.array(conv_supersample_pixels, dtype=bool) - conv_supersample_pixels[self.model > np.max(self.model)/20] = True - adaptive_conv = AdaptiveConvolution(self.kernel_sub, self.supersampling_factor, conv_supersample_pixels, supersampling_kernel_size=5, compute_pixels=None) + conv_supersample_pixels[self.model > np.max(self.model) / 20] = True + adaptive_conv = AdaptiveConvolution( + self.kernel_sub, + self.supersampling_factor, + conv_supersample_pixels, + supersampling_kernel_size=5, + compute_pixels=None, + ) model_adaptive_conv = adaptive_conv.convolve2d(self.model_sub) - npt.assert_almost_equal(np.sum(model_subgrid_conv), np.sum(model_adaptive_conv), decimal=2) + npt.assert_almost_equal( + np.sum(model_subgrid_conv), np.sum(model_adaptive_conv), decimal=2 + ) npt.assert_almost_equal(model_subgrid_conv, model_adaptive_conv, decimal=2) conv_supersample_pixels = np.zeros_like(self.model) conv_supersample_pixels = np.array(conv_supersample_pixels, dtype=bool) conv_supersample_pixels[self.model > np.max(self.model) / 2] = True - adaptive_conv = AdaptiveConvolution(self.kernel_sub, 
self.supersampling_factor, conv_supersample_pixels, - supersampling_kernel_size=1, compute_pixels=None) + adaptive_conv = AdaptiveConvolution( + self.kernel_sub, + self.supersampling_factor, + conv_supersample_pixels, + supersampling_kernel_size=1, + compute_pixels=None, + ) model_adaptive_conv = adaptive_conv.convolve2d(self.model_sub) - npt.assert_almost_equal(np.sum(model_subgrid_conv), np.sum(model_adaptive_conv), decimal=2) + npt.assert_almost_equal( + np.sum(model_subgrid_conv), np.sum(model_adaptive_conv), decimal=2 + ) npt.assert_almost_equal(model_subgrid_conv, model_adaptive_conv, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_convolution.py b/test/test_ImSim/test_Numerics/test_convolution.py index 5407a7e29..31e7f7c4c 100644 --- a/test/test_ImSim/test_Numerics/test_convolution.py +++ b/test/test_ImSim/test_Numerics/test_convolution.py @@ -1,21 +1,24 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt -from lenstronomy.ImSim.Numerics.convolution import MultiGaussianConvolution, PixelKernelConvolution, \ - SubgridKernelConvolution, MGEConvolution +from lenstronomy.ImSim.Numerics.convolution import ( + MultiGaussianConvolution, + PixelKernelConvolution, + SubgridKernelConvolution, + MGEConvolution, +) from lenstronomy.LightModel.light_model import LightModel import lenstronomy.Util.util as util import pytest class TestPixelKernelConvolution(object): - def setup_method(self): - lightModel = LightModel(light_model_list=['GAUSSIAN']) + lightModel = LightModel(light_model_list=["GAUSSIAN"]) self.delta_pix = 1 x, y = util.make_grid(10, deltapix=self.delta_pix) - kwargs = [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}] + kwargs = [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) @@ -44,110 +47,156 @@ def test_pixel_kernel(self): class TestSubgridKernelConvolution(object): - def setup_method(self): self.supersampling_factor = 3 - lightModel = LightModel(light_model_list=['GAUSSIAN']) - self.delta_pix = 1. 
+ lightModel = LightModel(light_model_list=["GAUSSIAN"]) + self.delta_pix = 1.0 x, y = util.make_grid(20, deltapix=self.delta_pix) - x_sub, y_sub = util.make_grid(20*self.supersampling_factor, deltapix=self.delta_pix/self.supersampling_factor) - kwargs = [{'amp': 1, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + x_sub, y_sub = util.make_grid( + 20 * self.supersampling_factor, + deltapix=self.delta_pix / self.supersampling_factor, + ) + kwargs = [{"amp": 1, "sigma": 2, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) flux_sub = lightModel.surface_brightness(x_sub, y_sub, kwargs) self.model_sub = util.array2image(flux_sub) x, y = util.make_grid(5, deltapix=self.delta_pix) - kwargs_kernel = [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}] + kwargs_kernel = [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}] kernel = lightModel.surface_brightness(x, y, kwargs_kernel) self.kernel = util.array2image(kernel) / np.sum(kernel) - x_sub, y_sub = util.make_grid(5*self.supersampling_factor, deltapix=self.delta_pix/self.supersampling_factor) + x_sub, y_sub = util.make_grid( + 5 * self.supersampling_factor, + deltapix=self.delta_pix / self.supersampling_factor, + ) kernel_sub = lightModel.surface_brightness(x_sub, y_sub, kwargs_kernel) self.kernel_sub = util.array2image(kernel_sub) / np.sum(kernel_sub) def test_fft_scipy_static(self): - supersampling_factor = 2 - conv_sicpy = SubgridKernelConvolution(self.kernel, supersampling_factor, supersampling_kernel_size=None, - convolution_type='fft') - - conv_static = SubgridKernelConvolution(self.kernel, supersampling_factor, supersampling_kernel_size=None, - convolution_type='fft_static') + conv_sicpy = SubgridKernelConvolution( + self.kernel, + supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft", + ) + + conv_static = SubgridKernelConvolution( + self.kernel, + supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft_static", + ) model_conv_scipy = conv_sicpy.convolution2d(self.model) model_conv_static = conv_static.convolution2d(self.model) npt.assert_almost_equal(model_conv_static, model_conv_scipy, decimal=3) def test_convolve2d(self): - #kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) - subgrid_conv = SubgridKernelConvolution(self.kernel_sub, self.supersampling_factor, supersampling_kernel_size=None, convolution_type='fft') + # kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) + subgrid_conv = SubgridKernelConvolution( + self.kernel_sub, + self.supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft", + ) model_subgrid_conv = subgrid_conv.convolution2d(self.model_sub) supersampling_factor = 1 - conv = SubgridKernelConvolution(self.kernel, supersampling_factor, supersampling_kernel_size=None, - convolution_type='fft') + conv = SubgridKernelConvolution( + self.kernel, + supersampling_factor, + supersampling_kernel_size=None, + convolution_type="fft", + ) model_conv = conv.convolution2d(self.model) - npt.assert_almost_equal(np.sum(model_subgrid_conv), np.sum(model_conv), decimal=1) + npt.assert_almost_equal( + np.sum(model_subgrid_conv), np.sum(model_conv), decimal=1 + ) npt.assert_almost_equal(model_subgrid_conv, model_conv, decimal=1) - - #kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) - subgrid_conv_split = 
SubgridKernelConvolution(self.kernel_sub, self.supersampling_factor, supersampling_kernel_size=5, - convolution_type='fft') + # kernel_supersampled = kernel_util.subgrid_kernel(self.kernel, self.supersampling_factor, odd=True, num_iter=5) + subgrid_conv_split = SubgridKernelConvolution( + self.kernel_sub, + self.supersampling_factor, + supersampling_kernel_size=5, + convolution_type="fft", + ) model_subgrid_conv_split = subgrid_conv_split.convolution2d(self.model_sub) - npt.assert_almost_equal(np.sum(model_subgrid_conv), np.sum(model_subgrid_conv_split), decimal=8) + npt.assert_almost_equal( + np.sum(model_subgrid_conv), np.sum(model_subgrid_conv_split), decimal=8 + ) npt.assert_almost_equal(model_subgrid_conv, model_subgrid_conv_split, decimal=8) - subgrid_conv_split = SubgridKernelConvolution(self.kernel_sub, self.supersampling_factor, supersampling_kernel_size=3, - convolution_type='fft') + subgrid_conv_split = SubgridKernelConvolution( + self.kernel_sub, + self.supersampling_factor, + supersampling_kernel_size=3, + convolution_type="fft", + ) model_subgrid_conv_split = subgrid_conv_split.convolution2d(self.model_sub) - npt.assert_almost_equal(np.sum(model_subgrid_conv), np.sum(model_subgrid_conv_split), decimal=5) + npt.assert_almost_equal( + np.sum(model_subgrid_conv), np.sum(model_subgrid_conv_split), decimal=5 + ) npt.assert_almost_equal(model_subgrid_conv, model_subgrid_conv_split, decimal=3) class TestMultiGaussianConvolution(object): - def setup_method(self): - lightModel = LightModel(light_model_list=['GAUSSIAN']) + lightModel = LightModel(light_model_list=["GAUSSIAN"]) self.delta_pix = 1 x, y = util.make_grid(10, deltapix=self.delta_pix) - kwargs = [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}] + kwargs = [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) def test_convolve2d(self): sigma_list = [0.5, 1, 2] fraction_list = [0.5, 0.2, 0.3] - mge_conv = MultiGaussianConvolution(sigma_list=sigma_list, fraction_list=fraction_list, pixel_scale=self.delta_pix) + mge_conv = MultiGaussianConvolution( + sigma_list=sigma_list, + fraction_list=fraction_list, + pixel_scale=self.delta_pix, + ) image_convolved = mge_conv.convolution2d(self.model) npt.assert_almost_equal(np.sum(image_convolved), np.sum(self.model), decimal=2) class TestMGEConvolution(object): - def setup_method(self): - lightModel = LightModel(light_model_list=['GAUSSIAN']) + lightModel = LightModel(light_model_list=["GAUSSIAN"]) self.delta_pix = 1 x, y = util.make_grid(10, deltapix=self.delta_pix) - kwargs = [{'amp': 1, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + kwargs = [{"amp": 1, "sigma": 2, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) def test_convolve2d(self): - sigma_list = [2, 3, 4] fraction_list = [0.5, 0.2, 0.3] - mg_conv = MultiGaussianConvolution(sigma_list=sigma_list, fraction_list=fraction_list, pixel_scale=self.delta_pix) + mg_conv = MultiGaussianConvolution( + sigma_list=sigma_list, + fraction_list=fraction_list, + pixel_scale=self.delta_pix, + ) pixel_kernel = mg_conv.pixel_kernel(num_pix=11) mge_conv = MGEConvolution(pixel_kernel, pixel_scale=self.delta_pix, order=20) image_conv_mg = mg_conv.convolution2d(self.model) image_conv_mge = mge_conv.convolution2d(self.model) - npt.assert_almost_equal(image_conv_mge/np.max(image_conv_mg), image_conv_mg/np.max(image_conv_mg), decimal=2) + npt.assert_almost_equal( + image_conv_mge / 
np.max(image_conv_mg), + image_conv_mg / np.max(image_conv_mg), + decimal=2, + ) diff_kernel = mge_conv.kernel_difference() - npt.assert_almost_equal(diff_kernel, pixel_kernel - mge_conv._mge_conv.pixel_kernel(len(pixel_kernel))) + npt.assert_almost_equal( + diff_kernel, + pixel_kernel - mge_conv._mge_conv.pixel_kernel(len(pixel_kernel)), + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_grid.py b/test/test_ImSim/test_Numerics/test_grid.py index b8281fe82..4e4c21fc0 100644 --- a/test/test_ImSim/test_Numerics/test_grid.py +++ b/test/test_ImSim/test_Numerics/test_grid.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -11,9 +11,8 @@ class TestAdaptiveGrid(object): - def setup_method(self): - deltaPix = 1. + deltaPix = 1.0 transform_pix2angle = np.array([[1, 0], [0, 1]]) * deltaPix ra_at_xy_0, dec_at_xy_0 = -5, -5 nx, ny = 11, 11 @@ -23,11 +22,19 @@ def setup_method(self): supersampling_indexes[5, 5] = True self._supersampling_indexes = supersampling_indexes self.nx, self.ny = nx, ny - self._adaptive_grid = AdaptiveGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_indexes, self._supersampling_factor) + self._adaptive_grid = AdaptiveGrid( + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampling_indexes, + self._supersampling_factor, + ) def test_coordinates_evaluate(self): x_grid, y_grid = self._adaptive_grid.coordinates_evaluate - print(np.shape(x_grid), 'test shape') + print(np.shape(x_grid), "test shape") assert len(x_grid) == self._supersampling_factor**2 + self.nx * self.ny - 1 def test_subpixel_coordinates(self): @@ -40,8 +47,8 @@ def test_subpixel_coordinates(self): def test_average_subgrid(self): subpixel_x, subpixel_y = self._adaptive_grid._high_res_coordinates - model = LightModel(light_model_list=['GAUSSIAN']) - kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}] + model = LightModel(light_model_list=["GAUSSIAN"]) + kwargs_light = [{"center_x": 0, "center_y": 0, "sigma": 1, "amp": 1}] subgrid_values = model.surface_brightness(subpixel_x, subpixel_y, kwargs_light) supersampled_values = self._adaptive_grid._average_subgrid(subgrid_values) assert len(supersampled_values) == 1 @@ -49,39 +56,49 @@ def test_average_subgrid(self): def test_merge_low_high_res(self): subpixel_x, subpixel_y = self._adaptive_grid._high_res_coordinates x, y = self._adaptive_grid._x_low_res, self._adaptive_grid._x_low_res - model = LightModel(light_model_list=['GAUSSIAN']) - kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}] + model = LightModel(light_model_list=["GAUSSIAN"]) + kwargs_light = [{"center_x": 0, "center_y": 0, "sigma": 1, "amp": 1}] subgrid_values = model.surface_brightness(subpixel_x, subpixel_y, kwargs_light) image1d = model.surface_brightness(x, y, kwargs_light) image_added = self._adaptive_grid._merge_low_high_res(image1d, subgrid_values) added_array = util.image2array(image_added) supersampled_values = self._adaptive_grid._average_subgrid(subgrid_values) - assert added_array[util.image2array(self._supersampling_indexes)] == supersampled_values + assert ( + added_array[util.image2array(self._supersampling_indexes)] + == supersampled_values + ) image_high_res = self._adaptive_grid._high_res_image(subgrid_values) assert len(image_high_res) == self.nx * self._supersampling_factor def test_flux_array2image_low_high(self): x, y = self._adaptive_grid.coordinates_evaluate - model = 
LightModel(light_model_list=['GAUSSIAN']) - kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}] + model = LightModel(light_model_list=["GAUSSIAN"]) + kwargs_light = [{"center_x": 0, "center_y": 0, "sigma": 1, "amp": 1}] flux_values = model.surface_brightness(x, y, kwargs_light) - image_low_res, image_high_res = self._adaptive_grid.flux_array2image_low_high(flux_values) + image_low_res, image_high_res = self._adaptive_grid.flux_array2image_low_high( + flux_values + ) assert len(image_high_res) == self.nx * self._supersampling_factor class TestRegularGrid(object): - def setup_method(self): - self._deltaPix = 1. + self._deltaPix = 1.0 transform_pix2angle = np.array([[1, 0], [0, 1]]) * self._deltaPix ra_at_xy_0, dec_at_xy_0 = -5, -5 nx, ny = 11, 11 self._supersampling_factor = 4 self.nx, self.ny = nx, ny - self._regular_grid = RegularGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, - supersampling_factor=self._supersampling_factor) + self._regular_grid = RegularGrid( + nx, + ny, + transform_pix2angle, + ra_at_xy_0, + dec_at_xy_0, + supersampling_factor=self._supersampling_factor, + ) def test_grid_points_spacing(self): deltaPix = self._regular_grid.grid_points_spacing @@ -96,5 +113,6 @@ def test_supersampling_factor(self): ssf = self._regular_grid.supersampling_factor assert ssf == self._supersampling_factor -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_numerics.py b/test/test_ImSim/test_Numerics/test_numerics.py index b47aac907..252f777db 100644 --- a/test/test_ImSim/test_Numerics/test_numerics.py +++ b/test/test_ImSim/test_Numerics/test_numerics.py @@ -9,22 +9,44 @@ class TestNumerics(object): - def setup_method(self): - # we define a model consisting of a singe Sersric profile from lenstronomy.LightModel.light_model import LightModel - light_model_list = ['SERSIC_ELLIPSE'] + + light_model_list = ["SERSIC_ELLIPSE"] self.lightModel = LightModel(light_model_list=light_model_list) self.kwargs_light = [ - {'amp': 100, 'R_sersic': 0.5, 'n_sersic': 3, 'e1': 0, 'e2': 0, 'center_x': 0.02, 'center_y': 0}] + { + "amp": 100, + "R_sersic": 0.5, + "n_sersic": 3, + "e1": 0, + "e2": 0, + "center_x": 0.02, + "center_y": 0, + } + ] # we define a pixel grid and a higher resolution super sampling factor self._supersampling_factor = 5 numPix = 61 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) - x, y, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix=numPix, deltapix=deltaPix, subgrid_res=1, left_lower=False, inverse=False) + ( + x, + y, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix=numPix, + deltapix=deltaPix, + subgrid_res=1, + left_lower=False, + inverse=False, + ) flux = self.lightModel.surface_brightness(x, y, kwargs_list=self.kwargs_light) flux = util.array2image(flux) flux_max = np.max(flux) @@ -33,188 +55,305 @@ def setup_method(self): self._conv_pixels_partial = conv_pixels_partial # high resolution ray-tracing and high resolution convolution, the full calculation - self.kwargs_numerics_true = {'supersampling_factor': self._supersampling_factor, - # super sampling factor of (partial) high resolution ray-tracing - 'compute_mode': 'regular', # 'regular' or 'adaptive' - 'supersampling_convolution': True, - # bool, if True, performs the supersampled convolution (either on regular or adaptive grid) - 
'supersampling_kernel_size': None, - # size of the higher resolution kernel region (can be smaller than the original kernel). None leads to use the full size - 'flux_evaluate_indexes': None, # bool mask, if None, it will evaluate all (sub) pixels - 'supersampled_indexes': None, - # bool mask of pixels to be computed in supersampled grid (only for adaptive mode) - 'compute_indexes': None, - # bool mask of pixels to be computed the PSF response (flux being added to). Only used for adaptive mode and can be set =likelihood mask. - 'point_source_supersampling_factor': 1, - # int, supersampling factor when rendering a point source (not used in this script) - } + self.kwargs_numerics_true = { + "supersampling_factor": self._supersampling_factor, + # super sampling factor of (partial) high resolution ray-tracing + "compute_mode": "regular", # 'regular' or 'adaptive' + "supersampling_convolution": True, + # bool, if True, performs the supersampled convolution (either on regular or adaptive grid) + "supersampling_kernel_size": None, + # size of the higher resolution kernel region (can be smaller than the original kernel). None leads to use the full size + "flux_evaluate_indexes": None, # bool mask, if None, it will evaluate all (sub) pixels + "supersampled_indexes": None, + # bool mask of pixels to be computed in supersampled grid (only for adaptive mode) + "compute_indexes": None, + # bool mask of pixels to be computed the PSF response (flux being added to). Only used for adaptive mode and can be set =likelihood mask. + "point_source_supersampling_factor": 1, + # int, supersampling factor when rendering a point source (not used in this script) + } # high resolution convolution on a smaller PSF with low resolution convolution on the edges of the PSF and high resolution ray tracing - self.kwargs_numerics_high_res_narrow = {'supersampling_factor': self._supersampling_factor, - 'compute_mode': 'regular', - 'supersampling_convolution': True, - 'supersampling_kernel_size': 5, - } + self.kwargs_numerics_high_res_narrow = { + "supersampling_factor": self._supersampling_factor, + "compute_mode": "regular", + "supersampling_convolution": True, + "supersampling_kernel_size": 5, + } # low resolution convolution based on high resolution ray-tracing grid - self.kwargs_numerics_low_conv_high_grid = {'supersampling_factor': self._supersampling_factor, - 'compute_mode': 'regular', - 'supersampling_convolution': False, - # does not matter for supersampling_factor=1 - 'supersampling_kernel_size': None, - # does not matter for supersampling_factor=1 - } + self.kwargs_numerics_low_conv_high_grid = { + "supersampling_factor": self._supersampling_factor, + "compute_mode": "regular", + "supersampling_convolution": False, + # does not matter for supersampling_factor=1 + "supersampling_kernel_size": None, + # does not matter for supersampling_factor=1 + } # low resolution convolution with a subset of pixels with high resolution ray-tracing - self.kwargs_numerics_low_conv_high_adaptive = {'supersampling_factor': self._supersampling_factor, - 'compute_mode': 'adaptive', - 'supersampling_convolution': False, - # does not matter for supersampling_factor=1 - 'supersampling_kernel_size': None, - # does not matter for supersampling_factor=1 - 'supersampled_indexes': self._conv_pixels_partial, - 'convolution_kernel_size': 9, - } + self.kwargs_numerics_low_conv_high_adaptive = { + "supersampling_factor": self._supersampling_factor, + "compute_mode": "adaptive", + "supersampling_convolution": False, + # does not matter for 
supersampling_factor=1 + "supersampling_kernel_size": None, + # does not matter for supersampling_factor=1 + "supersampled_indexes": self._conv_pixels_partial, + "convolution_kernel_size": 9, + } # low resolution convolution with a subset of pixels with high resolution ray-tracing and high resoluton convolution on smaller kernel size - self.kwargs_numerics_high_adaptive = {'supersampling_factor': self._supersampling_factor, - 'compute_mode': 'adaptive', - 'supersampling_convolution': True, - # does not matter for supersampling_factor=1 - 'supersampling_kernel_size': 5, # does not matter for supersampling_factor=1 - 'supersampled_indexes': self._conv_pixels_partial, - 'convolution_kernel_size': 9, - } + self.kwargs_numerics_high_adaptive = { + "supersampling_factor": self._supersampling_factor, + "compute_mode": "adaptive", + "supersampling_convolution": True, + # does not matter for supersampling_factor=1 + "supersampling_kernel_size": 5, # does not matter for supersampling_factor=1 + "supersampled_indexes": self._conv_pixels_partial, + "convolution_kernel_size": 9, + } # low resolution convolution and low resolution ray tracing, the simplest calculation - self.kwargs_numerics_low_res = {'supersampling_factor': 1, - 'compute_mode': 'regular', - 'supersampling_convolution': False, # does not matter for supersampling_factor=1 - 'supersampling_kernel_size': None, # does not matter for supersampling_factor=1 - 'convolution_kernel_size': 9, - } + self.kwargs_numerics_low_res = { + "supersampling_factor": 1, + "compute_mode": "regular", + "supersampling_convolution": False, # does not matter for supersampling_factor=1 + "supersampling_kernel_size": None, # does not matter for supersampling_factor=1 + "convolution_kernel_size": 9, + } flux_evaluate_indexes = np.zeros((numPix, numPix), dtype=bool) flux_evaluate_indexes[flux >= flux_max / 1000] = True # low resolution convolution on subframe - self.kwargs_numerics_partial = {'supersampling_factor': 1, - 'compute_mode': 'regular', - 'supersampling_convolution': False, - # does not matter for supersampling_factor=1 - 'supersampling_kernel_size': None, # does not matter for supersampling_factor=1 - 'flux_evaluate_indexes': flux_evaluate_indexes, - 'convolution_kernel_size': 9 - } - + self.kwargs_numerics_partial = { + "supersampling_factor": 1, + "compute_mode": "regular", + "supersampling_convolution": False, + # does not matter for supersampling_factor=1 + "supersampling_kernel_size": None, # does not matter for supersampling_factor=1 + "flux_evaluate_indexes": flux_evaluate_indexes, + "convolution_kernel_size": 9, + } # import PSF file - kernel_super = kernel_util.kernel_gaussian(num_pix=11 * self._supersampling_factor, - delta_pix=deltaPix / self._supersampling_factor, fwhm=0.1) - + kernel_super = kernel_util.kernel_gaussian( + num_pix=11 * self._supersampling_factor, + delta_pix=deltaPix / self._supersampling_factor, + fwhm=0.1, + ) kernel_size = 9 - kernel_super = kernel_util.cut_psf(psf_data=kernel_super, psf_size=kernel_size * self._supersampling_factor) + kernel_super = kernel_util.cut_psf( + psf_data=kernel_super, psf_size=kernel_size * self._supersampling_factor + ) # make instance of the PixelGrid class from lenstronomy.Data.pixel_grid import PixelGrid - kwargs_grid = {'nx': numPix, 'ny': numPix, 'transform_pix2angle': Mpix2coord, 'ra_at_xy_0': ra_at_xy_0, - 'dec_at_xy_0': dec_at_xy_0} + + kwargs_grid = { + "nx": numPix, + "ny": numPix, + "transform_pix2angle": Mpix2coord, + "ra_at_xy_0": ra_at_xy_0, + "dec_at_xy_0": dec_at_xy_0, + } 
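Note: the kwargs_numerics variants assembled in the setup above control the accuracy/speed trade-off of the image simulation, namely how finely rays are traced (supersampling_factor, compute_mode) and how the PSF convolution is performed (supersampling_convolution, supersampling_kernel_size, convolution_kernel_size). Below is a minimal sketch of how one such configuration is used outside the test harness. It reuses only classes and call signatures that already appear in this diff; the ImageModel import path is assumed from the test module layout, and the grid size, pixel scale and Sersic parameters are illustrative values, not the ones used in the tests.

import lenstronomy.Util.util as util
import lenstronomy.Util.kernel_util as kernel_util
from lenstronomy.Data.pixel_grid import PixelGrid
from lenstronomy.Data.psf import PSF
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.ImSim.image_model import ImageModel  # assumed import path

num_pix = 61        # cutout size in pixels (illustrative)
delta_pix = 0.05    # pixel scale in arcsec (illustrative)
supersampling = 5   # ray-tracing / convolution supersampling factor

# pixel grid with a diagonal pixel-to-angle transform
_, _, ra0, dec0, _, _, m_pix2coord, _ = util.make_grid_with_coordtransform(
    numPix=num_pix, deltapix=delta_pix, subgrid_res=1, left_lower=False, inverse=False
)
pixel_grid = PixelGrid(
    nx=num_pix,
    ny=num_pix,
    transform_pix2angle=m_pix2coord,
    ra_at_xy_0=ra0,
    dec_at_xy_0=dec0,
)

# supersampled Gaussian PSF kernel, declared as supersampled to the PSF class
kernel_super = kernel_util.kernel_gaussian(
    num_pix=11 * supersampling, delta_pix=delta_pix / supersampling, fwhm=0.1
)
psf = PSF(
    psf_type="PIXEL",
    kernel_point_source=kernel_super,
    point_source_supersampling_factor=supersampling,
)

# "full accuracy" configuration: supersampled ray-tracing and supersampled convolution
kwargs_numerics = {
    "supersampling_factor": supersampling,
    "compute_mode": "regular",          # or "adaptive" together with supersampled_indexes
    "supersampling_convolution": True,
    "supersampling_kernel_size": None,  # None: convolve with the full supersampled kernel
}

light_model = LightModel(light_model_list=["SERSIC_ELLIPSE"])
kwargs_light = [
    {"amp": 100, "R_sersic": 0.5, "n_sersic": 3, "e1": 0, "e2": 0,
     "center_x": 0, "center_y": 0}
]
image_model = ImageModel(
    pixel_grid, psf, lens_light_model_class=light_model, kwargs_numerics=kwargs_numerics
)
image = image_model.image(kwargs_lens_light=kwargs_light)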
self.pixel_grid = PixelGrid(**kwargs_grid) # make instance of the PSF class from lenstronomy.Data.psf import PSF - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_super, - 'point_source_supersampling_factor': self._supersampling_factor} - self.psf_class = PSF(**kwargs_psf) - + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_super, + "point_source_supersampling_factor": self._supersampling_factor, + } + self.psf_class = PSF(**kwargs_psf) # without convolution - image_model_true = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_true) + image_model_true = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_true, + ) self.image_true = image_model_true.image(kwargs_lens_light=self.kwargs_light) def test_full(self): - image_model_true = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_true) - image_unconvolved = image_model_true.image(kwargs_lens_light=self.kwargs_light, unconvolved=True) - npt.assert_almost_equal(np.sum(self.image_true) / np.sum(image_unconvolved), 1, decimal=2) + image_model_true = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_true, + ) + image_unconvolved = image_model_true.image( + kwargs_lens_light=self.kwargs_light, unconvolved=True + ) + npt.assert_almost_equal( + np.sum(self.image_true) / np.sum(image_unconvolved), 1, decimal=2 + ) def test_high_res_narrow(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_high_res_narrow) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) - npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=2) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_high_res_narrow, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) + npt.assert_almost_equal( + (self.image_true - image_conv) / self.image_true, 0, decimal=2 + ) def test_low_conv_high_grid(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_low_conv_high_grid) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) - npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_low_conv_high_grid, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) + npt.assert_almost_equal( + (self.image_true - image_conv) / self.image_true, 0, decimal=1 + ) def test_low_conv_high_adaptive(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_low_conv_high_adaptive) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) - npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + 
lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_low_conv_high_adaptive, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) + npt.assert_almost_equal( + (self.image_true - image_conv) / self.image_true, 0, decimal=1 + ) def test_high_adaptive(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_high_adaptive) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) - npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_high_adaptive, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) + npt.assert_almost_equal( + (self.image_true - image_conv) / self.image_true, 0, decimal=1 + ) def test_low_res(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_low_res) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) - npt.assert_almost_equal((self.image_true - image_conv) / self.image_true, 0, decimal=1) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_low_res, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) + npt.assert_almost_equal( + (self.image_true - image_conv) / self.image_true, 0, decimal=1 + ) def test_sub_frame(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_partial) - image_conv = image_model.image(kwargs_lens_light=self.kwargs_light, unconvolved=False) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_partial, + ) + image_conv = image_model.image( + kwargs_lens_light=self.kwargs_light, unconvolved=False + ) delta = (self.image_true - image_conv) / self.image_true npt.assert_almost_equal(delta[self._conv_pixels_partial], 0, decimal=1) def test_property_access(self): - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=self.kwargs_numerics_true) + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=self.kwargs_numerics_true, + ) grid_supersampling_factor = image_model.ImageNumerics.grid_supersampling_factor assert grid_supersampling_factor == self._supersampling_factor - kwargs_numerics = {'supersampling_factor': 1, 'compute_mode': 'regular', 'supersampling_convolution': False} - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = { + "supersampling_factor": 1, + "compute_mode": "regular", + "supersampling_convolution": False, + } + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=kwargs_numerics, + ) from lenstronomy.ImSim.Numerics.convolution import PixelKernelConvolution + convolution_class = image_model.ImageNumerics.convolution_class assert isinstance(convolution_class, PixelKernelConvolution) 
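Note: test_property_access, continued below, pins down how the numerics backend is chosen from kwargs_numerics: a plain pixel-kernel convolution when supersampled convolution is off, a subgrid-kernel convolution for supersampled convolution on a regular grid, and the adaptive convolution in adaptive mode (with RegularGrid or AdaptiveGrid selected by compute_mode). The helper below is a hypothetical summary of exactly those asserted cases; it is not part of lenstronomy's API and only covers the configurations exercised by this test.

from lenstronomy.ImSim.Numerics.convolution import (
    PixelKernelConvolution,
    SubgridKernelConvolution,
)
from lenstronomy.ImSim.Numerics.adaptive_numerics import AdaptiveConvolution


def expected_convolution_class(kwargs_numerics):
    """Hypothetical helper mirroring the isinstance checks in test_property_access."""
    if not kwargs_numerics.get("supersampling_convolution", False):
        # e.g. {"supersampling_factor": 1, "compute_mode": "regular",
        #       "supersampling_convolution": False} -> PixelKernelConvolution
        return PixelKernelConvolution
    if kwargs_numerics.get("compute_mode", "regular") == "adaptive":
        # e.g. {"supersampling_factor": 2, "compute_mode": "adaptive",
        #       "supersampling_convolution": True} -> AdaptiveConvolution
        return AdaptiveConvolution
    # e.g. {"supersampling_factor": 2, "compute_mode": "regular",
    #       "supersampling_convolution": True} -> SubgridKernelConvolution
    return SubgridKernelConvolution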
- kwargs_numerics = {'supersampling_factor': 2, 'compute_mode': 'regular', 'supersampling_convolution': True} - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = { + "supersampling_factor": 2, + "compute_mode": "regular", + "supersampling_convolution": True, + } + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=kwargs_numerics, + ) from lenstronomy.ImSim.Numerics.convolution import SubgridKernelConvolution + convolution_class = image_model.ImageNumerics.convolution_class assert isinstance(convolution_class, SubgridKernelConvolution) - kwargs_numerics = {'supersampling_factor': 2, 'compute_mode': 'adaptive', 'supersampling_convolution': True} - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = { + "supersampling_factor": 2, + "compute_mode": "adaptive", + "supersampling_convolution": True, + } + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=kwargs_numerics, + ) from lenstronomy.ImSim.Numerics.adaptive_numerics import AdaptiveConvolution + convolution_class = image_model.ImageNumerics.convolution_class assert isinstance(convolution_class, AdaptiveConvolution) - kwargs_numerics = {'compute_mode': 'regular'} - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = {"compute_mode": "regular"} + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=kwargs_numerics, + ) from lenstronomy.ImSim.Numerics.grid import RegularGrid + grid_class = image_model.ImageNumerics.grid_class assert isinstance(grid_class, RegularGrid) - kwargs_numerics = {'compute_mode': 'adaptive'} - image_model = ImageModel(self.pixel_grid, self.psf_class, lens_light_model_class=self.lightModel, - kwargs_numerics=kwargs_numerics) + kwargs_numerics = {"compute_mode": "adaptive"} + image_model = ImageModel( + self.pixel_grid, + self.psf_class, + lens_light_model_class=self.lightModel, + kwargs_numerics=kwargs_numerics, + ) from lenstronomy.ImSim.Numerics.grid import AdaptiveGrid + grid_class = image_model.ImageNumerics.grid_class assert isinstance(grid_class, AdaptiveGrid) @@ -232,37 +371,56 @@ def test_supersampling_simple(): supersampling_factor = 2 # generate a Gaussian image - x, y = util.make_grid(numPix=numpix * supersampling_factor, deltapix=detector_pixel_scale / supersampling_factor) + x, y = util.make_grid( + numPix=numpix * supersampling_factor, + deltapix=detector_pixel_scale / supersampling_factor, + ) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() image_1d = gaussian.function(x, y, amp=1, sigma=0.1) image = util.array2image(image_1d) # generate psf kernal supersampled - kernel_super = kernel_util.kernel_gaussian(num_pix=21 * supersampling_factor + 1, - delta_pix=detector_pixel_scale / supersampling_factor, fwhm=0.2) - - psf_parameters = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_super, - 'point_source_supersampling_factor': supersampling_factor} - kwargs_detector = {'pixel_scale': detector_pixel_scale, - 'ccd_gain': 2.5, 'read_noise': 4.0, 'magnitude_zero_point': 25.0, - 'exposure_time': 5400.0, 'sky_brightness': 22, 'num_exposures': 1, - 
'background_noise': None} - kwargs_numerics = {'supersampling_factor': 2, - 'supersampling_convolution': True, - 'point_source_supersampling_factor': 2, - 'supersampling_kernel_size': 21 - } + kernel_super = kernel_util.kernel_gaussian( + num_pix=21 * supersampling_factor + 1, + delta_pix=detector_pixel_scale / supersampling_factor, + fwhm=0.2, + ) + + psf_parameters = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_super, + "point_source_supersampling_factor": supersampling_factor, + } + kwargs_detector = { + "pixel_scale": detector_pixel_scale, + "ccd_gain": 2.5, + "read_noise": 4.0, + "magnitude_zero_point": 25.0, + "exposure_time": 5400.0, + "sky_brightness": 22, + "num_exposures": 1, + "background_noise": None, + } + kwargs_numerics = { + "supersampling_factor": 2, + "supersampling_convolution": True, + "point_source_supersampling_factor": 2, + "supersampling_kernel_size": 21, + } psf_model = PSF(**psf_parameters) data_class = DataAPI(numpix=numpix, **kwargs_detector).data_class from lenstronomy.ImSim.Numerics.numerics_subframe import NumericsSubFrame - image_numerics = NumericsSubFrame(pixel_grid=data_class, - psf=psf_model, **kwargs_numerics) + + image_numerics = NumericsSubFrame( + pixel_grid=data_class, psf=psf_model, **kwargs_numerics + ) conv_class = image_numerics.convolution_class conv_flat = conv_class.convolution2d(image) - print(np.shape(conv_flat), 'shape of output') + print(np.shape(conv_flat), "shape of output") # psf_helper = lenstronomy_utils.PSFHelper(data_class, psf_model, kwargs_numerics) @@ -270,52 +428,54 @@ def test_supersampling_simple(): # helper_image = psf_helper.psf_model(image) from scipy import signal - scipy_image = signal.fftconvolve(image, kernel_super, mode='same') + scipy_image = signal.fftconvolve(image, kernel_super, mode="same") from lenstronomy.Util import image_util + image_scipy_resized = image_util.re_size(scipy_image, supersampling_factor) image_unconvolved = image_util.re_size(image, supersampling_factor) # Compare the outputs # low res convolution as comparison - kwargs_numerics_low_res = {'supersampling_factor': 2, - 'supersampling_convolution': False, - 'point_source_supersampling_factor': 2, - } - image_numerics_low_res = NumericsSubFrame(pixel_grid=data_class, - psf=psf_model, **kwargs_numerics_low_res) + kwargs_numerics_low_res = { + "supersampling_factor": 2, + "supersampling_convolution": False, + "point_source_supersampling_factor": 2, + } + image_numerics_low_res = NumericsSubFrame( + pixel_grid=data_class, psf=psf_model, **kwargs_numerics_low_res + ) conv_class_low_res = image_numerics_low_res.convolution_class conv_flat_low_res = conv_class_low_res.convolution2d(image_unconvolved) - #import matplotlib.pyplot as plt - #plt.matshow(image_scipy_resized - image_unconvolved) - #plt.colorbar() - #plt.show() + # import matplotlib.pyplot as plt + # plt.matshow(image_scipy_resized - image_unconvolved) + # plt.colorbar() + # plt.show() - #plt.matshow(image_scipy_resized - conv_flat) - #plt.colorbar() - #plt.show() + # plt.matshow(image_scipy_resized - conv_flat) + # plt.colorbar() + # plt.show() - #plt.matshow(image_scipy_resized - conv_flat_low_res) - #plt.colorbar() - #plt.show() + # plt.matshow(image_scipy_resized - conv_flat_low_res) + # plt.colorbar() + # plt.show() np.testing.assert_almost_equal(conv_flat, image_scipy_resized) - - class TestRaise(unittest.TestCase): - def test_integer_in_supersampling_factor(self): from lenstronomy.Data.psf import PSF - kwargs_psf = {'psf_type': 'NONE'} + + kwargs_psf = {"psf_type": "NONE"} 
psf_class = PSF(**kwargs_psf) from lenstronomy.ImSim.Numerics.numerics import Numerics + with self.assertRaises(TypeError): - Numerics(pixel_grid=None, psf=psf_class, supersampling_factor=1.) + Numerics(pixel_grid=None, psf=psf_class, supersampling_factor=1.0) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_partial_image.py b/test/test_ImSim/test_Numerics/test_partial_image.py index 41524c5ec..ad75e44af 100644 --- a/test/test_ImSim/test_Numerics/test_partial_image.py +++ b/test/test_ImSim/test_Numerics/test_partial_image.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -7,7 +7,6 @@ class TestPartialImage(object): - def setup_method(self): self.num = 10 partial_read_bools = np.zeros((self.num, self.num), dtype=bool) @@ -31,5 +30,5 @@ def test_num_partial(self): assert self._partialImage.num_partial == 2 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_pixel_convolution.py b/test/test_ImSim/test_Numerics/test_pixel_convolution.py index d336e75f0..94f753a40 100644 --- a/test/test_ImSim/test_Numerics/test_pixel_convolution.py +++ b/test/test_ImSim/test_Numerics/test_pixel_convolution.py @@ -1,8 +1,11 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt -from lenstronomy.ImSim.Numerics.numba_convolution import NumbaConvolution, SubgridNumbaConvolution +from lenstronomy.ImSim.Numerics.numba_convolution import ( + NumbaConvolution, + SubgridNumbaConvolution, +) from lenstronomy.ImSim.Numerics.convolution import PixelKernelConvolution from lenstronomy.LightModel.light_model import LightModel from lenstronomy.Util import util @@ -11,27 +14,28 @@ class TestPixelConvolution(object): - def setup_method(self): - lightModel = LightModel(light_model_list=['GAUSSIAN']) + lightModel = LightModel(light_model_list=["GAUSSIAN"]) self.delta_pix = 1 self.num_pix = 10 self.num_pix_kernel = 7 x, y = util.make_grid(numPix=self.num_pix_kernel, deltapix=self.delta_pix) - kwargs_kernel = [{'amp': 1, 'sigma': 3, 'center_x': 0, 'center_y': 0}] + kwargs_kernel = [{"amp": 1, "sigma": 3, "center_x": 0, "center_y": 0}] kernel = lightModel.surface_brightness(x, y, kwargs_kernel) self.kernel = util.array2image(kernel) self.kernel /= np.sum(self.kernel) x, y = util.make_grid(numPix=self.num_pix, deltapix=self.delta_pix) - kwargs = [{'amp': 1, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + kwargs = [{"amp": 1, "sigma": 2, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x, y, kwargs) self.model = util.array2image(flux) def test_convolve2d(self): conv_pixels = np.ones_like(self.model) conv_pixels = np.array(conv_pixels, dtype=bool) - numba_conv = NumbaConvolution(kernel=self.kernel, conv_pixels=conv_pixels, compute_pixels=conv_pixels) + numba_conv = NumbaConvolution( + kernel=self.kernel, conv_pixels=conv_pixels, compute_pixels=conv_pixels + ) model_conv_numba = numba_conv.convolve2d(self.model) @@ -42,40 +46,56 @@ def test_convolve2d(self): class TestSubgirdNumbaConvolution(object): def setup_method(self): - lightModel = LightModel(light_model_list=['GAUSSIAN']) + lightModel = LightModel(light_model_list=["GAUSSIAN"]) self.supersampling_factor = 3 self.delta_pix = 1 self.num_pix = 10 self.num_pix_kernel = 7 x, y = util.make_grid(numPix=self.num_pix_kernel, deltapix=self.delta_pix) - kwargs_kernel = [{'amp': 1, 'sigma': 3, 'center_x': 0, 'center_y': 0}] + 
kwargs_kernel = [{"amp": 1, "sigma": 3, "center_x": 0, "center_y": 0}] kernel = lightModel.surface_brightness(x, y, kwargs_kernel) self.kernel = util.array2image(kernel) self.kernel /= np.sum(self.kernel) - x_sub, y_sub = util.make_grid(numPix=self.num_pix_kernel, deltapix=self.delta_pix, subgrid_res=self.supersampling_factor) + x_sub, y_sub = util.make_grid( + numPix=self.num_pix_kernel, + deltapix=self.delta_pix, + subgrid_res=self.supersampling_factor, + ) kernel_super = lightModel.surface_brightness(x_sub, y_sub, kwargs_kernel) self.kernel_super = util.array2image(kernel_super) self.kernel_super /= np.sum(self.kernel_super) - x_sub, y_sub = util.make_grid(numPix=self.num_pix, deltapix=self.delta_pix, subgrid_res=self.supersampling_factor) - kwargs = [{'amp': 1, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + x_sub, y_sub = util.make_grid( + numPix=self.num_pix, + deltapix=self.delta_pix, + subgrid_res=self.supersampling_factor, + ) + kwargs = [{"amp": 1, "sigma": 2, "center_x": 0, "center_y": 0}] flux = lightModel.surface_brightness(x_sub, y_sub, kwargs) self.model_super = util.array2image(flux) - self.model = image_util.re_size(self.model_super, factor=self.supersampling_factor) + self.model = image_util.re_size( + self.model_super, factor=self.supersampling_factor + ) def test_convolve2d(self): conv_pixels = np.ones_like(self.model) conv_pixels = np.array(conv_pixels, dtype=bool) - numba_conv = SubgridNumbaConvolution(kernel_super=self.kernel_super, conv_pixels=conv_pixels, - compute_pixels=conv_pixels, supersampling_factor=self.supersampling_factor) + numba_conv = SubgridNumbaConvolution( + kernel_super=self.kernel_super, + conv_pixels=conv_pixels, + compute_pixels=conv_pixels, + supersampling_factor=self.supersampling_factor, + ) model_conv_numba = numba_conv.convolve2d(self.model_super) pixel_conv = PixelKernelConvolution(kernel=self.kernel_super) image_convolved = pixel_conv.convolution2d(self.model_super) - image_convolved = image_util.re_size(image_convolved, factor=self.supersampling_factor) + image_convolved = image_util.re_size( + image_convolved, factor=self.supersampling_factor + ) npt.assert_almost_equal(model_conv_numba, image_convolved, decimal=10) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_Numerics/test_point_source_rendering.py b/test/test_ImSim/test_Numerics/test_point_source_rendering.py index cf03f5858..850aa9c5a 100644 --- a/test/test_ImSim/test_Numerics/test_point_source_rendering.py +++ b/test/test_ImSim/test_Numerics/test_point_source_rendering.py @@ -9,31 +9,47 @@ class TestPointSourceRendering(object): - def setup_method(self): Mpix2coord = np.array([[1, 0], [0, 1]]) - kwargs_grid = {'ra_at_xy_0': 0, 'dec_at_xy_0': 0, - 'transform_pix2angle': Mpix2coord, 'nx': 10, 'ny': 10} + kwargs_grid = { + "ra_at_xy_0": 0, + "dec_at_xy_0": 0, + "transform_pix2angle": Mpix2coord, + "nx": 10, + "ny": 10, + } pixel_grid = PixelGrid(**kwargs_grid) kernel = np.zeros((5, 5)) kernel[2, 2] = 1 - kwargs_psf = {'kernel_point_source': kernel, 'psf_type': 'PIXEL', 'psf_error_map': np.ones_like(kernel) * kernel**2} + kwargs_psf = { + "kernel_point_source": kernel, + "psf_type": "PIXEL", + "psf_error_map": np.ones_like(kernel) * kernel**2, + } psf_class = PSF(**kwargs_psf) - self._ps_rendering = PointSourceRendering(pixel_grid, supersampling_factor=1, psf=psf_class) + self._ps_rendering = PointSourceRendering( + pixel_grid, supersampling_factor=1, psf=psf_class + ) def test_psf_error_map(self): ra_pos, dec_pos = [5], [5] data = 
np.zeros((10, 10)) - image = self._ps_rendering.psf_error_map(ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=False) + image = self._ps_rendering.psf_error_map( + ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=False + ) npt.assert_almost_equal(np.sum(image), 0, decimal=10) - image = self._ps_rendering.psf_error_map(ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=True) + image = self._ps_rendering.psf_error_map( + ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=True + ) npt.assert_almost_equal(np.sum(image), 1, decimal=10) ra_pos, dec_pos = [50], [50] data = np.zeros((10, 10)) - image = self._ps_rendering.psf_error_map(ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=False) + image = self._ps_rendering.psf_error_map( + ra_pos, dec_pos, amp=1, data=data, fix_psf_error_map=False + ) npt.assert_almost_equal(np.sum(image), 0, decimal=10) def test_point_source_rendering(self): @@ -44,21 +60,33 @@ def test_point_source_rendering(self): class TestRaise(unittest.TestCase): - def test_raise(self): Mpix2coord = np.array([[1, 0], [0, 1]]) - kwargs_grid = {'ra_at_xy_0': 0, 'dec_at_xy_0': 0, - 'transform_pix2angle': Mpix2coord, 'nx': 10, 'ny': 10} + kwargs_grid = { + "ra_at_xy_0": 0, + "dec_at_xy_0": 0, + "transform_pix2angle": Mpix2coord, + "nx": 10, + "ny": 10, + } pixel_grid = PixelGrid(**kwargs_grid) kernel = np.zeros((5, 5)) kernel[2, 2] = 1 - kwargs_psf = {'kernel_point_source': kernel, 'psf_type': 'PIXEL', 'psf_error_map': np.ones_like(kernel)} + kwargs_psf = { + "kernel_point_source": kernel, + "psf_type": "PIXEL", + "psf_error_map": np.ones_like(kernel), + } psf_class = PSF(**kwargs_psf) - self._ps_rendering = PointSourceRendering(pixel_grid, supersampling_factor=1, psf=psf_class) + self._ps_rendering = PointSourceRendering( + pixel_grid, supersampling_factor=1, psf=psf_class + ) with self.assertRaises(ValueError): - self._ps_rendering.point_source_rendering(ra_pos=[1, 1], dec_pos=[0, 1], amp=[1]) + self._ps_rendering.point_source_rendering( + ra_pos=[1, 1], dec_pos=[0, 1], amp=[1] + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_de_lens.py b/test/test_ImSim/test_de_lens.py index efd656d66..d600f08bd 100644 --- a/test/test_ImSim/test_de_lens.py +++ b/test/test_ImSim/test_de_lens.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -7,7 +7,6 @@ class TestDeLens(object): - def setup_method(self): pass @@ -20,7 +19,9 @@ def test_get_param_WLS(self): npt.assert_almost_equal(result[1], 0, decimal=8) npt.assert_almost_equal(image[0], d[0], decimal=8) - result_new, cov_error_new, image_new = de_lens.get_param_WLS(A, C_D_inv, d, inv_bool=False) + result_new, cov_error_new, image_new = de_lens.get_param_WLS( + A, C_D_inv, d, inv_bool=False + ) npt.assert_almost_equal(result_new[0], result[0], decimal=10) npt.assert_almost_equal(result_new[1], result[1], decimal=10) npt.assert_almost_equal(image_new[0], image[0], decimal=10) @@ -43,7 +44,7 @@ def test_wls_stability(self): npt.assert_almost_equal(image[0], 0, decimal=8) C_D_inv = np.array([1, 1, 1]) - A = np.array([[1., 2., 1. 
+ 10**(-8.9)], [1., 2., 1.]]).T + A = np.array([[1.0, 2.0, 1.0 + 10 ** (-8.9)], [1.0, 2.0, 1.0]]).T d = np.array([1, 2, 3]) result, cov_error, image = de_lens.get_param_WLS(A, C_D_inv, d, inv_bool=False) result, cov_error, image = de_lens.get_param_WLS(A, C_D_inv, d, inv_bool=True) @@ -52,34 +53,34 @@ def test_wls_stability(self): npt.assert_almost_equal(image[0], 0, decimal=8) def test_marginalisation_const(self): - A = np.array([[1,2,3],[3,2,1]]).T - C_D_inv = np.array([1,1,1]) - d = np.array([1,2,3]) + A = np.array([[1, 2, 3], [3, 2, 1]]).T + C_D_inv = np.array([1, 1, 1]) + d = np.array([1, 2, 3]) result, cov_error, image = de_lens.get_param_WLS(A, C_D_inv, d) logL_marg = de_lens.marginalisation_const(cov_error) npt.assert_almost_equal(logL_marg, -2.2821740957339181, decimal=8) - M_inv = np.array([[1,0],[0,1]]) + M_inv = np.array([[1, 0], [0, 1]]) marg_const = de_lens.marginalisation_const(M_inv) assert marg_const == 0 def test_margnialization_new(self): - M_inv = np.array([[1, -0.5, 1], - [-0.5, 3, 0], - [1, 0, 2]]) + M_inv = np.array([[1, -0.5, 1], [-0.5, 3, 0], [1, 0, 2]]) d_prior = 1000 m = len(M_inv) log_det = de_lens.marginalization_new(M_inv, d_prior=d_prior) log_det_old = de_lens.marginalisation_const(M_inv) - npt.assert_almost_equal(log_det, log_det_old + m/2. * np.log(np.pi/2.) - m * np.log(d_prior), decimal=9) + npt.assert_almost_equal( + log_det, + log_det_old + m / 2.0 * np.log(np.pi / 2.0) - m * np.log(d_prior), + decimal=9, + ) - M_inv = np.array([[1, 1, 1], - [0., 1., 0.], - [1., 2., 1.]]) + M_inv = np.array([[1, 1, 1], [0.0, 1.0, 0.0], [1.0, 2.0, 1.0]]) log_det = de_lens.marginalization_new(M_inv, d_prior=10) log_det_old = de_lens.marginalisation_const(M_inv) npt.assert_almost_equal(log_det, log_det_old, decimal=9) - npt.assert_almost_equal(log_det, -10**(15), decimal=10) + npt.assert_almost_equal(log_det, -(10 ** (15)), decimal=10) log_det = de_lens.marginalization_new(M_inv, d_prior=None) log_det_old = de_lens.marginalisation_const(M_inv) @@ -109,5 +110,5 @@ def test_solve_stable(self): assert np.shape(b_none) == np.shape(b) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_differential_extinction.py b/test/test_ImSim/test_differential_extinction.py index fbe19ada9..8e1c71c54 100644 --- a/test/test_ImSim/test_differential_extinction.py +++ b/test/test_ImSim/test_differential_extinction.py @@ -4,23 +4,30 @@ class TestDifferentialExtinction(object): - def setup_method(self): pass def test_extinction(self): - extinction = DifferentialExtinction(optical_depth_model=['GAUSSIAN'], tau0_index=0) - kwargs_extinction = [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}] - kwargs_special = {'tau0_list': [2, 0]} - ext = extinction.extinction(x=1, y=1, kwargs_special=kwargs_special, kwargs_extinction=kwargs_extinction) + extinction = DifferentialExtinction( + optical_depth_model=["GAUSSIAN"], tau0_index=0 + ) + kwargs_extinction = [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}] + kwargs_special = {"tau0_list": [2, 0]} + ext = extinction.extinction( + x=1, y=1, kwargs_special=kwargs_special, kwargs_extinction=kwargs_extinction + ) npt.assert_almost_equal(ext, 0.8894965388088921, decimal=8) - ext = extinction.extinction(x=1, y=1, kwargs_special=kwargs_special, kwargs_extinction=None) + ext = extinction.extinction( + x=1, y=1, kwargs_special=kwargs_special, kwargs_extinction=None + ) npt.assert_almost_equal(ext, 1, decimal=8) - ext = extinction.extinction(x=1, y=1, kwargs_special={}, kwargs_extinction=kwargs_extinction) 
+ ext = extinction.extinction( + x=1, y=1, kwargs_special={}, kwargs_extinction=kwargs_extinction + ) npt.assert_almost_equal(ext, 0.9431312415612645, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_image2soure_mapping.py b/test/test_ImSim/test_image2soure_mapping.py index e434f3721..f3bc6b37c 100644 --- a/test/test_ImSim/test_image2soure_mapping.py +++ b/test/test_ImSim/test_image2soure_mapping.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -11,56 +11,111 @@ class TestMultiSourcePlane(object): - def setup_method(self): - lens_model_list = ['SIS', 'SIS'] - self.kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}, {'theta_E': 0.5, 'center_x': 1, 'center_y':1}] + lens_model_list = ["SIS", "SIS"] + self.kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"theta_E": 0.5, "center_x": 1, "center_y": 1}, + ] singlePlane = LensModel(lens_model_list=lens_model_list) - multiPlane = LensModel(lens_model_list=lens_model_list, multi_plane=True, z_source=3, lens_redshift_list=[0.2, 0.5], - cosmo=None) - pseudoMultiPlane = LensModel(lens_model_list=lens_model_list, multi_plane=True, z_source=3, - lens_redshift_list=[0.5, 0.5], - cosmo=None) + multiPlane = LensModel( + lens_model_list=lens_model_list, + multi_plane=True, + z_source=3, + lens_redshift_list=[0.2, 0.5], + cosmo=None, + ) + pseudoMultiPlane = LensModel( + lens_model_list=lens_model_list, + multi_plane=True, + z_source=3, + lens_redshift_list=[0.5, 0.5], + cosmo=None, + ) # test single plane single source # test single plane multi source # test pseudo multi plane single source - light_model_list = ['SERSIC', 'SERSIC'] - self.kwargs_light = [{'amp': 1, 'R_sersic': 1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0}, - {'amp':2, 'R_sersic': 0.5, 'n_sersic': 1, 'center_x': 1, 'center_y': 1}] - self.singlePlane_singlePlane = Image2SourceMapping(singlePlane, LightModel(light_model_list, - deflection_scaling_list=None, - source_redshift_list=None)) - self.singlePlane_pseudoMulti = Image2SourceMapping(singlePlane, LightModel(light_model_list, - deflection_scaling_list=[1, 1], - source_redshift_list=None)) - self.pseudoMulti_pseudoMulti = Image2SourceMapping(pseudoMultiPlane, LightModel(light_model_list, - deflection_scaling_list=None, - source_redshift_list=[3, 3])) - self.pseudoMulti_single = Image2SourceMapping(pseudoMultiPlane, LightModel(light_model_list, - deflection_scaling_list=None, - source_redshift_list=None)) - self.multi_single = Image2SourceMapping(multiPlane, LightModel(light_model_list, deflection_scaling_list=None, - source_redshift_list=None)) - self.multi_pseudoMulti = Image2SourceMapping(multiPlane, LightModel(light_model_list, - deflection_scaling_list=None, - source_redshift_list=[3, 3])) - self.multi_multi = Image2SourceMapping(multiPlane, LightModel(light_model_list, deflection_scaling_list=None, - source_redshift_list=[0.3, 2])) + light_model_list = ["SERSIC", "SERSIC"] + self.kwargs_light = [ + {"amp": 1, "R_sersic": 1, "n_sersic": 2, "center_x": 0, "center_y": 0}, + {"amp": 2, "R_sersic": 0.5, "n_sersic": 1, "center_x": 1, "center_y": 1}, + ] + self.singlePlane_singlePlane = Image2SourceMapping( + singlePlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=None, + ), + ) + self.singlePlane_pseudoMulti = Image2SourceMapping( + singlePlane, + LightModel( + light_model_list, + deflection_scaling_list=[1, 1], + 
source_redshift_list=None, + ), + ) + self.pseudoMulti_pseudoMulti = Image2SourceMapping( + pseudoMultiPlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=[3, 3], + ), + ) + self.pseudoMulti_single = Image2SourceMapping( + pseudoMultiPlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=None, + ), + ) + self.multi_single = Image2SourceMapping( + multiPlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=None, + ), + ) + self.multi_pseudoMulti = Image2SourceMapping( + multiPlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=[3, 3], + ), + ) + self.multi_multi = Image2SourceMapping( + multiPlane, + LightModel( + light_model_list, + deflection_scaling_list=None, + source_redshift_list=[0.3, 2], + ), + ) def test_pseudo_multi_ray_tracing(self): x, y = util.make_grid(numPix=10, deltapix=0.5) kwargs_lens = self.kwargs_lens kwargs_light = self.kwargs_light - flux_single_single = self.singlePlane_singlePlane.image_flux_joint(x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light) - flux_single_pseudo = self.singlePlane_pseudoMulti.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - flux_pseudo_pseudo = self.pseudoMulti_pseudoMulti.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - flux_pseudo_single = self.pseudoMulti_single.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) + flux_single_single = self.singlePlane_singlePlane.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + flux_single_pseudo = self.singlePlane_pseudoMulti.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + flux_pseudo_pseudo = self.pseudoMulti_pseudoMulti.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + flux_pseudo_single = self.pseudoMulti_single.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) npt.assert_almost_equal(flux_single_single, flux_single_pseudo, decimal=10) npt.assert_almost_equal(flux_single_single, flux_pseudo_pseudo, decimal=10) npt.assert_almost_equal(flux_single_single, flux_pseudo_single, decimal=10) @@ -69,75 +124,121 @@ def test_multi_ray_tracing(self): x, y = util.make_grid(numPix=10, deltapix=0.1) kwargs_lens = self.kwargs_lens kwargs_light = self.kwargs_light - flux_multi_single = self.multi_single.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - flux_multi_pseudo = self.multi_pseudoMulti.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) + flux_multi_single = self.multi_single.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + flux_multi_pseudo = self.multi_pseudoMulti.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) npt.assert_almost_equal(flux_multi_pseudo, flux_multi_single, decimal=10) - flux_multi_multi = self.multi_multi.image_flux_joint(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) + flux_multi_multi = self.multi_multi.image_flux_joint( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) - #import matplotlib.pyplot as plt - #plt.matshow(util.array2image(flux_multi_multi)) - #plt.show() + # import matplotlib.pyplot as plt + # plt.matshow(util.array2image(flux_multi_multi)) + # plt.show() npt.assert_almost_equal(np.sum(flux_multi_multi), 1454.689246553742, 
decimal=-1) def test_pseudo_ray_trace_functions_split(self): x, y = util.make_grid(numPix=10, deltapix=0.5) kwargs_lens = self.kwargs_lens kwargs_light = self.kwargs_light - response_single_single, n1 = self.singlePlane_singlePlane.image_flux_split(x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light) - response_single_pseudo, n2 = self.singlePlane_pseudoMulti.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - response_pseudo_pseudo, n3 = self.pseudoMulti_pseudoMulti.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - response_pseudo_single, n4 = self.pseudoMulti_single.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) + response_single_single, n1 = self.singlePlane_singlePlane.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + response_single_pseudo, n2 = self.singlePlane_pseudoMulti.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + response_pseudo_pseudo, n3 = self.pseudoMulti_pseudoMulti.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + response_pseudo_single, n4 = self.pseudoMulti_single.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) npt.assert_almost_equal(n1, n2, decimal=10) npt.assert_almost_equal(n1, n3, decimal=10) npt.assert_almost_equal(n1, n4, decimal=10) assert n1 == 2 - npt.assert_almost_equal(response_single_single[0], response_single_pseudo[0], decimal=10) - npt.assert_almost_equal(response_single_single[0], response_pseudo_pseudo[0], decimal=10) - npt.assert_almost_equal(response_single_single[0], response_pseudo_single[0], decimal=10) + npt.assert_almost_equal( + response_single_single[0], response_single_pseudo[0], decimal=10 + ) + npt.assert_almost_equal( + response_single_single[0], response_pseudo_pseudo[0], decimal=10 + ) + npt.assert_almost_equal( + response_single_single[0], response_pseudo_single[0], decimal=10 + ) - npt.assert_almost_equal(response_single_single[1], response_single_pseudo[1], decimal=10) - npt.assert_almost_equal(response_single_single[1], response_pseudo_pseudo[1], decimal=10) - npt.assert_almost_equal(response_single_single[1], response_pseudo_single[1], decimal=10) + npt.assert_almost_equal( + response_single_single[1], response_single_pseudo[1], decimal=10 + ) + npt.assert_almost_equal( + response_single_single[1], response_pseudo_pseudo[1], decimal=10 + ) + npt.assert_almost_equal( + response_single_single[1], response_pseudo_single[1], decimal=10 + ) def test_multi_ray_trace_functions_split(self): x, y = util.make_grid(numPix=10, deltapix=0.1) kwargs_lens = self.kwargs_lens kwargs_light = self.kwargs_light - response_multi_single, n1 = self.multi_single.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - response_multi_pseudo, n2 = self.multi_pseudoMulti.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) - npt.assert_almost_equal(response_multi_pseudo[0], response_multi_single[0], decimal=10) - npt.assert_almost_equal(response_multi_pseudo[1], response_multi_single[1], decimal=10) + response_multi_single, n1 = self.multi_single.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + response_multi_pseudo, n2 = self.multi_pseudoMulti.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) + npt.assert_almost_equal( + response_multi_pseudo[0], response_multi_single[0], decimal=10 + ) + npt.assert_almost_equal( + 
response_multi_pseudo[1], response_multi_single[1], decimal=10 + ) npt.assert_almost_equal(n1, n2, decimal=10) - assert n1 ==2 + assert n1 == 2 - response_multi_multi, n = self.multi_multi.image_flux_split(x, y, kwargs_lens=kwargs_lens, - kwargs_source=kwargs_light) + response_multi_multi, n = self.multi_multi.image_flux_split( + x, y, kwargs_lens=kwargs_lens, kwargs_source=kwargs_light + ) npt.assert_almost_equal(np.sum(response_multi_multi), 1413, decimal=-1) def test_image2source(self): x, y = 1, 1 - beta_x, beta_y = self.multi_multi.image2source(x, y, kwargs_lens=self.kwargs_lens, index_source=0) + beta_x, beta_y = self.multi_multi.image2source( + x, y, kwargs_lens=self.kwargs_lens, index_source=0 + ) npt.assert_almost_equal(beta_x, 0.7433428403740511, decimal=2) - beta_x0, beta_y0 = self.singlePlane_singlePlane.image2source(x, y, kwargs_lens=self.kwargs_lens, index_source=0) - beta_x, beta_y = self.singlePlane_pseudoMulti.image2source(x, y, kwargs_lens=self.kwargs_lens, index_source=0) + beta_x0, beta_y0 = self.singlePlane_singlePlane.image2source( + x, y, kwargs_lens=self.kwargs_lens, index_source=0 + ) + beta_x, beta_y = self.singlePlane_pseudoMulti.image2source( + x, y, kwargs_lens=self.kwargs_lens, index_source=0 + ) npt.assert_almost_equal(beta_x0, beta_x, decimal=10) - beta_x, beta_y = self.pseudoMulti_pseudoMulti.image2source(x, y, kwargs_lens=self.kwargs_lens, index_source=0) + beta_x, beta_y = self.pseudoMulti_pseudoMulti.image2source( + x, y, kwargs_lens=self.kwargs_lens, index_source=0 + ) npt.assert_almost_equal(beta_x0, beta_x, decimal=10) def test__re_order_split(self): - lensModel = LensModel(lens_model_list=['SIS', 'SIS'], multi_plane=True, lens_redshift_list=[0.5, 0.4], z_source=3) - mapping = Image2SourceMapping(lensModel, LightModel(light_model_list=['SERSIC', 'SHAPELETS'], deflection_scaling_list=None, - source_redshift_list=[2, 0.3])) + lensModel = LensModel( + lens_model_list=["SIS", "SIS"], + multi_plane=True, + lens_redshift_list=[0.5, 0.4], + z_source=3, + ) + mapping = Image2SourceMapping( + lensModel, + LightModel( + light_model_list=["SERSIC", "SHAPELETS"], + deflection_scaling_list=None, + source_redshift_list=[2, 0.3], + ), + ) n_list = [1, 2] response = np.zeros((3, 3)) response[1:] = 1 @@ -147,35 +248,61 @@ def test__re_order_split(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, z_source=3, lens_redshift_list=[0.2], - cosmo=None) - lightModel = LightModel(light_model_list=['UNIFORM'], deflection_scaling_list=[1.], - source_redshift_list=None) + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + z_source=3, + lens_redshift_list=[0.2], + cosmo=None, + ) + lightModel = LightModel( + light_model_list=["UNIFORM"], + deflection_scaling_list=[1.0], + source_redshift_list=None, + ) class_instance = Image2SourceMapping(lensModel, lightModel) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, z_source=3, lens_redshift_list=[0.2], - cosmo=None) - lightModel = LightModel(light_model_list=['UNIFORM'], deflection_scaling_list=None, - source_redshift_list=[0, 1, 2]) + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + z_source=3, + lens_redshift_list=[0.2], + cosmo=None, + ) + lightModel = LightModel( + light_model_list=["UNIFORM"], + deflection_scaling_list=None, + source_redshift_list=[0, 1, 2], + ) class_instance = Image2SourceMapping(lensModel, 
lightModel) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, z_source=0.5, lens_redshift_list=[0.2], - cosmo=None) - lightModel = LightModel(light_model_list=['UNIFORM'], deflection_scaling_list=None, - source_redshift_list=[1]) + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + z_source=0.5, + lens_redshift_list=[0.2], + cosmo=None, + ) + lightModel = LightModel( + light_model_list=["UNIFORM"], + deflection_scaling_list=None, + source_redshift_list=[1], + ) class_instance = Image2SourceMapping(lensModel, lightModel) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=False, z_source=0.5, - cosmo=None) - lightModel = LightModel(light_model_list=['UNIFORM'], deflection_scaling_list=[1, 1]) + lensModel = LensModel( + lens_model_list=["SIS"], multi_plane=False, z_source=0.5, cosmo=None + ) + lightModel = LightModel( + light_model_list=["UNIFORM"], deflection_scaling_list=[1, 1] + ) class_instance = Image2SourceMapping(lensModel, lightModel) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_image_linear_solve.py b/test/test_ImSim/test_image_linear_solve.py index 461ea5fba..cfb695caf 100644 --- a/test/test_ImSim/test_image_linear_solve.py +++ b/test/test_ImSim/test_image_linear_solve.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -14,9 +14,8 @@ class TestImageLinearFit(object): - def setup_method(self): - sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -24,54 +23,100 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg, inverse=True + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**kwargs_psf) - kwargs_sis = {'theta_E': 1., 'center_x': 0, 'center_y': 0} + kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0} - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] self.kwargs_lens = [kwargs_sis] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] 
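A minimal, self-contained sketch of the amplitude bookkeeping that test_linear_param_from_kwargs checks just below, assuming a hypothetical helper; this is illustrative only and not lenstronomy's ImageLinearFit implementation. The flat linear-parameter vector lists the source amplitude first, then the lens-light amplitude, then the point-source amplitude.

def linear_param_from_kwargs_sketch(kwargs_source, kwargs_lens_light, kwargs_ps):
    """Collect linear amplitudes in the order: source, lens light, point source."""
    param = [kw["amp"] for kw in kwargs_source]
    param += [kw["amp"] for kw in kwargs_lens_light]
    param += [kw["source_amp"] for kw in kwargs_ps]
    return param


# ordering mirrors the assertions in test_linear_param_from_kwargs below
assert linear_param_from_kwargs_sketch(
    [{"amp": 1.0}], [{"amp": 2.0}], [{"source_amp": 3.0}]
) == [1.0, 2.0, 3.0]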
source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.01, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': False} + self.kwargs_ps = [ + {"ra_source": 0.01, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics = { + "supersampling_factor": 2, + "supersampling_convolution": False, + } - self.imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, point_source_class, kwargs_numerics=kwargs_numerics) + self.imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) def test_linear_param_from_kwargs(self): - param = self.imageModel.linear_param_from_kwargs(self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps) - assert param[0] == self.kwargs_source[0]['amp'] - assert param[1] == self.kwargs_lens_light[0]['amp'] - assert param[2] == self.kwargs_ps[0]['source_amp'] + param = self.imageModel.linear_param_from_kwargs( + self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) + assert param[0] == self.kwargs_source[0]["amp"] + assert param[1] == self.kwargs_lens_light[0]["amp"] + assert param[2] == self.kwargs_ps[0]["source_amp"] def test_update_linear_kwargs(self): - num = self.imageModel.num_param_linear(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, - self.kwargs_ps) + num = self.imageModel.num_param_linear( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) param = np.ones(num) * 10 - kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps = self.imageModel.update_linear_kwargs(param, + ( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + ) = self.imageModel.update_linear_kwargs( + param, kwargs_lens=self.kwargs_lens, - kwargs_source=self.kwargs_source, kwargs_lens_light=self.kwargs_lens_light, kwargs_ps=self.kwargs_ps) - assert kwargs_source[0]['amp'] == 10 + kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + kwargs_ps=self.kwargs_ps, + ) + assert kwargs_source[0]["amp"] == 10 def test_error_response(self): - C_D_response, model_error = self.imageModel.error_response(kwargs_lens=self.kwargs_lens, - kwargs_ps=self.kwargs_ps, kwargs_special=None) + C_D_response, model_error = self.imageModel.error_response( + kwargs_lens=self.kwargs_lens, kwargs_ps=self.kwargs_ps, kwargs_special=None + ) npt.assert_almost_equal(model_error, 0) diff --git a/test/test_ImSim/test_image_linear_solve_with_interferometric_changes.py b/test/test_ImSim/test_image_linear_solve_with_interferometric_changes.py index 950f2c745..fd4fb5735 100644 --- a/test/test_ImSim/test_image_linear_solve_with_interferometric_changes.py +++ b/test/test_ImSim/test_image_linear_solve_with_interferometric_changes.py @@ -22,97 +22,145 @@ def test_image_linear_solve_with_primary_beam_and_interferometry_psf(): - - background_rms = .05 - exp_time = np.inf - numPix = 80 - deltaPix = 0.05 - psf_type = 'PIXEL' - kernel_size = 161 - + background_rms = 0.05 + exp_time 
= np.inf + numPix = 80 + deltaPix = 0.05 + psf_type = "PIXEL" + kernel_size = 161 + # simulate a primary beam (pb) - primary_beam = np.zeros((numPix,numPix)) + primary_beam = np.zeros((numPix, numPix)) for i in range(numPix): for j in range(numPix): - primary_beam[i,j] = np.exp(-1e-4*((i-78)**2+(j-56)**2)) + primary_beam[i, j] = np.exp(-1e-4 * ((i - 78) ** 2 + (j - 56) ** 2)) primary_beam /= np.max(primary_beam) - + # simulate a spherical sinc function as psf, which contains negative pixels - psf_test = np.zeros((221,221)) + psf_test = np.zeros((221, 221)) for i in range(221): for j in range(221): if i > j: - psf_test[i,j] = psf_test[j,i] - r = np.sqrt((i-110)**2 + (j-110)**2) + psf_test[i, j] = psf_test[j, i] + r = np.sqrt((i - 110) ** 2 + (j - 110) ** 2) if r == 0: - psf_test[i,j] = 1 + psf_test[i, j] = 1 else: - psf_test[i,j] = np.sin(r*0.5)/(r*0.5) - + psf_test[i, j] = np.sin(r * 0.5) / (r * 0.5) + # note that the simulated noise here is not the interferometric noise. we just use it to test the numerics - test_noise = scipy.signal.fftconvolve(np.random.normal(0,1,(numPix,numPix)),psf_test,mode='same') - - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, background_rms) - kwargs_data['ra_at_xy_0'] = -(40)*deltaPix - kwargs_data['dec_at_xy_0'] = -(40)*deltaPix - kwargs_data['antenna_primary_beam'] = primary_beam - kwargs_data['likelihood_method'] = 'interferometry_natwt' # testing just for interferometry natwt method + test_noise = scipy.signal.fftconvolve( + np.random.normal(0, 1, (numPix, numPix)), psf_test, mode="same" + ) + + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, background_rms + ) + kwargs_data["ra_at_xy_0"] = -(40) * deltaPix + kwargs_data["dec_at_xy_0"] = -(40) * deltaPix + kwargs_data["antenna_primary_beam"] = primary_beam + kwargs_data[ + "likelihood_method" + ] = "interferometry_natwt" # testing just for interferometry natwt method data_class = ImageData(**kwargs_data) - - kernel_cut = kernel_util.cut_psf(psf_test, kernel_size, normalisation = False) - kwargs_psf = {'psf_type': psf_type,'pixel_size': deltaPix, 'kernel_point_source': kernel_cut,'kernel_point_source_normalisation': False} + + kernel_cut = kernel_util.cut_psf(psf_test, kernel_size, normalisation=False) + kwargs_psf = { + "psf_type": psf_type, + "pixel_size": deltaPix, + "kernel_point_source": kernel_cut, + "kernel_point_source_normalisation": False, + } psf_class = PSF(**kwargs_psf) - + # define lens model and source model - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.04} - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_shear = {"gamma1": 0.01, "gamma2": 0.01} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.04, + } + lens_model_list = ["SPEP", "SHEAR"] kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) - - kwargs_sersic = {'amp': 25., 'R_sersic': 0.3, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} - lens_light_model_list = ['SERSIC'] + + kwargs_sersic = { + "amp": 25.0, + "R_sersic": 0.3, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } + lens_light_model_list = ["SERSIC"] kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - - kwargs_sersic_ellipse = {'amp': 10., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': 0.05, 'e2': 0.02} - source_model_list = 
['SERSIC_ELLIPSE'] + + kwargs_sersic_ellipse = { + "amp": 10.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": 0.05, + "e2": 0.02, + } + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, kwargs_numerics=kwargs_numerics) + kwargs_numerics = {"supersampling_factor": 1, "supersampling_convolution": False} + + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) image_sim = imageModel.image(kwargs_lens, kwargs_source, kwargs_lens_light) - + # normalize the noise to make it small compared to the model image test_noise *= 1e-2 * (np.max(image_sim) / np.std(test_noise)) sim_data = image_sim + test_noise data_class.update_data(sim_data) - + # define the ImageLinearFit class using the materials defined above, run the _image_linear_solve function - imageLinearFit = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, kwargs_numerics=kwargs_numerics) - model,_,_,amps = imageLinearFit._image_linear_solve(kwargs_lens, kwargs_source, kwargs_lens_light) - + imageLinearFit = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + model, _, _, amps = imageLinearFit._image_linear_solve( + kwargs_lens, kwargs_source, kwargs_lens_light + ) + # execute the same linear solving outside of the _image_linear_solve function - A = imageLinearFit._linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps = None, unconvolved=True) + A = imageLinearFit._linear_response_matrix( + kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps=None, unconvolved=True + ) A0 = util.array2image(A[0]) A1 = util.array2image(A[1]) - A0c = scipy.signal.fftconvolve(A0, psf_test, mode = 'same') - A1c = scipy.signal.fftconvolve(A1, psf_test, mode = 'same') - M = np.zeros((2,2)) + A0c = scipy.signal.fftconvolve(A0, psf_test, mode="same") + A1c = scipy.signal.fftconvolve(A1, psf_test, mode="same") + M = np.zeros((2, 2)) b = np.zeros((2)) - M[0,0] = np.sum(A0c * A0) - M[0,1] = np.sum(A0c * A1) - M[1,0] = np.sum(A1c * A0) - M[1,1] = np.sum(A1c * A1) + M[0, 0] = np.sum(A0c * A0) + M[0, 1] = np.sum(A0c * A1) + M[1, 0] = np.sum(A1c * A0) + M[1, 1] = np.sum(A1c * A1) b[0] = np.sum(A0 * sim_data) b[1] = np.sum(A1 * sim_data) - + amps0 = np.linalg.lstsq(M, b)[0] clean_model = amps0[0] * A0 + amps0[1] * A1 dirty_model = amps0[0] * A0c + amps0[1] * A1c - + npt.assert_almost_equal([clean_model, dirty_model], model, decimal=8) - npt.assert_almost_equal(amps0, amps, decimal=8) \ No newline at end of file + npt.assert_almost_equal(amps0, amps, decimal=8) diff --git a/test/test_ImSim/test_image_model.py b/test/test_ImSim/test_image_model.py index 7000010db..a0d83b0e5 100644 --- a/test/test_ImSim/test_image_model.py +++ b/test/test_ImSim/test_image_model.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -19,13 +19,11 @@ class TestImageModel(object): - """ - tests the source model routines - """ - def setup_method(self): + """Tests the source model routines.""" + def setup_method(self): # data specifics 
- sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -33,103 +31,211 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg, inverse=True + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**kwargs_psf) kernel = psf_class.kernel_point_source - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel, - 'psf_error_map': np.ones_like(kernel) * 0.001 * kernel**2} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel, + "psf_error_map": np.ones_like(kernel) * 0.001 * kernel**2, + } psf_class = PSF(**kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} - - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.01, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': False} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, 
self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.01, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics = { + "supersampling_factor": 2, + "supersampling_convolution": False, + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - self.imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, point_source_class, kwargs_numerics=kwargs_numerics) + self.imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) self.solver = LensEquationSolver(lensModel=self.imageModel.LensModel) def test_source_surface_brightness(self): - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, - unconvolved=False, de_lensed=True) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=True + ) assert len(source_model) == 100 - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=False) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=False + ) assert len(source_model) == 100 - npt.assert_almost_equal(source_model[10, 10], 0.13939841209844345 * 0.05 ** 2, decimal=4) + npt.assert_almost_equal( + source_model[10, 10], 0.13939841209844345 * 0.05**2, decimal=4 + ) - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, unconvolved=True, de_lensed=False) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=True, de_lensed=False + ) assert len(source_model) == 100 - npt.assert_almost_equal(source_model[10, 10], 0.13536114618182182 * 0.05**2, decimal=4) + npt.assert_almost_equal( + source_model[10, 10], 0.13536114618182182 * 0.05**2, decimal=4 + ) def test_lens_surface_brightness(self): - lens_flux = self.imageModel.lens_surface_brightness(self.kwargs_lens_light, unconvolved=False) - print(np.sum(lens_flux), 'test lens flux') + lens_flux = self.imageModel.lens_surface_brightness( + self.kwargs_lens_light, unconvolved=False + ) + print(np.sum(lens_flux), "test lens flux") npt.assert_almost_equal(lens_flux[50, 50], 0.0010788981265391802, decimal=4) - #npt.assert_almost_equal(lens_flux[50, 50], 0.54214440654021534 * 0.05 ** 2, decimal=4) + # npt.assert_almost_equal(lens_flux[50, 50], 0.54214440654021534 * 0.05 ** 2, decimal=4) - lens_flux = self.imageModel.lens_surface_brightness(self.kwargs_lens_light, unconvolved=True) - npt.assert_almost_equal(lens_flux[50, 50], 4.7310552067454452 * 0.05**2, decimal=4) + lens_flux = self.imageModel.lens_surface_brightness( + self.kwargs_lens_light, unconvolved=True + ) + npt.assert_almost_equal( + lens_flux[50, 50], 4.7310552067454452 * 0.05**2, decimal=4 + ) def test_image_linear_solve(self): 
- model, error_map, cov_param, param = self.imageModel.image_linear_solve(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, inv_bool=False) + model, error_map, cov_param, param = self.imageModel.image_linear_solve( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + inv_bool=False, + ) chi2_reduced = self.imageModel.reduced_chi2(model, error_map) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) def test_linear_response_matrix(self): - A = self.imageModel.linear_response_matrix(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, - self.kwargs_ps) + A = self.imageModel.linear_response_matrix( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) n, m = np.shape(A) assert n == 3 - assert m == 100*100 + assert m == 100 * 100 def test_image_with_params(self): - model = self.imageModel.image(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, unconvolved=False, source_add=True, lens_light_add=True, point_source_add=True) + model = self.imageModel.image( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + unconvolved=False, + source_add=True, + lens_light_add=True, + point_source_add=True, + ) error_map = self.imageModel._error_map_psf(self.kwargs_lens, self.kwargs_ps) chi2_reduced = self.imageModel.reduced_chi2(model, error_map) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) def test_likelihood_data_given_model(self): - logL = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, source_marg=False) + logL = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=False, + ) npt.assert_almost_equal(logL, -5000, decimal=-3) - logLmarg = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, - self.kwargs_ps, source_marg=True) + logLmarg = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=True, + ) npt.assert_almost_equal(logL - logLmarg, 0, decimal=-3) assert logLmarg < logL def test_reduced_residuals(self): - model = sim_util.simulate_simple(self.imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps, no_noise=True) + model = sim_util.simulate_simple( + self.imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + no_noise=True, + ) residuals = self.imageModel.reduced_residuals(model, error_map=0) npt.assert_almost_equal(np.std(residuals), 1.01, decimal=1) @@ -141,12 +247,15 @@ def test_numData_evaluate(self): assert numData == 10000 def test_num_param_linear(self): - num_param_linear = self.imageModel.num_param_linear(self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + num_param_linear = self.imageModel.num_param_linear( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) assert num_param_linear == 3 def test_update_data(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1, inverse=True + ) data_class = ImageData(**kwargs_data) self.imageModel.update_data(data_class) assert self.imageModel.Data.num_pixel == 100 @@ 
-156,84 +265,153 @@ def test_point_source_rendering(self): numPix = 100 deltaPix = 0.05 - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) kernel = np.zeros((5, 5)) kernel[2, 2] = 1 - kwargs_psf = {'kernel_point_source': kernel, 'psf_type': 'PIXEL', 'psf_error_map': np.ones_like(kernel) * 0.001} + kwargs_psf = { + "kernel_point_source": kernel, + "psf_type": "PIXEL", + "psf_error_map": np.ones_like(kernel) * 0.001, + } psf_class = PSF(**kwargs_psf) - lens_model_class = LensModel(['SPEP']) + lens_model_class = LensModel(["SPEP"]) source_model_class = LightModel([]) lens_light_model_class = LightModel([]) - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': True, 'point_source_supersampling_factor': 1} - point_source_class = PointSource(point_source_type_list=['LENSED_POSITION'], fixed_magnification_list=[False]) - makeImage = ImageModel(data_class, psf_class, lens_model_class, source_model_class, lens_light_model_class, point_source_class, kwargs_numerics=kwargs_numerics) + kwargs_numerics = { + "supersampling_factor": 2, + "supersampling_convolution": True, + "point_source_supersampling_factor": 1, + } + point_source_class = PointSource( + point_source_type_list=["LENSED_POSITION"], fixed_magnification_list=[False] + ) + makeImage = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) # chose point source positions x_pix = np.array([10, 5, 10, 90]) y_pix = np.array([40, 50, 60, 50]) ra_pos, dec_pos = makeImage.Data.map_pix2coord(x_pix, y_pix) e1, e2 = param_util.phi_q2_ellipticity(0, 0.8) - kwargs_lens_init = [{'theta_E': 1, 'gamma': 2, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - kwargs_else = [{'ra_image': ra_pos, 'dec_image': dec_pos, 'point_amp': np.ones_like(ra_pos)}] - image = makeImage.image(kwargs_lens_init, kwargs_source={}, kwargs_lens_light={}, kwargs_ps=kwargs_else) - #print(np.shape(model), 'test') - #image = makeImage.ImageNumerics.array2image(model) + kwargs_lens_init = [ + {"theta_E": 1, "gamma": 2, "e1": e1, "e2": e2, "center_x": 0, "center_y": 0} + ] + kwargs_else = [ + { + "ra_image": ra_pos, + "dec_image": dec_pos, + "point_amp": np.ones_like(ra_pos), + } + ] + image = makeImage.image( + kwargs_lens_init, + kwargs_source={}, + kwargs_lens_light={}, + kwargs_ps=kwargs_else, + ) + # print(np.shape(model), 'test') + # image = makeImage.ImageNumerics.array2image(model) for i in range(len(x_pix)): npt.assert_almost_equal(image[y_pix[i], x_pix[i]], 1, decimal=2) x_pix = np.array([10.5, 5.5, 10.5, 90.5]) y_pix = np.array([40, 50, 60, 50]) ra_pos, dec_pos = makeImage.Data.map_pix2coord(x_pix, y_pix) - phi, q = 0., 0.8 + phi, q = 0.0, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_lens_init = [{'theta_E': 1, 'gamma': 2, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - kwargs_else = [{'ra_image': ra_pos, 'dec_image': dec_pos, 'point_amp': np.ones_like(ra_pos)}] - image = makeImage.image(kwargs_lens_init, kwargs_source={}, kwargs_lens_light={}, kwargs_ps=kwargs_else) - #image = makeImage.ImageNumerics.array2image(model) + kwargs_lens_init = [ + {"theta_E": 1, "gamma": 2, "e1": e1, "e2": e2, "center_x": 0, "center_y": 0} + ] + kwargs_else = [ + { + "ra_image": ra_pos, + "dec_image": dec_pos, + "point_amp": np.ones_like(ra_pos), + } 
+ ] + image = makeImage.image( + kwargs_lens_init, + kwargs_source={}, + kwargs_lens_light={}, + kwargs_ps=kwargs_else, + ) + # image = makeImage.ImageNumerics.array2image(model) for i in range(len(x_pix)): - print(int(y_pix[i]), int(x_pix[i]+0.5)) + print(int(y_pix[i]), int(x_pix[i] + 0.5)) npt.assert_almost_equal(image[int(y_pix[i]), int(x_pix[i])], 0.5, decimal=1) - npt.assert_almost_equal(image[int(y_pix[i]), int(x_pix[i]+0.5)], 0.5, decimal=1) + npt.assert_almost_equal( + image[int(y_pix[i]), int(x_pix[i] + 0.5)], 0.5, decimal=1 + ) def test_point_source(self): - - pointSource = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_ps = [{'source_amp': 1000, 'ra_source': 0.1, 'dec_source': 0.1}] - lensModel = LensModel(lens_model_list=['SIS']) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + pointSource = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_ps = [{"source_amp": 1000, "ra_source": 0.1, "dec_source": 0.1}] + lensModel = LensModel(lens_model_list=["SIS"]) + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] numPix = 64 deltaPix = 0.13 - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) psf_type = "GAUSSIAN" fwhm = 0.9 - kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm} + kwargs_psf = {"psf_type": psf_type, "fwhm": fwhm} psf_class = PSF(**kwargs_psf) - imageModel = ImageModel(data_class=data_class, psf_class=psf_class, lens_model_class=lensModel, - point_source_class=pointSource) + imageModel = ImageModel( + data_class=data_class, + psf_class=psf_class, + lens_model_class=lensModel, + point_source_class=pointSource, + ) image = imageModel.image(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps) assert np.sum(image) > 0 def test_error_map_source(self): - sourceModel = LightModel(light_model_list=['UNIFORM', 'UNIFORM']) + sourceModel = LightModel(light_model_list=["UNIFORM", "UNIFORM"]) - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) psf_type = "GAUSSIAN" fwhm = 0.9 - kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm} + kwargs_psf = {"psf_type": psf_type, "fwhm": fwhm} psf_class = PSF(**kwargs_psf) - imageModel = ImageLinearFit(data_class=data_class, psf_class=psf_class, lens_model_class=None, - point_source_class=None, source_model_class=sourceModel) + imageModel = ImageLinearFit( + data_class=data_class, + psf_class=psf_class, + lens_model_class=None, + point_source_class=None, + source_model_class=sourceModel, + ) x_grid, y_grid = util.make_grid(numPix=10, deltapix=1) - error_map = imageModel.error_map_source(kwargs_source=[{'amp': 1}, {'amp': 1}], x_grid=x_grid, y_grid=y_grid, cov_param=np.array([[1, 0], [0, 1]])) + error_map = imageModel.error_map_source( + kwargs_source=[{"amp": 1}, {"amp": 1}], + x_grid=x_grid, + y_grid=y_grid, + cov_param=np.array([[1, 0], [0, 1]]), + ) assert error_map[0] == 2 def test_create_empty(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1 + ) data_class = 
ImageData(**kwargs_data) imageModel_empty = ImageModel(data_class, PSF()) assert imageModel_empty._psf_error_map == False @@ -242,35 +420,56 @@ def test_create_empty(self): assert flux.all() == 0 def test_extinction_map(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) - extinction_class = DifferentialExtinction(optical_depth_model=['UNIFORM'], tau0_index=0) + extinction_class = DifferentialExtinction( + optical_depth_model=["UNIFORM"], tau0_index=0 + ) imageModel = ImageModel(data_class, PSF(), extinction_class=extinction_class) - extinction = imageModel.extinction_map(kwargs_extinction=[{'amp': 1}], kwargs_special={'tau0_list': [1, 0, 0]}) + extinction = imageModel.extinction_map( + kwargs_extinction=[{"amp": 1}], kwargs_special={"tau0_list": [1, 0, 0]} + ) npt.assert_almost_equal(extinction, np.exp(-1)) def test_error_response(self): - - C_D_response, model_error = self.imageModel._error_response(self.kwargs_lens, self.kwargs_ps, kwargs_special=None) + C_D_response, model_error = self.imageModel._error_response( + self.kwargs_lens, self.kwargs_ps, kwargs_special=None + ) assert len(model_error) == 100 print(np.sum(model_error)) npt.assert_almost_equal(np.sum(model_error), 0.0019271126921470687, decimal=3) def test_point_source_linear_response_set(self): - kwargs_special = {'delta_x_image': [0.1, 0.1], 'delta_y_image': [-0.1, -0.1]} - ra_pos, dec_pos, amp, num_point = self.imageModel.point_source_linear_response_set(self.kwargs_ps, self.kwargs_lens, kwargs_special, with_amp=True) - ra, dec = self.imageModel.PointSource.image_position(self.kwargs_ps, self.kwargs_lens) + kwargs_special = {"delta_x_image": [0.1, 0.1], "delta_y_image": [-0.1, -0.1]} + ( + ra_pos, + dec_pos, + amp, + num_point, + ) = self.imageModel.point_source_linear_response_set( + self.kwargs_ps, self.kwargs_lens, kwargs_special, with_amp=True + ) + ra, dec = self.imageModel.PointSource.image_position( + self.kwargs_ps, self.kwargs_lens + ) npt.assert_almost_equal(ra[0][0], ra_pos[0][0] - 0.1, decimal=5) def test_displace_astrometry(self): - kwargs_special = {'delta_x_image': np.array([0.1, 0.1]), 'delta_y_image': np.array([-0.1, -0.1])} + kwargs_special = { + "delta_x_image": np.array([0.1, 0.1]), + "delta_y_image": np.array([-0.1, -0.1]), + } x_pos, y_pos = np.array([0, 0]), np.array([0, 0]) - x_shift, y_shift = self.imageModel._displace_astrometry(x_pos, y_pos, kwargs_special=kwargs_special) + x_shift, y_shift = self.imageModel._displace_astrometry( + x_pos, y_pos, kwargs_special=kwargs_special + ) assert x_pos[0] == 0 - assert x_shift[0] == kwargs_special['delta_x_image'][0] + assert x_shift[0] == kwargs_special["delta_x_image"][0] assert y_pos[0] == 0 - assert y_shift[0] == kwargs_special['delta_y_image'][0] + assert y_shift[0] == kwargs_special["delta_y_image"][0] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_image_model_pixelbased.py b/test/test_ImSim/test_image_model_pixelbased.py index 454f6f0fc..22b6fe8b0 100644 --- a/test/test_ImSim/test_image_model_pixelbased.py +++ b/test/test_ImSim/test_image_model_pixelbased.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -19,16 +19,17 @@ from lenstronomy.LightModel.Profiles.starlets import SLIT_Starlets -_force_no_pysap = True # if issues 
on Travis-CI to install pysap, force use python-only functions +_force_no_pysap = ( + True # if issues on Travis-CI to install pysap, force use python-only functions +) class TestImageModel(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): # data specifics - sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -36,128 +37,246 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg, inverse=True + ) data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**kwargs_psf) kernel = psf_class.kernel_point_source - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel, 'psf_error_map': np.ones_like(kernel) * 0.001} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel, + "psf_error_map": np.ones_like(kernel) * 0.001, + } psf_class = PSF(**kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] kwargs_lens_light_base = [kwargs_sersic] lens_light_model_class_base = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source_base = [kwargs_sersic_ellipse] source_model_class_base = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.01, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class_base = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - 
kwargs_numerics_base = {'supersampling_factor': 2, 'supersampling_convolution': False} - imageModel_base = ImageModel(data_class, psf_class, lens_model_class, source_model_class_base, lens_light_model_class_base, point_source_class_base, kwargs_numerics=kwargs_numerics_base) - image_sim = sim_util.simulate_simple(imageModel_base, self.kwargs_lens, kwargs_source_base, - kwargs_lens_light_base, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.01, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class_base = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics_base = { + "supersampling_factor": 2, + "supersampling_convolution": False, + } + imageModel_base = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class_base, + lens_light_model_class_base, + point_source_class_base, + kwargs_numerics=kwargs_numerics_base, + ) + image_sim = sim_util.simulate_simple( + imageModel_base, + self.kwargs_lens, + kwargs_source_base, + kwargs_lens_light_base, + self.kwargs_ps, + ) data_class.update_data(image_sim) # create a starlet light distributions n_scales = 6 - source_map = imageModel_base.source_surface_brightness(kwargs_source_base, de_lensed=True, unconvolved=True) + source_map = imageModel_base.source_surface_brightness( + kwargs_source_base, de_lensed=True, unconvolved=True + ) starlets_class = SLIT_Starlets(force_no_pysap=_force_no_pysap) source_map_starlets = starlets_class.decomposition_2d(source_map, n_scales) - self.kwargs_source = [{'amp': source_map_starlets, 'n_scales': n_scales, 'n_pixels': numPix, 'scale': deltaPix, 'center_x': 0, 'center_y': 0}] - source_model_class = LightModel(light_model_list=['SLIT_STARLETS']) - lens_light_map = imageModel_base.lens_surface_brightness(kwargs_lens_light_base, unconvolved=True) + self.kwargs_source = [ + { + "amp": source_map_starlets, + "n_scales": n_scales, + "n_pixels": numPix, + "scale": deltaPix, + "center_x": 0, + "center_y": 0, + } + ] + source_model_class = LightModel(light_model_list=["SLIT_STARLETS"]) + lens_light_map = imageModel_base.lens_surface_brightness( + kwargs_lens_light_base, unconvolved=True + ) starlets_class = SLIT_Starlets(force_no_pysap=_force_no_pysap, second_gen=True) lens_light_starlets = starlets_class.decomposition_2d(lens_light_map, n_scales) - self.kwargs_lens_light = [{'amp': lens_light_starlets, 'n_scales': n_scales, 'n_pixels': numPix, 'scale': deltaPix, 'center_x': 0, 'center_y': 0}] - lens_light_model_class = LightModel(light_model_list=['SLIT_STARLETS_GEN2']) - - kwargs_numerics = {'supersampling_factor': 1} + self.kwargs_lens_light = [ + { + "amp": lens_light_starlets, + "n_scales": n_scales, + "n_pixels": numPix, + "scale": deltaPix, + "center_x": 0, + "center_y": 0, + } + ] + lens_light_model_class = LightModel(light_model_list=["SLIT_STARLETS_GEN2"]) + + kwargs_numerics = {"supersampling_factor": 1} kwargs_pixelbased = { - 'supersampling_factor_source': 2, # supersampling of pixelated source grid - + "supersampling_factor_source": 2, # supersampling of pixelated source grid # following choices are to minimize pixel solver runtime (not to get accurate reconstruction!) 
- 'threshold_decrease_type': 'none', - 'num_iter_source': 2, - 'num_iter_lens': 2, - 'num_iter_global': 2, - 'num_iter_weights': 2, + "threshold_decrease_type": "none", + "num_iter_source": 2, + "num_iter_lens": 2, + "num_iter_global": 2, + "num_iter_weights": 2, } - self.imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, - source_model_class=source_model_class, - lens_light_model_class=lens_light_model_class, - point_source_class=None, - kwargs_numerics=kwargs_numerics, kwargs_pixelbased=kwargs_pixelbased) - self.imageModel_source = ImageLinearFit(data_class, psf_class, lens_model_class, - source_model_class=source_model_class, - lens_light_model_class=None, - point_source_class=None, - kwargs_numerics=kwargs_numerics, kwargs_pixelbased=kwargs_pixelbased) - + self.imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class=source_model_class, + lens_light_model_class=lens_light_model_class, + point_source_class=None, + kwargs_numerics=kwargs_numerics, + kwargs_pixelbased=kwargs_pixelbased, + ) + self.imageModel_source = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class=source_model_class, + lens_light_model_class=None, + point_source_class=None, + kwargs_numerics=kwargs_numerics, + kwargs_pixelbased=kwargs_pixelbased, + ) + self.solver = LensEquationSolver(lensModel=self.imageModel.LensModel) def test_source_surface_brightness(self): - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, - unconvolved=False, de_lensed=True) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=True + ) assert len(source_model) == 100 - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=False) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=False, de_lensed=False + ) assert len(source_model) == 100 - npt.assert_almost_equal(source_model[10, 10], 0.13939841209844345 * 0.05**2, decimal=4) + npt.assert_almost_equal( + source_model[10, 10], 0.13939841209844345 * 0.05**2, decimal=4 + ) - source_model = self.imageModel.source_surface_brightness(self.kwargs_source, self.kwargs_lens, unconvolved=True, de_lensed=False) + source_model = self.imageModel.source_surface_brightness( + self.kwargs_source, self.kwargs_lens, unconvolved=True, de_lensed=False + ) assert len(source_model) == 100 - npt.assert_almost_equal(source_model[10, 10], 0.13536114618182182 * 0.05**2, decimal=4) + npt.assert_almost_equal( + source_model[10, 10], 0.13536114618182182 * 0.05**2, decimal=4 + ) def test_lens_surface_brightness(self): - lens_flux = self.imageModel.lens_surface_brightness(self.kwargs_lens_light, unconvolved=False) + lens_flux = self.imageModel.lens_surface_brightness( + self.kwargs_lens_light, unconvolved=False + ) npt.assert_almost_equal(lens_flux[50, 50], 0.011827638016863616, decimal=4) # the following should raise an error: no deconvolution is performed when pixel-based modelling lens light # lens_flux = self.imageModel.lens_surface_brightness(self.kwargs_lens_light, unconvolved=True) def test_image_linear_solve(self): - model, error_map, cov_param, param = self.imageModel.image_linear_solve(self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + model, error_map, cov_param, param = self.imageModel.image_linear_solve( + self.kwargs_lens, self.kwargs_source, 
self.kwargs_lens_light, self.kwargs_ps + ) chi2_reduced = self.imageModel.reduced_chi2(model, error_map) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) - model, error_map, cov_param, param = self.imageModel_source.image_linear_solve(self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + model, error_map, cov_param, param = self.imageModel_source.image_linear_solve( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) chi2_reduced = self.imageModel.reduced_chi2(model, error_map) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) def test_image_with_params(self): - model = self.imageModel.image(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, unconvolved=False, source_add=True, lens_light_add=True, point_source_add=True) + model = self.imageModel.image( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + unconvolved=False, + source_add=True, + lens_light_add=True, + point_source_add=True, + ) error_map = self.imageModel._error_map_psf(self.kwargs_lens, self.kwargs_ps) chi2_reduced = self.imageModel.reduced_chi2(model, error_map) npt.assert_almost_equal(chi2_reduced, 1, decimal=1) def test_likelihood_data_given_model(self): - logL = self.imageModel.likelihood_data_given_model(self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps, source_marg=False) + logL = self.imageModel.likelihood_data_given_model( + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + source_marg=False, + ) npt.assert_almost_equal(logL, -5000, decimal=-3) def test_reduced_residuals(self): - model = sim_util.simulate_simple(self.imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps, no_noise=True) + model = sim_util.simulate_simple( + self.imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + no_noise=True, + ) residuals = self.imageModel.reduced_residuals(model, error_map=0) npt.assert_almost_equal(np.std(residuals), 1.01, decimal=1) @@ -169,22 +288,32 @@ def test_numData_evaluate(self): assert numData == 10000 def test_num_param_linear(self): - num_param_linear = self.imageModel.num_param_linear(self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) - assert num_param_linear == 0 # pixels of pixel-based profiles not counted as linear param - - num_param_linear = self.imageModel_source.num_param_linear(self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) - assert num_param_linear == 0 # pixels of pixel-based profiles not counted as linear param + num_param_linear = self.imageModel.num_param_linear( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) + assert ( + num_param_linear == 0 + ) # pixels of pixel-based profiles not counted as linear param + + num_param_linear = self.imageModel_source.num_param_linear( + self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps + ) + assert ( + num_param_linear == 0 + ) # pixels of pixel-based profiles not counted as linear param def test_update_data(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1, inverse=True + ) data_class = ImageData(**kwargs_data) self.imageModel.update_data(data_class) assert self.imageModel.Data.num_pixel == 100 
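A brief, self-contained sketch of the statistic these tests repeatedly check (reduced chi-square near 1 and unit-scatter reduced residuals for a noise-only model), written with plain numpy; the helper below is a hypothetical stand-in, not the ImageLinearFit API.

import numpy as np
import numpy.testing as npt


def reduced_residuals_sketch(data, model, sigma):
    """Per-pixel residuals normalised by the noise level."""
    return (data - model) / sigma


rng = np.random.default_rng(42)
sigma = 0.05  # background noise per pixel, as in the setups above
model = np.zeros((100, 100))
data = model + rng.normal(scale=sigma, size=model.shape)

residuals = reduced_residuals_sketch(data, model, sigma)
# scatter of the reduced residuals is ~1, and so is the reduced chi-square
npt.assert_almost_equal(np.std(residuals), 1, decimal=1)
npt.assert_almost_equal(np.mean(residuals**2), 1, decimal=1)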
def test_create_empty(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) imageModel_empty = ImageModel(data_class, PSF()) assert imageModel_empty._psf_error_map == False @@ -193,31 +322,40 @@ def test_create_empty(self): assert flux.all() == 0 def test_extinction_map(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, exposure_time=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, exposure_time=1, background_rms=1 + ) data_class = ImageData(**kwargs_data) - extinction_class = DifferentialExtinction(optical_depth_model=['UNIFORM'], tau0_index=0) + extinction_class = DifferentialExtinction( + optical_depth_model=["UNIFORM"], tau0_index=0 + ) imageModel = ImageModel(data_class, PSF(), extinction_class=extinction_class) - extinction = imageModel.extinction_map(kwargs_extinction=[{'amp': 1}], kwargs_special={'tau0_list': [1, 0, 0]}) + extinction = imageModel.extinction_map( + kwargs_extinction=[{"amp": 1}], kwargs_special={"tau0_list": [1, 0, 0]} + ) npt.assert_almost_equal(extinction, np.exp(-1)) def test_error_response(self): - C_D_response, psf_model_error = self.imageModel._error_response(self.kwargs_lens, self.kwargs_ps, kwargs_special=None) + C_D_response, psf_model_error = self.imageModel._error_response( + self.kwargs_lens, self.kwargs_ps, kwargs_special=None + ) assert len(psf_model_error) == 100 print(np.sum(psf_model_error)) npt.assert_almost_equal(np.sum(psf_model_error), 0, decimal=3) - C_D_response, psf_model_error = self.imageModel_source._error_response(self.kwargs_lens, self.kwargs_ps, kwargs_special=None) + C_D_response, psf_model_error = self.imageModel_source._error_response( + self.kwargs_lens, self.kwargs_ps, kwargs_special=None + ) assert len(psf_model_error) == 100 print(np.sum(psf_model_error)) npt.assert_almost_equal(np.sum(psf_model_error), 0, decimal=3) class TestRaise(unittest.TestCase): - def __init__(self, *args, **kwargs): super(TestRaise, self).__init__(*args, **kwargs) # data specifics - sigma_bkg = .05 # background noise per pixel + sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -225,111 +363,220 @@ def __init__(self, *args, **kwargs): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg, inverse=True) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg, inverse=True + ) self.data_class = ImageData(**kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_class = PSF(**kwargs_psf) kernel = psf_class.kernel_point_source - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel, 'psf_error_map': np.ones_like(kernel) * 0.001} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel, + "psf_error_map": np.ones_like(kernel) * 0.001, + } self.psf_class = PSF(**kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + 
kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] self.lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] kwargs_lens_light_base = [kwargs_sersic] lens_light_model_class_base = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source_base = [kwargs_sersic_ellipse] source_model_class_base = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.01, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class_base = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - kwargs_numerics_base = {'supersampling_factor': 2, 'supersampling_convolution': False} - imageModel_base = ImageModel(self.data_class, self.psf_class, self.lens_model_class, source_model_class_base, lens_light_model_class_base, point_source_class_base, kwargs_numerics=kwargs_numerics_base) - image_sim = sim_util.simulate_simple(imageModel_base, self.kwargs_lens, kwargs_source_base, - kwargs_lens_light_base, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.01, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class_base = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + kwargs_numerics_base = { + "supersampling_factor": 2, + "supersampling_convolution": False, + } + imageModel_base = ImageModel( + self.data_class, + self.psf_class, + self.lens_model_class, + source_model_class_base, + lens_light_model_class_base, + point_source_class_base, + kwargs_numerics=kwargs_numerics_base, + ) + image_sim = sim_util.simulate_simple( + imageModel_base, + self.kwargs_lens, + kwargs_source_base, + kwargs_lens_light_base, + self.kwargs_ps, + ) self.data_class.update_data(image_sim) # create a starlet light distributions n_scales = 6 - source_map = imageModel_base.source_surface_brightness(kwargs_source_base, de_lensed=True, unconvolved=True) + source_map = imageModel_base.source_surface_brightness( + kwargs_source_base, de_lensed=True, unconvolved=True + ) starlets_class = SLIT_Starlets(force_no_pysap=_force_no_pysap) source_map_starlets = starlets_class.decomposition_2d(source_map, n_scales) - 
self.kwargs_source = [{'amp': source_map_starlets, 'n_scales': n_scales, 'n_pixels': numPix, 'scale': deltaPix, 'center_x': 0, 'center_y': 0}] - self.source_model_class = LightModel(light_model_list=['SLIT_STARLETS']) - lens_light_map = imageModel_base.lens_surface_brightness(kwargs_lens_light_base, unconvolved=True) + self.kwargs_source = [ + { + "amp": source_map_starlets, + "n_scales": n_scales, + "n_pixels": numPix, + "scale": deltaPix, + "center_x": 0, + "center_y": 0, + } + ] + self.source_model_class = LightModel(light_model_list=["SLIT_STARLETS"]) + lens_light_map = imageModel_base.lens_surface_brightness( + kwargs_lens_light_base, unconvolved=True + ) starlets_class = SLIT_Starlets(force_no_pysap=_force_no_pysap, second_gen=True) lens_light_starlets = starlets_class.decomposition_2d(lens_light_map, n_scales) - self.kwargs_lens_light = [{'amp': lens_light_starlets, 'n_scales': n_scales, 'n_pixels': numPix, 'scale': deltaPix, 'center_x': 0, 'center_y': 0}] - self.lens_light_model_class = LightModel(light_model_list=['SLIT_STARLETS_GEN2']) - - self.kwargs_numerics = {'supersampling_factor': 1} + self.kwargs_lens_light = [ + { + "amp": lens_light_starlets, + "n_scales": n_scales, + "n_pixels": numPix, + "scale": deltaPix, + "center_x": 0, + "center_y": 0, + } + ] + self.lens_light_model_class = LightModel( + light_model_list=["SLIT_STARLETS_GEN2"] + ) + + self.kwargs_numerics = {"supersampling_factor": 1} self.kwargs_pixelbased = { - 'supersampling_factor_source': 2, # supersampling of pixelated source grid - + "supersampling_factor_source": 2, # supersampling of pixelated source grid # following choices are to minimize pixel solver runtime (not to get accurate reconstruction!) - 'threshold_decrease_type': 'none', - 'num_iter_source': 2, - 'num_iter_lens': 2, - 'num_iter_global': 2, - 'num_iter_weights': 2, + "threshold_decrease_type": "none", + "num_iter_source": 2, + "num_iter_lens": 2, + "num_iter_global": 2, + "num_iter_weights": 2, } def test_raise(self): with self.assertRaises(ValueError): # test various numerics that are not supported by the pixelbased solver - kwargs_numerics = {'supersampling_factor': 2, 'supersampling_convolution': True} - imageModel = ImageLinearFit(self.data_class, self.psf_class, self.lens_model_class, - source_model_class=self.source_model_class, - lens_light_model_class=self.lens_light_model_class, - kwargs_numerics=kwargs_numerics, kwargs_pixelbased=self.kwargs_pixelbased) + kwargs_numerics = { + "supersampling_factor": 2, + "supersampling_convolution": True, + } + imageModel = ImageLinearFit( + self.data_class, + self.psf_class, + self.lens_model_class, + source_model_class=self.source_model_class, + lens_light_model_class=self.lens_light_model_class, + kwargs_numerics=kwargs_numerics, + kwargs_pixelbased=self.kwargs_pixelbased, + ) with self.assertRaises(ValueError): # test various numerics that are not supported by the pixelbased solver - kwargs_numerics = {'compute_mode': 'adaptive'} - imageModel = ImageLinearFit(self.data_class, self.psf_class, self.lens_model_class, - source_model_class=self.source_model_class, - lens_light_model_class=self.lens_light_model_class, - kwargs_numerics=kwargs_numerics, kwargs_pixelbased=self.kwargs_pixelbased) + kwargs_numerics = {"compute_mode": "adaptive"} + imageModel = ImageLinearFit( + self.data_class, + self.psf_class, + self.lens_model_class, + source_model_class=self.source_model_class, + lens_light_model_class=self.lens_light_model_class, + kwargs_numerics=kwargs_numerics, + 
kwargs_pixelbased=self.kwargs_pixelbased, + ) with self.assertRaises(ValueError): # test unsupported gaussian PSF type - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': 0.5, 'truncation': 5, 'pixel_size': 0.05} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": 0.5, + "truncation": 5, + "pixel_size": 0.05, + } psf_class = PSF(**kwargs_psf) - imageModel = ImageLinearFit(self.data_class, psf_class, self.lens_model_class, - source_model_class=self.source_model_class, - lens_light_model_class=self.lens_light_model_class, - kwargs_numerics=self.kwargs_numerics, kwargs_pixelbased=self.kwargs_pixelbased) + imageModel = ImageLinearFit( + self.data_class, + psf_class, + self.lens_model_class, + source_model_class=self.source_model_class, + lens_light_model_class=self.lens_light_model_class, + kwargs_numerics=self.kwargs_numerics, + kwargs_pixelbased=self.kwargs_pixelbased, + ) with self.assertRaises(ValueError): - kwargs_numerics = {'supersampling_factor': 1} + kwargs_numerics = {"supersampling_factor": 1} # test more than a single pixel-based light profile - source_model_class = LightModel(['SLIT_STARLETS', 'SLIT_STARLETS']) - imageModel = ImageLinearFit(self.data_class, self.psf_class, self.lens_model_class, - source_model_class=source_model_class, - lens_light_model_class=self.lens_light_model_class, - kwargs_numerics=self.kwargs_numerics, kwargs_pixelbased=self.kwargs_pixelbased) + source_model_class = LightModel(["SLIT_STARLETS", "SLIT_STARLETS"]) + imageModel = ImageLinearFit( + self.data_class, + self.psf_class, + self.lens_model_class, + source_model_class=source_model_class, + lens_light_model_class=self.lens_light_model_class, + kwargs_numerics=self.kwargs_numerics, + kwargs_pixelbased=self.kwargs_pixelbased, + ) with self.assertRaises(ValueError): # test access to unconvolved lens light surface brightness - imageModel = ImageLinearFit(self.data_class, self.psf_class, self.lens_model_class, - source_model_class=self.source_model_class, - lens_light_model_class=self.lens_light_model_class, - kwargs_numerics=self.kwargs_numerics, kwargs_pixelbased=self.kwargs_pixelbased) + imageModel = ImageLinearFit( + self.data_class, + self.psf_class, + self.lens_model_class, + source_model_class=self.source_model_class, + lens_light_model_class=self.lens_light_model_class, + kwargs_numerics=self.kwargs_numerics, + kwargs_pixelbased=self.kwargs_pixelbased, + ) imageModel.lens_surface_brightness(self.kwargs_lens_light, unconvolved=True) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_ImSim/test_image_model_with_interferometric_changes.py b/test/test_ImSim/test_image_model_with_interferometric_changes.py index b41f387f7..db3291a0b 100644 --- a/test/test_ImSim/test_image_model_with_interferometric_changes.py +++ b/test/test_ImSim/test_image_model_with_interferometric_changes.py @@ -16,76 +16,125 @@ and compare the (image_with_pb_and_psf) with scipy.signal.fftconvolve(image_without_pb_psf * pb, PSF, mode='same'). 
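The comparison is meaningful because the primary beam multiplies the sky model before the PSF convolution; applying it after the convolution would give a different image. A small numpy/scipy illustration with toy arrays (not lenstronomy objects) of why the ordering matters:

import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(0)
image = rng.random((100, 100))         # stand-in for the unconvolved model image
primary_beam = rng.random((100, 100))  # stand-in antenna primary beam
kernel = rng.random((21, 21))          # stand-in (unnormalised) PSF kernel

# primary beam applied before the convolution, as in the test below
pb_then_psf = fftconvolve(image * primary_beam, kernel, mode="same")
# primary beam applied after the convolution
psf_then_pb = fftconvolve(image, kernel, mode="same") * primary_beam

assert not np.allclose(pb_then_psf, psf_then_pb)  # the two orderings differ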
""" -def test_ImageModel_with_primary_beam_and_interferometry_psf(): - sigma_bkg = .05 +def test_ImageModel_with_primary_beam_and_interferometry_psf(): + sigma_bkg = 0.05 exp_time = np.inf - numPix = 100 + numPix = 100 deltaPix = 0.05 # simulate a primary beam (pb) - primary_beam = np.zeros((numPix,numPix)) + primary_beam = np.zeros((numPix, numPix)) for i in range(numPix): for j in range(numPix): - primary_beam[i,j] = np.exp(-1e-4*((i-78)**2+(j-56)**2)) + primary_beam[i, j] = np.exp(-1e-4 * ((i - 78) ** 2 + (j - 56) ** 2)) primary_beam /= np.max(primary_beam) # simulate a spherical sinc function as psf, which contains negative pixels - psf_test = np.zeros((221,221)) + psf_test = np.zeros((221, 221)) for i in range(221): for j in range(221): if i > j: - psf_test[i,j] = psf_test[j,i] - r = np.sqrt((i-110)**2 + (j-110)**2) + psf_test[i, j] = psf_test[j, i] + r = np.sqrt((i - 110) ** 2 + (j - 110) ** 2) if r == 0: - psf_test[i,j] = 1 + psf_test[i, j] = 1 else: - psf_test[i,j] = np.sin(r*0.5)/(r*0.5) - + psf_test[i, j] = np.sin(r * 0.5) / (r * 0.5) + # define two data classes - kwargs_data_no_pb = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data_no_pb = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class_no_pb = ImageData(**kwargs_data_no_pb) - kwargs_data_with_pb = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) - kwargs_data_with_pb['antenna_primary_beam'] = primary_beam + kwargs_data_with_pb = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) + kwargs_data_with_pb["antenna_primary_beam"] = primary_beam data_class_with_pb = ImageData(**kwargs_data_with_pb) # define two psf classes - kwargs_psf_none = {'psf_type': 'NONE'} + kwargs_psf_none = {"psf_type": "NONE"} psf_class_none = PSF(**kwargs_psf_none) - kernel_cut = kernel_util.cut_psf(psf_test, 201, normalisation = False) - kwargs_psf = {'psf_type': 'PIXEL', 'pixel_size': deltaPix, 'kernel_point_source': kernel_cut,'kernel_point_source_normalisation': False } + kernel_cut = kernel_util.cut_psf(psf_test, 201, normalisation=False) + kwargs_psf = { + "psf_type": "PIXEL", + "pixel_size": deltaPix, + "kernel_point_source": kernel_cut, + "kernel_point_source_normalisation": False, + } psf_class = PSF(**kwargs_psf) # define lens model and source model - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.04} - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_shear = {"gamma1": 0.01, "gamma2": 0.01} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.04, + } + lens_model_list = ["SPEP", "SHEAR"] kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 30., 'R_sersic': 0.3, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} - lens_light_model_list = ['SERSIC'] + kwargs_sersic = { + "amp": 30.0, + "R_sersic": 0.3, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } + lens_light_model_list = ["SERSIC"] kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': 0.05, 'e2': 0.02} - source_model_list = ['SERSIC_ELLIPSE'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": 0.05, + "e2": 0.02, + } + 
source_model_list = ["SERSIC_ELLIPSE"] kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} + kwargs_numerics = {"supersampling_factor": 1, "supersampling_convolution": False} # make images using 1) data and psf classes without pb and or psf - imageModel_no_pb_psf = ImageModel(data_class_no_pb, psf_class_none, lens_model_class, source_model_class, lens_light_model_class, kwargs_numerics=kwargs_numerics) - image_sim_no_pb_psf = imageModel_no_pb_psf.image(kwargs_lens, kwargs_source,kwargs_lens_light) + imageModel_no_pb_psf = ImageModel( + data_class_no_pb, + psf_class_none, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim_no_pb_psf = imageModel_no_pb_psf.image( + kwargs_lens, kwargs_source, kwargs_lens_light + ) # make images using 2) data and psf classes with defined pb and or psf - imageModel_with_pb_psf = ImageModel(data_class_with_pb, psf_class, lens_model_class, source_model_class, lens_light_model_class, kwargs_numerics=kwargs_numerics) - image_sim_with_pb_psf = imageModel_with_pb_psf.image(kwargs_lens, kwargs_source,kwargs_lens_light) + imageModel_with_pb_psf = ImageModel( + data_class_with_pb, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim_with_pb_psf = imageModel_with_pb_psf.image( + kwargs_lens, kwargs_source, kwargs_lens_light + ) # add pb and psf to 1) out of the imageModel, compare them to check if the pb and psf changes make sense - image_sim_with_pb_psf2 = scipy.signal.fftconvolve(image_sim_no_pb_psf * primary_beam,kernel_cut,mode='same') - npt.assert_almost_equal(image_sim_with_pb_psf, image_sim_with_pb_psf2, decimal=8) \ No newline at end of file + image_sim_with_pb_psf2 = scipy.signal.fftconvolve( + image_sim_no_pb_psf * primary_beam, kernel_cut, mode="same" + ) + npt.assert_almost_equal(image_sim_with_pb_psf, image_sim_with_pb_psf2, decimal=8) diff --git a/test/test_LensModel/test_LightConeSim/test_light_cone.py b/test/test_LensModel/test_LightConeSim/test_light_cone.py index a237e97cd..0b901db3a 100644 --- a/test/test_LensModel/test_LightConeSim/test_light_cone.py +++ b/test/test_LensModel/test_LightConeSim/test_light_cone.py @@ -9,7 +9,6 @@ class TestLightCone(object): - def setup_method(self): # define a cosmology cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) @@ -18,66 +17,131 @@ def setup_method(self): z_source = 2 # source redshift self._z_source = z_source # analytic profile class in multi plane - self._lensmodel = LensModel(lens_model_list=['NFW', 'NFW', 'NFW'], lens_redshift_list=redshift_list, - multi_plane=True, z_source_convention=z_source, cosmo=cosmo, z_source=z_source) + self._lensmodel = LensModel( + lens_model_list=["NFW", "NFW", "NFW"], + lens_redshift_list=redshift_list, + multi_plane=True, + z_source_convention=z_source, + cosmo=cosmo, + z_source=z_source, + ) # a single plane class from which the convergence/mass maps are computeded - single_plane = LensModel(lens_model_list=['NFW'], multi_plane=False) + single_plane = LensModel(lens_model_list=["NFW"], multi_plane=False) # multi-plane class with three interpolation grids - self._lens_model_interp = LensModel(lens_model_list=['INTERPOL', 'INTERPOL', 'INTERPOL'], lens_redshift_list=redshift_list, - multi_plane=True, z_source_convention=z_source, cosmo=cosmo, z_source=z_source) + self._lens_model_interp = 
LensModel( + lens_model_list=["INTERPOL", "INTERPOL", "INTERPOL"], + lens_redshift_list=redshift_list, + multi_plane=True, + z_source_convention=z_source, + cosmo=cosmo, + z_source=z_source, + ) # deflector parameterisation in units of reduced deflection angles to the source convention redshift logM_200_list = [8, 9, 10] # log 10 halo masses of the three deflectors c_list = [20, 10, 8] # concentrations of the three halos kwargs_lens = [] kwargs_lens_interp = [] grid_spacing = 0.01 # spacing of the convergence grid in units arc seconds - x_grid, y_grid = util.make_grid(numPix=500, deltapix=grid_spacing) # we create the grid coordinates centered at zero - x_axes, y_axes = util.get_axes(x_grid, y_grid) # we need the axes only for the interpolation + x_grid, y_grid = util.make_grid( + numPix=500, deltapix=grid_spacing + ) # we create the grid coordinates centered at zero + x_axes, y_axes = util.get_axes( + x_grid, y_grid + ) # we need the axes only for the interpolation mass_map_list = [] grid_spacing_list_mpc = [] for i, z in enumerate(redshift_list): # loop through the three deflectors - lens_cosmo = LensCosmo(z_lens=z, z_source=z_source, cosmo=cosmo) # instance of LensCosmo, a class that manages cosmology relevant quantities of a lens - alpha_Rs, Rs = lens_cosmo.nfw_physical2angle(M=10**(logM_200_list[i]), c=c_list[i]) # we turn the halo mass and concentration in reduced deflection angles and angles on the sky - kwargs_nfw = {'Rs': Rs, 'alpha_Rs': alpha_Rs, 'center_x': 0, 'center_y': 0} # lensing parameters of the NFW profile in lenstronomy conventions + lens_cosmo = LensCosmo( + z_lens=z, z_source=z_source, cosmo=cosmo + ) # instance of LensCosmo, a class that manages cosmology relevant quantities of a lens + alpha_Rs, Rs = lens_cosmo.nfw_physical2angle( + M=10 ** (logM_200_list[i]), c=c_list[i] + ) # we turn the halo mass and concentration in reduced deflection angles and angles on the sky + kwargs_nfw = { + "Rs": Rs, + "alpha_Rs": alpha_Rs, + "center_x": 0, + "center_y": 0, + } # lensing parameters of the NFW profile in lenstronomy conventions kwargs_lens.append(kwargs_nfw) - kappa_map = single_plane.kappa(x_grid, y_grid, [kwargs_nfw]) # convergence map of a single NFW profile + kappa_map = single_plane.kappa( + x_grid, y_grid, [kwargs_nfw] + ) # convergence map of a single NFW profile kappa_map = util.array2image(kappa_map) - mass_map = lens_cosmo.sigma_crit_angle * kappa_map * grid_spacing ** 2 # projected mass per pixel on the gird + mass_map = ( + lens_cosmo.sigma_crit_angle * kappa_map * grid_spacing**2 + ) # projected mass per pixel on the gird mass_map_list.append(mass_map) - npt.assert_almost_equal(np.log10(np.sum(mass_map)), logM_200_list[i], decimal=0) # check whether the sum of mass roughtly correspoonds the mass definition - grid_spacing_mpc = lens_cosmo.arcsec2phys_lens(grid_spacing) # turn grid spacing from arcseconds into Mpc + npt.assert_almost_equal( + np.log10(np.sum(mass_map)), logM_200_list[i], decimal=0 + ) # check whether the sum of mass roughtly correspoonds the mass definition + grid_spacing_mpc = lens_cosmo.arcsec2phys_lens( + grid_spacing + ) # turn grid spacing from arcseconds into Mpc grid_spacing_list_mpc.append(grid_spacing_mpc) - f_x, f_y = convergence_integrals.deflection_from_kappa_grid(kappa_map, grid_spacing) # perform the deflection calculation from the convergence map - f_ = convergence_integrals.potential_from_kappa_grid(kappa_map, grid_spacing) # perform the lensing potential calculation from the convergence map (attention: arbitrary normalization) 
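For reference, the INTERPOL deflectors used in this setup can be built from any convergence map in the same way; below is a short sketch with a toy Gaussian kappa map (import paths assumed to follow the standard lenstronomy layout; grid size and profile are purely illustrative):

import numpy as np
import lenstronomy.Util.util as util
from lenstronomy.LensModel import convergence_integrals
from lenstronomy.LensModel.lens_model import LensModel

grid_spacing = 0.01  # arcsec per pixel
x_grid, y_grid = util.make_grid(numPix=200, deltapix=grid_spacing)
x_axes, y_axes = util.get_axes(x_grid, y_grid)

# toy convergence map: a circular Gaussian overdensity
r2 = util.array2image(x_grid**2 + y_grid**2)
kappa_map = 0.5 * np.exp(-r2 / (2 * 0.2**2))

# numerical deflection and potential from the convergence map
f_x, f_y = convergence_integrals.deflection_from_kappa_grid(kappa_map, grid_spacing)
f_ = convergence_integrals.potential_from_kappa_grid(kappa_map, grid_spacing)

kwargs_interp = [
    {"grid_interp_x": x_axes, "grid_interp_y": y_axes, "f_": f_, "f_x": f_x, "f_y": f_y}
]
lens_interp = LensModel(lens_model_list=["INTERPOL"])
alpha_x, alpha_y = lens_interp.alpha(0.1, 0.0, kwargs_interp)  # deflection at a test position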
- kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': f_, 'f_x': f_x, 'f_y': f_y} # keyword arguments of the interpolation model + f_x, f_y = convergence_integrals.deflection_from_kappa_grid( + kappa_map, grid_spacing + ) # perform the deflection calculation from the convergence map + f_ = convergence_integrals.potential_from_kappa_grid( + kappa_map, grid_spacing + ) # perform the lensing potential calculation from the convergence map (attention: arbitrary normalization) + kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": f_, + "f_x": f_x, + "f_y": f_y, + } # keyword arguments of the interpolation model kwargs_lens_interp.append(kwargs_interp) self.kwargs_lens = kwargs_lens self.kwargs_lens_interp = kwargs_lens_interp - self.lightCone = LightCone(mass_map_list, grid_spacing_list_mpc, redshift_list) # here we make the instance of the LightCone class based on the mass map, physical grid spacing and redshifts. + self.lightCone = LightCone( + mass_map_list, grid_spacing_list_mpc, redshift_list + ) # here we make the instance of the LightCone class based on the mass map, physical grid spacing and redshifts. def test_ray_shooting(self): - beta_x, beta_y = self._lensmodel.ray_shooting(2., 1., self.kwargs_lens) - beta_x_num, beta_y_num = self._lens_model_interp.ray_shooting(2., 1., self.kwargs_lens_interp) + beta_x, beta_y = self._lensmodel.ray_shooting(2.0, 1.0, self.kwargs_lens) + beta_x_num, beta_y_num = self._lens_model_interp.ray_shooting( + 2.0, 1.0, self.kwargs_lens_interp + ) npt.assert_almost_equal(beta_x_num, beta_x, decimal=1) npt.assert_almost_equal(beta_y_num, beta_y, decimal=1) - lens_model, kwargs_lens = self.lightCone.cone_instance(z_source=self._z_source, cosmo=self._cosmo, multi_plane=True) + lens_model, kwargs_lens = self.lightCone.cone_instance( + z_source=self._z_source, cosmo=self._cosmo, multi_plane=True + ) assert len(lens_model.lens_model_list) == 3 - beta_x_cone, beta_y_cone = lens_model.ray_shooting(2., 1., kwargs_lens) - npt.assert_almost_equal(kwargs_lens[0]['grid_interp_x'], self.kwargs_lens_interp[0]['grid_interp_x'], decimal=5) - npt.assert_almost_equal(kwargs_lens[0]['grid_interp_y'], self.kwargs_lens_interp[0]['grid_interp_y'], decimal=5) + beta_x_cone, beta_y_cone = lens_model.ray_shooting(2.0, 1.0, kwargs_lens) + npt.assert_almost_equal( + kwargs_lens[0]["grid_interp_x"], + self.kwargs_lens_interp[0]["grid_interp_x"], + decimal=5, + ) + npt.assert_almost_equal( + kwargs_lens[0]["grid_interp_y"], + self.kwargs_lens_interp[0]["grid_interp_y"], + decimal=5, + ) - npt.assert_almost_equal(kwargs_lens[0]['f_x'], self.kwargs_lens_interp[0]['f_x'], decimal=5) - npt.assert_almost_equal(kwargs_lens[0]['f_y'], self.kwargs_lens_interp[0]['f_y'], decimal=5) + npt.assert_almost_equal( + kwargs_lens[0]["f_x"], self.kwargs_lens_interp[0]["f_x"], decimal=5 + ) + npt.assert_almost_equal( + kwargs_lens[0]["f_y"], self.kwargs_lens_interp[0]["f_y"], decimal=5 + ) npt.assert_almost_equal(beta_x_cone, beta_x_num, decimal=5) npt.assert_almost_equal(beta_y_cone, beta_y_num, decimal=5) def test_deflection(self): alpha_x, alpha_y = self._lensmodel.alpha(2, 1, self.kwargs_lens) - alpha_x_num, alpha_y_num = self._lens_model_interp.alpha(2, 1, self.kwargs_lens_interp) + alpha_x_num, alpha_y_num = self._lens_model_interp.alpha( + 2, 1, self.kwargs_lens_interp + ) npt.assert_almost_equal(alpha_x_num, alpha_x, decimal=3) npt.assert_almost_equal(alpha_y_num, alpha_y, decimal=3) - lens_model, kwargs_lens = 
self.lightCone.cone_instance(z_source=self._z_source, cosmo=self._cosmo, multi_plane=True) + lens_model, kwargs_lens = self.lightCone.cone_instance( + z_source=self._z_source, cosmo=self._cosmo, multi_plane=True + ) alpha_x_cone, alpha_y_cone = lens_model.alpha(2, 1, kwargs_lens) npt.assert_almost_equal(alpha_x_cone, alpha_x, decimal=3) npt.assert_almost_equal(alpha_y_cone, alpha_y, decimal=3) @@ -88,6 +152,8 @@ def test_arrival_time(self): f_ = self._lensmodel.arrival_time(x, y, self.kwargs_lens) f_num = self._lens_model_interp.arrival_time(x, y, self.kwargs_lens_interp) npt.assert_almost_equal(f_num[0] - f_num[1], f_[0] - f_[1], decimal=1) - lens_model, kwargs_lens = self.lightCone.cone_instance(z_source=self._z_source, cosmo=self._cosmo, multi_plane=True) + lens_model, kwargs_lens = self.lightCone.cone_instance( + z_source=self._z_source, cosmo=self._cosmo, multi_plane=True + ) f_cone = lens_model.arrival_time(x, y, kwargs_lens) npt.assert_almost_equal(f_cone[0] - f_cone[1], f_[0] - f_[1], decimal=1) diff --git a/test/test_LensModel/test_LineOfSight/test_single_plane_los.py b/test/test_LensModel/test_LineOfSight/test_single_plane_los.py index 773b112eb..445ae3593 100644 --- a/test/test_LensModel/test_LineOfSight/test_single_plane_los.py +++ b/test/test_LensModel/test_LineOfSight/test_single_plane_los.py @@ -1,4 +1,4 @@ -__author__ = 'nataliehogg' +__author__ = "nataliehogg" import numpy as np import numpy.testing as npt @@ -13,83 +13,130 @@ from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver from astropy.cosmology import default_cosmology + cosmo = default_cosmology.get() try: import fastell4py + bool_test = True except: bool_test = False class TestSinglePlaneLOS(object): - """ - tests the SinglePlaneLOS routines + """Tests the SinglePlaneLOS routines. - these functions are the same as in TestLensModel but with the addition of LOS and LOS_MINIMAL as profiles. - with all params in self.kwargs_los set to zero, the results should be the same as the non-LOS cases originally tested - the test_los_vs_multiplane checks that a multiplane setup with three shear planes returns the same as the LOS and LOS MINIMAL models + these functions are the same as in TestLensModel but with the addition of LOS and + LOS_MINIMAL as profiles. 
with all params in self.kwargs_los set to zero, the results + should be the same as the non-LOS cases originally tested the test_los_vs_multiplane + checks that a multiplane setup with three shear planes returns the same as the LOS + and LOS MINIMAL models """ def setup_method(self): - self.lensModel = SinglePlane(['GAUSSIAN']) - self.lensModel_los = SinglePlaneLOS(['GAUSSIAN', 'LOS'], index_los = 1) - self.lensModel_minimal = SinglePlaneLOS(['GAUSSIAN', 'LOS_MINIMAL'], index_los = 1) - self.kwargs = {'amp': 1., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.} - self.los_kwargs = {'gamma1_os': 0.0, 'gamma2_os': 0.0, 'kappa_os': 0.0, 'omega_os': 0.0, - 'gamma1_od': 0.0, 'gamma2_od': 0.0, 'kappa_od': 0.0, 'omega_od': 0.0, - 'gamma1_ds': 0.0, 'gamma2_ds': 0.0, 'kappa_ds': 0.0, 'omega_ds': 0.0, - 'gamma1_los': 0.0, 'gamma2_los': 0.0, 'kappa_los': 0.0, 'omega_los': 0.0} + self.lensModel = SinglePlane(["GAUSSIAN"]) + self.lensModel_los = SinglePlaneLOS(["GAUSSIAN", "LOS"], index_los=1) + self.lensModel_minimal = SinglePlaneLOS( + ["GAUSSIAN", "LOS_MINIMAL"], index_los=1 + ) + self.kwargs = { + "amp": 1.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 0.0, + "center_y": 0.0, + } + self.los_kwargs = { + "gamma1_os": 0.0, + "gamma2_os": 0.0, + "kappa_os": 0.0, + "omega_os": 0.0, + "gamma1_od": 0.0, + "gamma2_od": 0.0, + "kappa_od": 0.0, + "omega_od": 0.0, + "gamma1_ds": 0.0, + "gamma2_ds": 0.0, + "kappa_ds": 0.0, + "omega_ds": 0.0, + "gamma1_los": 0.0, + "gamma2_los": 0.0, + "kappa_los": 0.0, + "omega_los": 0.0, + } def test_potential(self): - output = self.lensModel.potential(x=1., y=1., kwargs = [self.kwargs]) - output_los = self.lensModel_los.potential(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) - output_minimal = self.lensModel_minimal.potential(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) + output = self.lensModel.potential(x=1.0, y=1.0, kwargs=[self.kwargs]) + output_los = self.lensModel_los.potential( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) + output_minimal = self.lensModel_minimal.potential( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) npt.assert_almost_equal(output_los, output, decimal=8) npt.assert_almost_equal(output_minimal, output, decimal=8) def test_alpha(self): - output1, output2 = self.lensModel.alpha(x=1., y=1., kwargs = [self.kwargs]) - output1_los, output2_los = self.lensModel_los.alpha(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) - output1_minimal, output2_minimal = self.lensModel_minimal.alpha(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) + output1, output2 = self.lensModel.alpha(x=1.0, y=1.0, kwargs=[self.kwargs]) + output1_los, output2_los = self.lensModel_los.alpha( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) + output1_minimal, output2_minimal = self.lensModel_minimal.alpha( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) npt.assert_almost_equal(output1_los, output1, decimal=8) npt.assert_almost_equal(output2_los, output2, decimal=8) npt.assert_almost_equal(output1_minimal, output1, decimal=8) npt.assert_almost_equal(output2_minimal, output2, decimal=8) def test_ray_shooting(self): - delta_x, delta_y = self.lensModel.ray_shooting(x=1., y=1., kwargs=[self.kwargs]) - delta_x_los, delta_y_los = self.lensModel_los.ray_shooting(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) - delta_x_minimal, delta_y_minimal = self.lensModel_minimal.ray_shooting(x=1., y=1., kwargs=[self.kwargs, self.los_kwargs]) + delta_x, delta_y = self.lensModel.ray_shooting( + x=1.0, y=1.0, kwargs=[self.kwargs] 
+ ) + delta_x_los, delta_y_los = self.lensModel_los.ray_shooting( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) + delta_x_minimal, delta_y_minimal = self.lensModel_minimal.ray_shooting( + x=1.0, y=1.0, kwargs=[self.kwargs, self.los_kwargs] + ) npt.assert_almost_equal(delta_x_los, delta_x, decimal=8) npt.assert_almost_equal(delta_y_los, delta_y, decimal=8) npt.assert_almost_equal(delta_x_minimal, delta_x, decimal=8) npt.assert_almost_equal(delta_y_minimal, delta_y, decimal=8) def test_mass_2d(self): - mass_kwargs = {'amp': 1., 'sigma': 2., 'center_x': 0., 'center_y': 0.} + mass_kwargs = {"amp": 1.0, "sigma": 2.0, "center_x": 0.0, "center_y": 0.0} - lensModel = SinglePlane(['GAUSSIAN_KAPPA']) - lensModel_los = SinglePlaneLOS(['GAUSSIAN_KAPPA', 'LOS'], index_los = 1) - lensModel_minimal = SinglePlaneLOS(['GAUSSIAN_KAPPA', 'LOS_MINIMAL'], index_los = 1) + lensModel = SinglePlane(["GAUSSIAN_KAPPA"]) + lensModel_los = SinglePlaneLOS(["GAUSSIAN_KAPPA", "LOS"], index_los=1) + lensModel_minimal = SinglePlaneLOS( + ["GAUSSIAN_KAPPA", "LOS_MINIMAL"], index_los=1 + ) output = lensModel.mass_2d(r=1, kwargs=[mass_kwargs]) output_los = lensModel_los.mass_2d(r=1, kwargs=[mass_kwargs, self.los_kwargs]) - output_minimal = lensModel_minimal.mass_2d(r=1, kwargs=[mass_kwargs, self.los_kwargs]) + output_minimal = lensModel_minimal.mass_2d( + r=1, kwargs=[mass_kwargs, self.los_kwargs] + ) npt.assert_almost_equal(output_los, output, decimal=8) npt.assert_almost_equal(output_minimal, output, decimal=8) def test_mass_3d(self): - mass_kwargs = {'amp': 1., 'sigma': 2., 'center_x': 0., 'center_y': 0.} + mass_kwargs = {"amp": 1.0, "sigma": 2.0, "center_x": 0.0, "center_y": 0.0} - lensModel = SinglePlane(['GAUSSIAN_KAPPA']) - lensModel_los = SinglePlaneLOS(['GAUSSIAN_KAPPA', 'LOS'], index_los = 1) - lensModel_minimal = SinglePlaneLOS(['GAUSSIAN_KAPPA', 'LOS_MINIMAL'], index_los = 1) + lensModel = SinglePlane(["GAUSSIAN_KAPPA"]) + lensModel_los = SinglePlaneLOS(["GAUSSIAN_KAPPA", "LOS"], index_los=1) + lensModel_minimal = SinglePlaneLOS( + ["GAUSSIAN_KAPPA", "LOS_MINIMAL"], index_los=1 + ) output = lensModel.mass_3d(r=1, kwargs=[mass_kwargs]) output_los = lensModel_los.mass_3d(r=1, kwargs=[mass_kwargs, self.los_kwargs]) - output_minimal = lensModel_minimal.mass_3d(r=1, kwargs=[mass_kwargs, self.los_kwargs]) + output_minimal = lensModel_minimal.mass_3d( + r=1, kwargs=[mass_kwargs, self.los_kwargs] + ) npt.assert_almost_equal(output_los, output, decimal=8) npt.assert_almost_equal(output_minimal, output, decimal=8) @@ -101,20 +148,38 @@ def test_density(self): density_model = sis.density_lens(r=r, theta_E=theta_E) # LOS - lensModel_los = SinglePlaneLOS(lens_model_list=['SIS', 'LOS'], index_los = 1) - density_los = lensModel_los.density(r=r, kwargs=[{'theta_E': theta_E}, self.los_kwargs]) + lensModel_los = SinglePlaneLOS(lens_model_list=["SIS", "LOS"], index_los=1) + density_los = lensModel_los.density( + r=r, kwargs=[{"theta_E": theta_E}, self.los_kwargs] + ) npt.assert_almost_equal(density_los, density_model, decimal=8) # LOS_MINIMAL - lensModel_minimal = SinglePlaneLOS(lens_model_list = ['SIS', 'LOS_MINIMAL'], index_los = 1) - density_minimal = lensModel_minimal.density(r=r, kwargs=[{'theta_E': theta_E}, self.los_kwargs]) + lensModel_minimal = SinglePlaneLOS( + lens_model_list=["SIS", "LOS_MINIMAL"], index_los=1 + ) + density_minimal = lensModel_minimal.density( + r=r, kwargs=[{"theta_E": theta_E}, self.los_kwargs] + ) npt.assert_almost_equal(density_minimal, density_model, decimal=8) def test_bool_list(self): 
- lensModel_los = SinglePlaneLOS(['SPEP', 'SHEAR', 'LOS'], index_los = 2) - lensModel_minimal = SinglePlaneLOS(['SPEP', 'SHEAR', 'LOS_MINIMAL'], index_los = 2) - kwargs = [{'theta_E': 1, 'gamma': 2, 'e1': 0.1, 'e2': -0.1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.01, 'gamma2': -0.02}, self.los_kwargs] + lensModel_los = SinglePlaneLOS(["SPEP", "SHEAR", "LOS"], index_los=2) + lensModel_minimal = SinglePlaneLOS( + ["SPEP", "SHEAR", "LOS_MINIMAL"], index_los=2 + ) + kwargs = [ + { + "theta_E": 1, + "gamma": 2, + "e1": 0.1, + "e2": -0.1, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.01, "gamma2": -0.02}, + self.los_kwargs, + ] # LOS alphax_1_los, alphay_1_los = lensModel_los.alpha(1, 1, kwargs, k=0) @@ -130,29 +195,40 @@ def test_bool_list(self): # LOS_MINIMAL alphax_1_minimal, alphay_1_minimal = lensModel_minimal.alpha(1, 1, kwargs, k=0) - alphax_1_list_minimal, alphay_1_list_minimal = lensModel_minimal.alpha(1, 1, kwargs, k=[0]) + alphax_1_list_minimal, alphay_1_list_minimal = lensModel_minimal.alpha( + 1, 1, kwargs, k=[0] + ) npt.assert_almost_equal(alphax_1_minimal, alphax_1_list_minimal, decimal=5) npt.assert_almost_equal(alphay_1_minimal, alphay_1_list_minimal, decimal=5) - alphax_1_1_minimal, alphay_1_1_minimal = lensModel_minimal.alpha(1, 1, kwargs, k=0) - alphax_1_2_minimal, alphay_1_2_minimal = lensModel_minimal.alpha(1, 1, kwargs, k=1) - alphax_full_minimal, alphay_full_minimal = lensModel_minimal.alpha(1, 1, kwargs, k=None) - npt.assert_almost_equal(alphax_1_1_minimal + alphax_1_2_minimal, alphax_full_minimal, decimal=5) - npt.assert_almost_equal(alphay_1_1_minimal + alphay_1_2_minimal, alphay_full_minimal, decimal=5) + alphax_1_1_minimal, alphay_1_1_minimal = lensModel_minimal.alpha( + 1, 1, kwargs, k=0 + ) + alphax_1_2_minimal, alphay_1_2_minimal = lensModel_minimal.alpha( + 1, 1, kwargs, k=1 + ) + alphax_full_minimal, alphay_full_minimal = lensModel_minimal.alpha( + 1, 1, kwargs, k=None + ) + npt.assert_almost_equal( + alphax_1_1_minimal + alphax_1_2_minimal, alphax_full_minimal, decimal=5 + ) + npt.assert_almost_equal( + alphay_1_1_minimal + alphay_1_2_minimal, alphay_full_minimal, decimal=5 + ) def test_los_versus_multiplane(self): - """ - this function asserts that the outcome from LOS and LOS MINIMAL is the same as MultiPlane - """ + """This function asserts that the outcome from LOS and LOS MINIMAL is the same + as MultiPlane.""" # set up the cosmology to convert between shears # the exact numbers don't matter because we are just doing a comparison - z_o = 0.0 # redshift of observer - z_d = 0.5 # redshift of main lens - z_s = 2.0 # redshift of source + z_o = 0.0 # redshift of observer + z_d = 0.5 # redshift of main lens + z_s = 2.0 # redshift of source - z_f = (z_o + z_d)/2 - z_b = (z_d + z_s)/2 + z_f = (z_o + z_d) / 2 + z_b = (z_d + z_s) / 2 gamma1_od = 0.05 gamma2_od = -0.01 @@ -165,11 +241,19 @@ def d(z1, z2): return cosmo.angular_diameter_distance_z1z2(z1, z2).to_value() # conversion of linear LOS shears to lenstronomy convention - gamma1_f = gamma1_od*((d(z_o, z_d)*d(z_f, z_s))/(d(z_o, z_s)*d(z_f, z_d))) - gamma2_f = gamma2_od*((d(z_o, z_d)*d(z_f, z_s))/(d(z_o, z_s)*d(z_f, z_d))) - - gamma1_b = gamma1_ds*((d(z_o, z_b)*d(z_d, z_s))/(d(z_o, z_s)*d(z_d, z_b))) - gamma2_b = gamma2_ds*((d(z_o, z_b)*d(z_d, z_s))/(d(z_o, z_s)*d(z_d, z_b))) + gamma1_f = gamma1_od * ( + (d(z_o, z_d) * d(z_f, z_s)) / (d(z_o, z_s) * d(z_f, z_d)) + ) + gamma2_f = gamma2_od * ( + (d(z_o, z_d) * d(z_f, z_s)) / (d(z_o, z_s) * d(z_f, z_d)) + ) + + gamma1_b = gamma1_ds * ( + (d(z_o, z_b) * 
d(z_d, z_s)) / (d(z_o, z_s) * d(z_d, z_b)) + ) + gamma2_b = gamma2_ds * ( + (d(z_o, z_b) * d(z_d, z_s)) / (d(z_o, z_s) * d(z_d, z_b)) + ) gamma1_d = gamma1_os - gamma1_f - gamma1_b gamma2_d = gamma2_os - gamma2_f - gamma2_b @@ -177,56 +261,81 @@ def d(z1, z2): # compute non-linear correction to os term Identity = np.identity(2) - Gamma_f = np.array([[gamma1_f, gamma2_f], - [gamma2_f, -gamma1_f]]) + Gamma_f = np.array([[gamma1_f, gamma2_f], [gamma2_f, -gamma1_f]]) - Gamma_d = np.array([[gamma1_d, gamma2_d], - [gamma2_d, -gamma1_d]]) + Gamma_d = np.array([[gamma1_d, gamma2_d], [gamma2_d, -gamma1_d]]) + Gamma_b = np.array([[gamma1_b, gamma2_b], [gamma2_b, -gamma1_b]]) - Gamma_b = np.array([[gamma1_b, gamma2_b], - [gamma2_b, -gamma1_b]]) + Gamma_od = np.array([[gamma1_od, gamma2_od], [gamma2_od, -gamma1_od]]) - Gamma_od = np.array([[gamma1_od, gamma2_od], - [gamma2_od, -gamma1_od]]) + Gamma_ofb = np.array(Gamma_f) * ( + (d(z_o, z_s) * d(z_f, z_b)) / (d(z_o, z_b) * d(z_f, z_s)) + ) - Gamma_ofb = np.array(Gamma_f)*((d(z_o, z_s)*d(z_f, z_b))/(d(z_o, z_b)*d(z_f, z_s))) + Gamma_odb = np.array(Gamma_d) * ( + (d(z_o, z_s) * d(z_d, z_b)) / (d(z_o, z_b) * d(z_d, z_s)) + ) - Gamma_odb = np.array(Gamma_d)*((d(z_o, z_s)*d(z_d, z_b))/(d(z_o, z_b)*d(z_d, z_s))) + Gamma_os = ( + Gamma_f + + Gamma_d + + Gamma_b + - np.matmul(Gamma_d, Gamma_od) + - np.matmul(Gamma_b, Gamma_ofb + np.matmul(Gamma_odb, Identity - Gamma_od)) + ) - Gamma_os = Gamma_f + Gamma_d + Gamma_b - np.matmul(Gamma_d, Gamma_od) - np.matmul(Gamma_b, Gamma_ofb + np.matmul(Gamma_odb, Identity - Gamma_od)) - - kappa_os = (Gamma_os[0, 0] + Gamma_os[1, 1])/2 - omega_os = (Gamma_os[1, 0] - Gamma_os[0, 1])/2 - gamma1_os = (Gamma_os[0, 0] - Gamma_os[1, 1])/2 - gamma2_os = (Gamma_os[0, 1] + Gamma_os[1, 0])/2 + kappa_os = (Gamma_os[0, 0] + Gamma_os[1, 1]) / 2 + omega_os = (Gamma_os[1, 0] - Gamma_os[0, 1]) / 2 + gamma1_os = (Gamma_os[0, 0] - Gamma_os[1, 1]) / 2 + gamma2_os = (Gamma_os[0, 1] + Gamma_os[1, 0]) / 2 # test three image positions - x, y = np.array([3,4,5]), np.array([2,1,0]) + x, y = np.array([3, 4, 5]), np.array([2, 1, 0]) - lens_model_list = ['EPL', 'SHEAR', 'SHEAR', 'SHEAR'] + lens_model_list = ["EPL", "SHEAR", "SHEAR", "SHEAR"] redshift_list = [z_d, z_f, z_d, z_b] - kwargs_los = {'kappa_os': kappa_os, 'omega_os': omega_os, 'gamma1_os': gamma1_os, 'gamma2_os': gamma2_os, - 'kappa_od': 0.0, 'omega_od': 0.0, 'gamma1_od': gamma1_od, 'gamma2_od': gamma2_od, - 'kappa_ds': 0.0, 'omega_ds': 0.0, 'gamma1_ds': gamma1_ds, 'gamma2_ds': gamma2_ds} - - kwargs_epl = {'theta_E': 0.8, 'gamma': 1.95, 'center_x': 0, 'center_y': 0, 'e1': 0.07, 'e2': -0.03} - - kwargs_gamma_f = {'gamma1': gamma1_f, 'gamma2': gamma2_f} - kwargs_gamma_d = {'gamma1': gamma1_d, 'gamma2': gamma2_d} - kwargs_gamma_b = {'gamma1': gamma1_b, 'gamma2': gamma2_b} + kwargs_los = { + "kappa_os": kappa_os, + "omega_os": omega_os, + "gamma1_os": gamma1_os, + "gamma2_os": gamma2_os, + "kappa_od": 0.0, + "omega_od": 0.0, + "gamma1_od": gamma1_od, + "gamma2_od": gamma2_od, + "kappa_ds": 0.0, + "omega_ds": 0.0, + "gamma1_ds": gamma1_ds, + "gamma2_ds": gamma2_ds, + } + + kwargs_epl = { + "theta_E": 0.8, + "gamma": 1.95, + "center_x": 0, + "center_y": 0, + "e1": 0.07, + "e2": -0.03, + } + + kwargs_gamma_f = {"gamma1": gamma1_f, "gamma2": gamma2_f} + kwargs_gamma_d = {"gamma1": gamma1_d, "gamma2": gamma2_d} + kwargs_gamma_b = {"gamma1": gamma1_b, "gamma2": gamma2_b} kwargs_singleplane_los = [kwargs_los, kwargs_epl] - lens_model_los = SinglePlaneLOS(['LOS', 'EPL'], index_los = 0) + lens_model_los = 
SinglePlaneLOS(["LOS", "EPL"], index_los=0) kwargs_multiplane = [kwargs_epl, kwargs_gamma_f, kwargs_gamma_d, kwargs_gamma_b] - lens_model_multiplane = MultiPlane(z_source = z_s, - lens_model_list = lens_model_list, - lens_redshift_list = redshift_list) + lens_model_multiplane = MultiPlane( + z_source=z_s, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) # set the tolerance # ray shooting passes at 1e-16 # hessian around 1e-6 @@ -237,76 +346,109 @@ def d(z1, z2): # since we pass an array of image positions # displacement angle - alpha_multiplane_x, alpha_multiplane_y = lens_model_multiplane.alpha(x, y, kwargs_multiplane) + alpha_multiplane_x, alpha_multiplane_y = lens_model_multiplane.alpha( + x, y, kwargs_multiplane + ) alpha_los_x, alpha_los_y = lens_model_los.alpha(x, y, kwargs_singleplane_los) npt.assert_allclose(alpha_multiplane_x, alpha_los_x, rtol=tolerance) npt.assert_allclose(alpha_multiplane_y, alpha_los_y, rtol=tolerance) # ray_shooting - beta_multiplane_x, beta_multiplane_y = lens_model_multiplane.ray_shooting(x, y, kwargs_multiplane) - beta_los_x, beta_los_y = lens_model_los.ray_shooting(x, y, kwargs_singleplane_los) - npt.assert_allclose(beta_multiplane_x, beta_los_x, rtol = tolerance) - npt.assert_allclose(beta_multiplane_y, beta_los_y, rtol = tolerance) + beta_multiplane_x, beta_multiplane_y = lens_model_multiplane.ray_shooting( + x, y, kwargs_multiplane + ) + beta_los_x, beta_los_y = lens_model_los.ray_shooting( + x, y, kwargs_singleplane_los + ) + npt.assert_allclose(beta_multiplane_x, beta_los_x, rtol=tolerance) + npt.assert_allclose(beta_multiplane_y, beta_los_y, rtol=tolerance) # hessian - hessian_multiplane_xx, hessian_multiplane_xy, hessian_multiplane_yx, hessian_multiplane_yy = lens_model_multiplane.hessian(x, y, kwargs_multiplane) - hessian_los_xx, hessian_los_xy, hessian_los_yx, hessian_los_yy = lens_model_los.hessian(x, y, kwargs_singleplane_los) - npt.assert_allclose(hessian_multiplane_xx, hessian_los_xx, rtol = tolerance) - npt.assert_allclose(hessian_multiplane_xy, hessian_los_xy, rtol = tolerance) - npt.assert_allclose(hessian_multiplane_yx, hessian_los_yx, rtol = tolerance) - npt.assert_allclose(hessian_multiplane_yy, hessian_los_yy, rtol = tolerance) + ( + hessian_multiplane_xx, + hessian_multiplane_xy, + hessian_multiplane_yx, + hessian_multiplane_yy, + ) = lens_model_multiplane.hessian(x, y, kwargs_multiplane) + ( + hessian_los_xx, + hessian_los_xy, + hessian_los_yx, + hessian_los_yy, + ) = lens_model_los.hessian(x, y, kwargs_singleplane_los) + npt.assert_allclose(hessian_multiplane_xx, hessian_los_xx, rtol=tolerance) + npt.assert_allclose(hessian_multiplane_xy, hessian_los_xy, rtol=tolerance) + npt.assert_allclose(hessian_multiplane_yx, hessian_los_yx, rtol=tolerance) + npt.assert_allclose(hessian_multiplane_yy, hessian_los_yy, rtol=tolerance) # time delays ra_source, dec_source = 0.05, 0.02 number_of_images = 4 - lens_model_multiplane_time = LensModel(lens_model_list, z_lens = z_d, z_source = z_s, - lens_redshift_list = redshift_list, - multi_plane = True) + lens_model_multiplane_time = LensModel( + lens_model_list, + z_lens=z_d, + z_source=z_s, + lens_redshift_list=redshift_list, + multi_plane=True, + ) multiplane_solver = LensEquationSolver(lens_model_multiplane_time) - x_image_mp, y_image_mp = multiplane_solver.findBrightImage(ra_source, dec_source, - kwargs_multiplane, numImages=number_of_images) + x_image_mp, y_image_mp = multiplane_solver.findBrightImage( + ra_source, dec_source, kwargs_multiplane, 
numImages=number_of_images + ) - t_days_mp = lens_model_multiplane_time.arrival_time(x_image_mp, y_image_mp, kwargs_multiplane) + t_days_mp = lens_model_multiplane_time.arrival_time( + x_image_mp, y_image_mp, kwargs_multiplane + ) dt_days_mp = t_days_mp[1:] - t_days_mp[0] - lens_model_los_time = LensModel(['LOS', 'EPL'], z_lens=z_d, z_source=z_s) + lens_model_los_time = LensModel(["LOS", "EPL"], z_lens=z_d, z_source=z_s) kwargs_time_los = [kwargs_los, kwargs_epl] los_solver = LensEquationSolver(lens_model_los_time) - x_image_los, y_image_los = los_solver.findBrightImage(ra_source, dec_source, - kwargs_time_los, numImages=number_of_images) + x_image_los, y_image_los = los_solver.findBrightImage( + ra_source, dec_source, kwargs_time_los, numImages=number_of_images + ) - t_days_los = lens_model_los_time.arrival_time(x_image_los, y_image_los, kwargs_time_los) - dt_days_los = t_days_los[1:] - t_days_los[0] + t_days_los = lens_model_los_time.arrival_time( + x_image_los, y_image_los, kwargs_time_los + ) + dt_days_los = t_days_los[1:] - t_days_los[0] npt.assert_allclose(dt_days_mp, dt_days_los, rtol=tolerance) def test_init(self): # need to do this for los minimal too? - lens_model_list = ['TNFW', 'TRIPLE_CHAMELEON', 'LOS', 'SHEAR_GAMMA_PSI', 'CURVED_ARC_CONST', - 'NFW_MC', 'ARC_PERT', 'MULTIPOLE', 'CURVED_ARC_SPP'] + lens_model_list = [ + "TNFW", + "TRIPLE_CHAMELEON", + "LOS", + "SHEAR_GAMMA_PSI", + "CURVED_ARC_CONST", + "NFW_MC", + "ARC_PERT", + "MULTIPOLE", + "CURVED_ARC_SPP", + ] lensModel = SinglePlaneLOS(lens_model_list=lens_model_list, index_los=0) - assert lensModel.func_list[0].param_names[0] == 'Rs' + assert lensModel.func_list[0].param_names[0] == "Rs" class TestRaise(unittest.TestCase): - def test_raise(self): - """ - check whether raises occurs if fastell4py is not installed + """Check whether raises occurs if fastell4py is not installed. 
:return: """ if bool_test is False: with self.assertRaises(ImportError): - SinglePlaneLOS(lens_model_list=['PEMD', 'LOS'], index_los = 1) + SinglePlaneLOS(lens_model_list=["PEMD", "LOS"], index_los=1) with self.assertRaises(ImportError): - SinglePlaneLOS(lens_model_list=['SPEMD', 'LOS'], index_los = 1) + SinglePlaneLOS(lens_model_list=["SPEMD", "LOS"], index_los=1) else: - SinglePlaneLOS(lens_model_list=['PEMD', 'SPEMD', 'LOS'], index_los = 2) + SinglePlaneLOS(lens_model_list=["PEMD", "SPEMD", "LOS"], index_los=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_MultiPlane/test_multi_plane.py b/test/test_LensModel/test_MultiPlane/test_multi_plane.py index a176c300c..5fbae812c 100644 --- a/test/test_LensModel/test_MultiPlane/test_multi_plane.py +++ b/test/test_LensModel/test_MultiPlane/test_multi_plane.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -7,64 +7,82 @@ from lenstronomy.LensModel.MultiPlane.multi_plane import MultiPlane from lenstronomy.LensModel.MultiPlane.multi_plane_base import MultiPlaneBase from lenstronomy.LensModel.lens_model import LensModel -from lenstronomy.LensModel.MultiPlane.multi_plane import LensedLocation, PhysicalLocation +from lenstronomy.LensModel.MultiPlane.multi_plane import ( + LensedLocation, + PhysicalLocation, +) import lenstronomy.Util.constants as const class TestMultiPlane(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): pass def test_update_source_redshift(self): z_source = 1.5 - lens_model_list = ['SIS'] - kwargs_lens = [{'theta_E': 1}] + lens_model_list = ["SIS"] + kwargs_lens = [{"theta_E": 1}] redshift_list = [0.5] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, - lens_redshift_list=redshift_list, z_interp_stop=3, cosmo_interp=True) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + z_interp_stop=3, + cosmo_interp=True, + ) alpha_x, alpha_y = lensModelMutli.alpha(1, 0, kwargs_lens=kwargs_lens) lensModelMutli.update_source_redshift(z_source=z_source) alpha_x_new, alpha_y_new = lensModelMutli.alpha(1, 0, kwargs_lens=kwargs_lens) - npt.assert_almost_equal(alpha_x/alpha_x_new, 1., decimal=8) + npt.assert_almost_equal(alpha_x / alpha_x_new, 1.0, decimal=8) - lensModelMutli.update_source_redshift(z_source=1.) + lensModelMutli.update_source_redshift(z_source=1.0) alpha_x_new, alpha_y_new = lensModelMutli.alpha(1, 0, kwargs_lens=kwargs_lens) assert alpha_x / alpha_x_new > 1 - lensModelMutli.update_source_redshift(z_source=2.) 
+ lensModelMutli.update_source_redshift(z_source=2.0) alpha_x_new, alpha_y_new = lensModelMutli.alpha(1, 0, kwargs_lens=kwargs_lens) assert alpha_x / alpha_x_new < 1 def test_sis_alpha(self): z_source = 1.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [0.5] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] alpha_x_simple, alpha_y_simple = lensModel.alpha(1, 0, kwargs_lens) alpha_x_multi, alpha_y_multi = lensModelMutli.alpha(1, 0, kwargs_lens) npt.assert_almost_equal(alpha_x_simple, alpha_x_multi, decimal=8) npt.assert_almost_equal(alpha_y_simple, alpha_y_multi, decimal=8) - sum_partial = np.sum(lensModelMutli._multi_plane_base._T_ij_list) + lensModelMutli._T_ij_stop + sum_partial = ( + np.sum(lensModelMutli._multi_plane_base._T_ij_list) + + lensModelMutli._T_ij_stop + ) T_z_true = lensModelMutli._T_z_source npt.assert_almost_equal(sum_partial, T_z_true, decimal=5) def test_sis_ray_tracing(self): z_source = 1.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [0.5] from astropy.cosmology import FlatLambdaCDM, LambdaCDM # test flat LCDM cosmo = FlatLambdaCDM(H0=70, Om0=0.3) - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, - lens_redshift_list=redshift_list, cosmo=cosmo) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + cosmo=cosmo, + ) lensModel = LensModel(lens_model_list=lens_model_list, cosmo=cosmo) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] beta_x_simple, beta_y_simple = lensModel.ray_shooting(1, 0, kwargs_lens) beta_x_multi, beta_y_multi = lensModelMutli.ray_shooting(1, 0, kwargs_lens) npt.assert_almost_equal(beta_x_simple, beta_x_multi, decimal=10) @@ -74,13 +92,21 @@ def test_sis_ray_tracing(self): def test_sis_hessian(self): z_source = 1.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [0.5] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - f_xx_simple, f_xy_simple, f_yx_simple, f_yy_simple = lensModel.hessian(1, 0, kwargs_lens) - f_xx_multi, f_xy_multi, f_yx_multi, f_yy_multi = lensModelMutli.hessian(1, 0, kwargs_lens, diff=0.000001) + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + f_xx_simple, f_xy_simple, f_yx_simple, f_yy_simple = lensModel.hessian( + 1, 0, kwargs_lens + ) + f_xx_multi, f_xy_multi, f_yx_multi, f_yy_multi = lensModelMutli.hessian( + 1, 0, kwargs_lens, diff=0.000001 + ) npt.assert_almost_equal(f_xx_simple, f_xx_multi, decimal=5) npt.assert_almost_equal(f_xy_simple, f_xy_multi, decimal=5) npt.assert_almost_equal(f_yx_simple, f_yx_multi, decimal=5) @@ -90,9 +116,15 @@ def test_empty(self): z_source = 1.5 lens_model_list = [] redshift_list = [] - lensModelMutli = MultiPlane(z_source=z_source, 
lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) kwargs_lens = [] - f_xx_multi, f_xy_multi, f_yx_multi, f_yy_multi = lensModelMutli.hessian(1, 0, kwargs_lens, diff=0.000001) + f_xx_multi, f_xy_multi, f_yx_multi, f_yy_multi = lensModelMutli.hessian( + 1, 0, kwargs_lens, diff=0.000001 + ) npt.assert_almost_equal(0, f_xx_multi, decimal=5) npt.assert_almost_equal(0, f_xy_multi, decimal=5) npt.assert_almost_equal(0, f_yx_multi, decimal=5) @@ -100,11 +132,16 @@ def test_empty(self): def test_sis_kappa_gamma_mag(self): z_source = 1.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [0.5] - lensModelMutli = LensModel(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list, multi_plane=True) + lensModelMutli = LensModel( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + multi_plane=True, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] kappa_simple = lensModel.kappa(1, 0, kwargs_lens) kappa_multi = lensModelMutli.kappa(1, 0, kwargs_lens) npt.assert_almost_equal(kappa_simple, kappa_multi, decimal=5) @@ -121,14 +158,20 @@ def test_sis_kappa_gamma_mag(self): def test_sis_travel_time(self): z_source = 1.5 z_lens = 0.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [z_lens] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - dt = lensModelMutli.arrival_time(1., 0., kwargs_lens) - Dt = lensModelMutli._multi_plane_base._cosmo_bkg.ddt(z_lens=z_lens, z_source=z_source) - fermat_pot = lensModel.fermat_potential(1, 0., kwargs_lens) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + dt = lensModelMutli.arrival_time(1.0, 0.0, kwargs_lens) + Dt = lensModelMutli._multi_plane_base._cosmo_bkg.ddt( + z_lens=z_lens, z_source=z_source + ) + fermat_pot = lensModel.fermat_potential(1, 0.0, kwargs_lens) dt_simple = const.delay_arcsec2days(fermat_pot, Dt) print(dt, dt_simple) npt.assert_almost_equal(dt, dt_simple, decimal=8) @@ -136,14 +179,23 @@ def test_sis_travel_time(self): def test_sis_travel_time_new(self): z_source = 1.5 z_lens = 0.5 - lens_model_list = ['SIS', 'SIS'] + lens_model_list = ["SIS", "SIS"] redshift_list = [z_lens, 0.2] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}, {'theta_E': 0., 'center_x': 0, 'center_y': 0}] - dt = lensModelMutli.arrival_time(1., 0., kwargs_lens) - Dt = lensModelMutli._multi_plane_base._cosmo_bkg.ddt(z_lens=z_lens, z_source=z_source) - fermat_pot = lensModel.fermat_potential(1, 0., kwargs_lens) + kwargs_lens = [ + {"theta_E": 1.0, "center_x": 0, "center_y": 0}, + {"theta_E": 0.0, "center_x": 0, "center_y": 0}, + ] + dt = lensModelMutli.arrival_time(1.0, 0.0, 
kwargs_lens) + Dt = lensModelMutli._multi_plane_base._cosmo_bkg.ddt( + z_lens=z_lens, z_source=z_source + ) + fermat_pot = lensModel.fermat_potential(1, 0.0, kwargs_lens) dt_simple = const.delay_arcsec2days(fermat_pot, Dt) print(dt, dt_simple) npt.assert_almost_equal(dt, dt_simple, decimal=8) @@ -151,16 +203,20 @@ def test_sis_travel_time_new(self): def test_sis_ray_shooting(self): z_source = 1.5 z_lens = 0.5 - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] redshift_list = [z_lens] - lensModelMutli = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMutli = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModel = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - beta_x, beta_y = lensModelMutli.ray_shooting(1., 0., kwargs_lens) - beta_x_single, beta_y_single = lensModel.ray_shooting(1, 0., kwargs_lens) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + beta_x, beta_y = lensModelMutli.ray_shooting(1.0, 0.0, kwargs_lens) + beta_x_single, beta_y_single = lensModel.ray_shooting(1, 0.0, kwargs_lens) npt.assert_almost_equal(beta_x, beta_x_single, decimal=8) npt.assert_almost_equal(beta_y, beta_y_single, decimal=8) - x, y = np.array([1.]), np.array([2.]) + x, y = np.array([1.0]), np.array([2.0]) beta_x, beta_y = lensModelMutli.ray_shooting(x, y, kwargs_lens) beta_x_single, beta_y_single = lensModel.ray_shooting(x, y, kwargs_lens) npt.assert_almost_equal(beta_x, beta_x_single, decimal=8) @@ -168,36 +224,43 @@ def test_sis_ray_shooting(self): def test_random_ordering(self): z_source = 1.5 - lens_model_list = ['SIS', 'SIS', 'SIS'] - sis1 = {'theta_E': 1., 'center_x': 0, 'center_y': 0} - sis2 = {'theta_E': .2, 'center_x': 0.5, 'center_y': 0} - sis3 = {'theta_E': .1, 'center_x': 0, 'center_y': 0.5} + lens_model_list = ["SIS", "SIS", "SIS"] + sis1 = {"theta_E": 1.0, "center_x": 0, "center_y": 0} + sis2 = {"theta_E": 0.2, "center_x": 0.5, "center_y": 0} + sis3 = {"theta_E": 0.1, "center_x": 0, "center_y": 0.5} z1 = 0.1 z2 = 0.5 z3 = 0.7 redshift_list = [z1, z2, z3] kwargs_lens = [sis1, sis2, sis3] - lensModel = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) - beta_x_1, beta_y_1 = lensModel.ray_shooting(1., 0., kwargs_lens) + lensModel = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) + beta_x_1, beta_y_1 = lensModel.ray_shooting(1.0, 0.0, kwargs_lens) redshift_list = [z3, z2, z1] kwargs_lens = [sis3, sis2, sis1] - lensModel = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) - beta_x_2, beta_y_2 = lensModel.ray_shooting(1., 0., kwargs_lens) + lensModel = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) + beta_x_2, beta_y_2 = lensModel.ray_shooting(1.0, 0.0, kwargs_lens) npt.assert_almost_equal(beta_x_1, beta_x_2, decimal=8) npt.assert_almost_equal(beta_y_1, beta_y_2, decimal=8) def test_ray_shooting_partial_2(self): - z_source = 1.5 - lens_model_list = ['SIS', 'SIS', 'SIS', 'SIS'] - sis1 = {'theta_E': 0.4, 'center_x': 0, 'center_y': 0} - sis2 = {'theta_E': .2, 'center_x': 0.5, 'center_y': 0} - sis3 = {'theta_E': .1, 'center_x': 0, 'center_y': 0.5} - sis4 = {'theta_E': 0.5, 'center_x': 0.1, 'center_y': 0.3} + lens_model_list = ["SIS", "SIS", "SIS", "SIS"] + sis1 = {"theta_E": 0.4, "center_x": 
0, "center_y": 0} + sis2 = {"theta_E": 0.2, "center_x": 0.5, "center_y": 0} + sis3 = {"theta_E": 0.1, "center_x": 0, "center_y": 0.5} + sis4 = {"theta_E": 0.5, "center_x": 0.1, "center_y": 0.3} - lens_model_list_macro = ['SIS'] - kwargs_macro = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + lens_model_list_macro = ["SIS"] + kwargs_macro = [{"theta_E": 1, "center_x": 0, "center_y": 0}] zmacro = 0.5 @@ -208,104 +271,193 @@ def test_ray_shooting_partial_2(self): redshift_list = [z1, z2, z3, z4] kwargs_lens = [sis1, sis2, sis3, sis4] kwargs_lens_full = kwargs_macro + kwargs_lens - lensModel_full = MultiPlane(z_source=z_source, lens_model_list=lens_model_list_macro + lens_model_list, - lens_redshift_list=[zmacro]+redshift_list) - lensModel_macro = MultiPlane(z_source=z_source, lens_model_list=lens_model_list_macro, lens_redshift_list=[zmacro]) - lensModel = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) - - theta_x, theta_y = 1., 1. - - x_subs, y_subs, alpha_x_subs, alpha_y_subs = lensModel.ray_shooting_partial(x=0, y=0, alpha_x=theta_x, - alpha_y=theta_y, z_start=0, - z_stop=zmacro, - kwargs_lens=kwargs_lens) - - x_out, y_out, alpha_x_out, alpha_y_out = lensModel_macro.ray_shooting_partial(x_subs, y_subs, alpha_x_subs, alpha_y_subs, - zmacro, zmacro, kwargs_macro, - include_z_start=True) + lensModel_full = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list_macro + lens_model_list, + lens_redshift_list=[zmacro] + redshift_list, + ) + lensModel_macro = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list_macro, + lens_redshift_list=[zmacro], + ) + lensModel = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) + + theta_x, theta_y = 1.0, 1.0 + + x_subs, y_subs, alpha_x_subs, alpha_y_subs = lensModel.ray_shooting_partial( + x=0, + y=0, + alpha_x=theta_x, + alpha_y=theta_y, + z_start=0, + z_stop=zmacro, + kwargs_lens=kwargs_lens, + ) + + x_out, y_out, alpha_x_out, alpha_y_out = lensModel_macro.ray_shooting_partial( + x_subs, + y_subs, + alpha_x_subs, + alpha_y_subs, + zmacro, + zmacro, + kwargs_macro, + include_z_start=True, + ) npt.assert_almost_equal(x_subs, x_out) npt.assert_almost_equal(y_subs, y_out) - x_full, y_full, alpha_x_full, alpha_y_full = lensModel_full.ray_shooting_partial(0, 0, theta_x, theta_y, 0, zmacro, - kwargs_lens_full) + ( + x_full, + y_full, + alpha_x_full, + alpha_y_full, + ) = lensModel_full.ray_shooting_partial( + 0, 0, theta_x, theta_y, 0, zmacro, kwargs_lens_full + ) npt.assert_almost_equal(x_full, x_out) npt.assert_almost_equal(y_full, y_out) npt.assert_almost_equal(alpha_x_full, alpha_x_out) npt.assert_almost_equal(alpha_y_full, alpha_y_out) - x_src, y_src, _, _ = lensModel_full.ray_shooting_partial(x=x_out, y=y_out, alpha_x=alpha_x_out, - alpha_y=alpha_y_out, - z_start=zmacro, - z_stop=z_source, - kwargs_lens=kwargs_lens_full) - + x_src, y_src, _, _ = lensModel_full.ray_shooting_partial( + x=x_out, + y=y_out, + alpha_x=alpha_x_out, + alpha_y=alpha_y_out, + z_start=zmacro, + z_stop=z_source, + kwargs_lens=kwargs_lens_full, + ) beta_x, beta_y = lensModel.co_moving2angle_source(x_src, y_src) - beta_x_true, beta_y_true = lensModel_full.ray_shooting(theta_x, theta_y, kwargs_lens_full) + beta_x_true, beta_y_true = lensModel_full.ray_shooting( + theta_x, theta_y, kwargs_lens_full + ) npt.assert_almost_equal(beta_x, beta_x_true, decimal=8) npt.assert_almost_equal(beta_y, beta_y_true, decimal=8) def test_ray_shooting_partial(self): z_source 
= 1.5 - lens_model_list = ['SIS', 'SIS', 'SIS'] - sis1 = {'theta_E': 1., 'center_x': 0, 'center_y': 0} - sis2 = {'theta_E': .2, 'center_x': 0.5, 'center_y': 0} - sis3 = {'theta_E': .1, 'center_x': 0, 'center_y': 0.5} + lens_model_list = ["SIS", "SIS", "SIS"] + sis1 = {"theta_E": 1.0, "center_x": 0, "center_y": 0} + sis2 = {"theta_E": 0.2, "center_x": 0.5, "center_y": 0} + sis3 = {"theta_E": 0.1, "center_x": 0, "center_y": 0.5} z1 = 0.1 z2 = 0.5 z3 = 0.7 redshift_list = [z1, z2, z3] kwargs_lens = [sis1, sis2, sis3] - lensModel = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) - lensModel_2 = LensModel(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list, multi_plane=True) + lensModel = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) + lensModel_2 = LensModel( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + multi_plane=True, + ) multiplane_2 = lensModel_2.lens_model intermediate_index = 1 - theta_x, theta_y = 1., 1. + theta_x, theta_y = 1.0, 1.0 Tzsrc = lensModel._multi_plane_base._cosmo_bkg.T_xy(0, z_source) - z_intermediate = lensModel._multi_plane_base._lens_redshift_list[intermediate_index] + z_intermediate = lensModel._multi_plane_base._lens_redshift_list[ + intermediate_index + ] for lensmodel_class in [lensModel, multiplane_2]: - x_out, y_out, alpha_x_out, alpha_y_out = lensmodel_class.ray_shooting_partial(x=0, y=0, alpha_x=theta_x, - alpha_y=theta_y, z_start=0, z_stop=z_intermediate, kwargs_lens=kwargs_lens) - + ( + x_out, + y_out, + alpha_x_out, + alpha_y_out, + ) = lensmodel_class.ray_shooting_partial( + x=0, + y=0, + alpha_x=theta_x, + alpha_y=theta_y, + z_start=0, + z_stop=z_intermediate, + kwargs_lens=kwargs_lens, + ) x_out_full_0 = x_out y_out_full_0 = y_out - x_out, y_out, alpha_x_out, alpha_y_out = lensmodel_class.ray_shooting_partial(x=x_out, y=y_out, alpha_x=alpha_x_out, - alpha_y=alpha_y_out, z_start=z_intermediate, - z_stop=z_source, - kwargs_lens=kwargs_lens) + ( + x_out, + y_out, + alpha_x_out, + alpha_y_out, + ) = lensmodel_class.ray_shooting_partial( + x=x_out, + y=y_out, + alpha_x=alpha_x_out, + alpha_y=alpha_y_out, + z_start=z_intermediate, + z_stop=z_source, + kwargs_lens=kwargs_lens, + ) beta_x, beta_y = lensModel.co_moving2angle_source(x_out, y_out) - beta_x_true, beta_y_true = lensmodel_class.ray_shooting(theta_x, theta_y, kwargs_lens) + beta_x_true, beta_y_true = lensmodel_class.ray_shooting( + theta_x, theta_y, kwargs_lens + ) npt.assert_almost_equal(beta_x, beta_x_true, decimal=8) npt.assert_almost_equal(beta_y, beta_y_true, decimal=8) - T_ij_start = lensModel._multi_plane_base._cosmo_bkg.T_xy(z_observer=0, z_source=0.1) - T_ij_end = lensModel._multi_plane_base._cosmo_bkg.T_xy(z_observer=0.7, z_source=1.5) - x_out, y_out, alpha_x_out, alpha_y_out = lensmodel_class.ray_shooting_partial(x=0, y=0, alpha_x=theta_x, - alpha_y=theta_y, z_start=0, z_stop=z_source, kwargs_lens=kwargs_lens, - T_ij_start=T_ij_start, T_ij_end=T_ij_end) - - beta_x, beta_y = x_out/Tzsrc, y_out/Tzsrc + T_ij_start = lensModel._multi_plane_base._cosmo_bkg.T_xy( + z_observer=0, z_source=0.1 + ) + T_ij_end = lensModel._multi_plane_base._cosmo_bkg.T_xy( + z_observer=0.7, z_source=1.5 + ) + ( + x_out, + y_out, + alpha_x_out, + alpha_y_out, + ) = lensmodel_class.ray_shooting_partial( + x=0, + y=0, + alpha_x=theta_x, + alpha_y=theta_y, + z_start=0, + z_stop=z_source, + kwargs_lens=kwargs_lens, + 
T_ij_start=T_ij_start, + T_ij_end=T_ij_end, + ) + + beta_x, beta_y = x_out / Tzsrc, y_out / Tzsrc npt.assert_almost_equal(beta_x, beta_x_true, decimal=8) npt.assert_almost_equal(beta_y, beta_y_true, decimal=8) def test_pseudo_multiplane(self): z_source = 1.5 - lens_model_list = ['SIS', 'SIS'] - sis1 = {'theta_E': 1., 'center_x': 0, 'center_y': 0} - sis2 = {'theta_E': .2, 'center_x': 0.5, 'center_y': 0} + lens_model_list = ["SIS", "SIS"] + sis1 = {"theta_E": 1.0, "center_x": 0, "center_y": 0} + sis2 = {"theta_E": 0.2, "center_x": 0.5, "center_y": 0} z1 = 0.5 z2 = 0.5 redshift_list = [z1, z2] kwargs_lens = [sis1, sis2] - lensModelMulti = MultiPlane(z_source=z_source, lens_model_list=lens_model_list, lens_redshift_list=redshift_list) + lensModelMulti = MultiPlane( + z_source=z_source, + lens_model_list=lens_model_list, + lens_redshift_list=redshift_list, + ) lensModelSingle = LensModel(lens_model_list=lens_model_list) beta_x, beta_y = lensModelMulti.ray_shooting(1, 1, kwargs_lens) @@ -314,69 +466,98 @@ def test_pseudo_multiplane(self): npt.assert_almost_equal(beta_y, beta_y_single, decimal=10) def test_position_convention(self): - - lens_model_list = ['SIS', 'SIS','SIS', 'SIS'] + lens_model_list = ["SIS", "SIS", "SIS", "SIS"] redshift_list = [0.5, 0.5, 0.9, 0.6] - kwargs_lens = [{'theta_E': 1, 'center_x':0, 'center_y': 0}, - {'theta_E': 0.4, 'center_x': 0, 'center_y': 0.2}, - {'theta_E': 1, 'center_x': 1.8, 'center_y': -0.4}, - {'theta_E': 0.41, 'center_x': 1., 'center_y': 0.7}] + kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"theta_E": 0.4, "center_x": 0, "center_y": 0.2}, + {"theta_E": 1, "center_x": 1.8, "center_y": -0.4}, + {"theta_E": 0.41, "center_x": 1.0, "center_y": 0.7}, + ] - index_list = [[2,3], [3,2]] + index_list = [[2, 3], [3, 2]] # compute the physical position given lensed position, and check that lensing computations # using the two different conventions and sets of kwargs agree for index in index_list: - - lensModel_observed = LensModel(lens_model_list=lens_model_list, multi_plane=True, - observed_convention_index=index, z_source=1.5, - lens_redshift_list=redshift_list) - lensModel_physical = LensModel(lens_model_list=lens_model_list, multi_plane=True, - z_source=1.5, lens_redshift_list=redshift_list) + lensModel_observed = LensModel( + lens_model_list=lens_model_list, + multi_plane=True, + observed_convention_index=index, + z_source=1.5, + lens_redshift_list=redshift_list, + ) + lensModel_physical = LensModel( + lens_model_list=lens_model_list, + multi_plane=True, + z_source=1.5, + lens_redshift_list=redshift_list, + ) multi = lensModel_observed.lens_model._multi_plane_base lensed, phys = LensedLocation(multi, index), PhysicalLocation() - kwargs_lens_physical = lensModel_observed.lens_model._convention(kwargs_lens) + kwargs_lens_physical = lensModel_observed.lens_model._convention( + kwargs_lens + ) kwargs_phys, kwargs_lensed = phys(kwargs_lens), lensed(kwargs_lens) for j, lensed_kwargs in enumerate(kwargs_lensed): - for ki in lensed_kwargs.keys(): assert lensed_kwargs[ki] == kwargs_lens_physical[j][ki] assert kwargs_phys[j][ki] == kwargs_lens[j][ki] fxx, fyy, fxy, fyx = lensModel_observed.hessian(0.5, 0.5, kwargs_lens) - fxx2, fyy2, fxy2, fyx2 = lensModel_physical.hessian(0.5, 0.5, kwargs_lens_physical) + fxx2, fyy2, fxy2, fyx2 = lensModel_physical.hessian( + 0.5, 0.5, kwargs_lens_physical + ) npt.assert_almost_equal(fxx, fxx2) npt.assert_almost_equal(fxy, fxy2) betax1, betay1 = lensModel_observed.ray_shooting(0.5, 0.5, kwargs_lens) - betax2, betay2 = 
lensModel_physical.ray_shooting(0.5, 0.5, kwargs_lens_physical) + betax2, betay2 = lensModel_physical.ray_shooting( + 0.5, 0.5, kwargs_lens_physical + ) npt.assert_almost_equal(betax1, betax2) npt.assert_almost_equal(betay1, betay2) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - MultiPlaneBase(z_source_convention=1, lens_model_list=['SIS'], lens_redshift_list=[2]) + MultiPlaneBase( + z_source_convention=1, lens_model_list=["SIS"], lens_redshift_list=[2] + ) with self.assertRaises(ValueError): - MultiPlaneBase(z_source_convention=1, lens_model_list=['SIS', 'SIS'], lens_redshift_list=[0.5]) + MultiPlaneBase( + z_source_convention=1, + lens_model_list=["SIS", "SIS"], + lens_redshift_list=[0.5], + ) with self.assertRaises(ValueError): - lens = MultiPlane(z_source_convention=1, z_source=1, lens_model_list=['SIS', 'SIS'], lens_redshift_list=[0.5, 0.8]) + lens = MultiPlane( + z_source_convention=1, + z_source=1, + lens_model_list=["SIS", "SIS"], + lens_redshift_list=[0.5, 0.8], + ) lens._check_raise(k=[1]) with self.assertRaises(ValueError): - lens_model = LensModel(lens_model_list=['SIS'], multi_plane=True, z_source=1, z_source_convention=1, - cosmo_interp=True, z_interp_stop=0.5) + lens_model = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + z_source=1, + z_source_convention=1, + cosmo_interp=True, + z_interp_stop=0.5, + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_Optimizer/test_fast_rayshooting.py b/test/test_LensModel/test_Optimizer/test_fast_rayshooting.py index b0e4a0999..9f6e0e391 100644 --- a/test/test_LensModel/test_Optimizer/test_fast_rayshooting.py +++ b/test/test_LensModel/test_Optimizer/test_fast_rayshooting.py @@ -8,47 +8,85 @@ class TestFastRayShooting(object): - def setup_method(self): - self.zlens, self.zsource = 0.5, 1.5 - epl_kwargs = {'theta_E': 0.8, 'center_x': 0.1, 'center_y': 0., 'e1': -0.2, 'e2': 0.1, 'gamma': 2.05} - shear_kwargs = {'gamma1': 0.09, 'gamma2': -0.02} + epl_kwargs = { + "theta_E": 0.8, + "center_x": 0.1, + "center_y": 0.0, + "e1": -0.2, + "e2": 0.1, + "gamma": 2.05, + } + shear_kwargs = {"gamma1": 0.09, "gamma2": -0.02} kwargs_macro = [epl_kwargs, shear_kwargs] self.x_image = np.array([0.65043538, -0.31109505, 0.78906059, -0.86222271]) self.y_image = np.array([-0.89067493, 0.94851787, 0.52882605, -0.25403778]) - halo_list = ['SIS', 'SIS', 'SIS'] + halo_list = ["SIS", "SIS", "SIS"] halo_z = [self.zlens - 0.1, self.zlens, self.zlens + 0.4] - halo_kwargs = [{'theta_E': 0.05, 'center_x': 0.3, 'center_y': -0.9}, - {'theta_E': 0.01, 'center_x': 1.3, 'center_y': -0.5}, - {'theta_E': 0.02, 'center_x': -0.4, 'center_y': -0.4}] + halo_kwargs = [ + {"theta_E": 0.05, "center_x": 0.3, "center_y": -0.9}, + {"theta_E": 0.01, "center_x": 1.3, "center_y": -0.5}, + {"theta_E": 0.02, "center_x": -0.4, "center_y": -0.4}, + ] self.kwargs_epl = kwargs_macro + halo_kwargs self.zlist_epl = [self.zlens, self.zlens] + halo_z - self.lens_model_list_epl = ['EPL', 'SHEAR'] + halo_list + self.lens_model_list_epl = ["EPL", "SHEAR"] + halo_list - self.lensModel = LensModel(self.lens_model_list_epl, self.zlens, self.zsource, self.zlist_epl, - multi_plane=True) + self.lensModel = LensModel( + self.lens_model_list_epl, + self.zlens, + self.zsource, + self.zlist_epl, + multi_plane=True, + ) self.param_class = PowerLawFreeShear(self.kwargs_epl) def test_rayshooting(self): - solver = LensEquationSolver(self.lensModel) source_x, source_y = -0.05, -0.02 
- x_image_true, y_image_true = solver.findBrightImage(source_x, source_y, self.kwargs_epl) - - fast_rayshooting = MultiplaneFast(x_image_true, y_image_true, self.zlens, self.zsource, - self.lens_model_list_epl, self.zlist_epl, - astropy_instance=None, param_class=self.param_class, foreground_rays=None, - tol_source=1e-5, numerical_alpha_class=None) - - x_fore, y_fore, alpha_x_fore, alpha_y_fore = fast_rayshooting._ray_shooting_fast_foreground() - xtrue, ytrue, alpha_xtrue, alpha_ytrue = self.lensModel.lens_model.\ - ray_shooting_partial(np.zeros_like(x_image_true), np.zeros_like(y_image_true), x_image_true, y_image_true, 0., - self.zlens, self.kwargs_epl) + x_image_true, y_image_true = solver.findBrightImage( + source_x, source_y, self.kwargs_epl + ) + + fast_rayshooting = MultiplaneFast( + x_image_true, + y_image_true, + self.zlens, + self.zsource, + self.lens_model_list_epl, + self.zlist_epl, + astropy_instance=None, + param_class=self.param_class, + foreground_rays=None, + tol_source=1e-5, + numerical_alpha_class=None, + ) + + ( + x_fore, + y_fore, + alpha_x_fore, + alpha_y_fore, + ) = fast_rayshooting._ray_shooting_fast_foreground() + ( + xtrue, + ytrue, + alpha_xtrue, + alpha_ytrue, + ) = self.lensModel.lens_model.ray_shooting_partial( + np.zeros_like(x_image_true), + np.zeros_like(y_image_true), + x_image_true, + y_image_true, + 0.0, + self.zlens, + self.kwargs_epl, + ) npt.assert_almost_equal(x_fore, xtrue) npt.assert_almost_equal(y_fore, ytrue) @@ -61,16 +99,27 @@ def test_rayshooting(self): npt.assert_almost_equal(xfast, x) npt.assert_almost_equal(yfast, y) - x_inner, y_inner = fast_rayshooting.lensModel.ray_shooting(x_image_true, y_image_true, self.kwargs_epl) + x_inner, y_inner = fast_rayshooting.lensModel.ray_shooting( + x_image_true, y_image_true, self.kwargs_epl + ) npt.assert_almost_equal(x_inner, xfast) npt.assert_almost_equal(y_inner, yfast) foreground_rays = fast_rayshooting._foreground_rays - fast_rayshooting_new = MultiplaneFast(x_image_true, y_image_true, self.zlens, self.zsource, - self.lens_model_list_epl, self.zlist_epl, - astropy_instance=None, param_class=self.param_class, foreground_rays=foreground_rays, - tol_source=1e-5, numerical_alpha_class=None) + fast_rayshooting_new = MultiplaneFast( + x_image_true, + y_image_true, + self.zlens, + self.zsource, + self.lens_model_list_epl, + self.zlist_epl, + astropy_instance=None, + param_class=self.param_class, + foreground_rays=foreground_rays, + tol_source=1e-5, + numerical_alpha_class=None, + ) xfast, yfast = fast_rayshooting_new.ray_shooting_fast(args_lens) @@ -85,5 +134,6 @@ def test_rayshooting(self): npt.assert_almost_equal(logL, logL_true) npt.assert_almost_equal(chi_square_total, chi_square_source) -if __name__ == '__main__': - pytest.main() \ No newline at end of file + +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Optimizer/test_optimizer.py b/test/test_LensModel/test_Optimizer/test_optimizer.py index 2a57f78c8..c29235b61 100644 --- a/test/test_LensModel/test_Optimizer/test_optimizer.py +++ b/test/test_LensModel/test_Optimizer/test_optimizer.py @@ -2,182 +2,324 @@ import numpy as np from lenstronomy.Util.param_util import ellipticity2phi_q from lenstronomy.LensModel.lens_model import LensModel -from lenstronomy.LensModel.QuadOptimizer.param_manager import PowerLawFixedShear, \ - PowerLawFixedShearMultipole, PowerLawFreeShear, PowerLawFreeShearMultipole +from lenstronomy.LensModel.QuadOptimizer.param_manager import ( + PowerLawFixedShear, + PowerLawFixedShearMultipole, + 
PowerLawFreeShear, + PowerLawFreeShearMultipole, +) from lenstronomy.LensModel.QuadOptimizer.optimizer import Optimizer import numpy.testing as npt class TestOptimizer(object): - def setup_method(self): - self.zlens, self.zsource = 0.5, 1.5 - epl_kwargs = {'theta_E': 1., 'center_x': 0., 'center_y': 0., 'e1': 0.2, 'e2': 0.1, 'gamma': 2.05} - shear_kwargs = {'gamma1': 0.05, 'gamma2': -0.04} + epl_kwargs = { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.2, + "e2": 0.1, + "gamma": 2.05, + } + shear_kwargs = {"gamma1": 0.05, "gamma2": -0.04} kwargs_macro = [epl_kwargs, shear_kwargs] self.x_image = np.array([0.65043538, -0.31109505, 0.78906059, -0.86222271]) - self.y_image = np.array([-0.89067493, 0.94851787, 0.52882605, -0.25403778]) + self.y_image = np.array([-0.89067493, 0.94851787, 0.52882605, -0.25403778]) - halo_list = ['SIS', 'SIS', 'SIS'] + halo_list = ["SIS", "SIS", "SIS"] halo_z = [self.zlens - 0.1, self.zlens, self.zlens + 0.4] - halo_kwargs = [{'theta_E': 0.1, 'center_x': 0.3, 'center_y': -0.9}, - {'theta_E': 0.15, 'center_x': 1.3, 'center_y': -0.5}, - {'theta_E': 0.06, 'center_x': -0.4, 'center_y': -0.4}] + halo_kwargs = [ + {"theta_E": 0.1, "center_x": 0.3, "center_y": -0.9}, + {"theta_E": 0.15, "center_x": 1.3, "center_y": -0.5}, + {"theta_E": 0.06, "center_x": -0.4, "center_y": -0.4}, + ] self.kwargs_epl = kwargs_macro + halo_kwargs self.zlist_epl = [self.zlens, self.zlens] + halo_z - self.lens_model_list_epl = ['EPL', 'SHEAR'] + halo_list + self.lens_model_list_epl = ["EPL", "SHEAR"] + halo_list - kwargs_multi = [{'m': 4, 'a_m': -0.04, 'phi_m': -0.2, 'center_x': 0.1, 'center_y': -0.1}] + kwargs_multi = [ + {"m": 4, "a_m": -0.04, "phi_m": -0.2, "center_x": 0.1, "center_y": -0.1} + ] self.kwargs_multipole = kwargs_macro + kwargs_multi + halo_kwargs self.zlist_multipole = [self.zlens, self.zlens, self.zlens] + halo_z - self.lens_model_list_multipole = ['EPL', 'SHEAR'] + ['MULTIPOLE'] + halo_list - + self.lens_model_list_multipole = ["EPL", "SHEAR"] + ["MULTIPOLE"] + halo_list def test_elp_free_shear(self): - param_class = PowerLawFreeShear(self.kwargs_epl) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_epl, self.zlist_epl, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, - foreground_rays=None, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_epl, + self.zlist_epl, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_epl, self.zlens, self.zsource, self.zlist_epl, multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_epl, + self.zlens, + self.zsource, + self.zlist_epl, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) def test_elp_fixed_shear(self): - param_class = PowerLawFixedShear(self.kwargs_epl, 0.06) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_epl, self.zlist_epl, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, - foreground_rays=None, tol_source=1e-5, 
tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_epl, + self.zlist_epl, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_epl, self.zlens, self.zsource, self.zlist_epl, multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_epl, + self.zlens, + self.zsource, + self.zlist_epl, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) kwargs_shear = kwargs_final[1] - shear_out = np.hypot(kwargs_shear['gamma1'], kwargs_shear['gamma2']) + shear_out = np.hypot(kwargs_shear["gamma1"], kwargs_shear["gamma2"]) npt.assert_almost_equal(shear_out, 0.06) def test_multipole_free_shear(self): - param_class = PowerLawFreeShearMultipole(self.kwargs_multipole) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_multipole, self.zlist_multipole, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, - foreground_rays=None, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_multipole, + self.zlist_multipole, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_multipole, self.zlens, self.zsource, self.zlist_multipole, multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_multipole, + self.zlens, + self.zsource, + self.zlist_multipole, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) kwargs_epl = kwargs_final[0] kwargs_multipole = kwargs_final[2] - npt.assert_almost_equal(kwargs_multipole['m'], 4) - npt.assert_almost_equal(kwargs_multipole['center_x'], kwargs_epl['center_x']) - npt.assert_almost_equal(kwargs_multipole['center_y'], kwargs_epl['center_y']) - phi, _ = ellipticity2phi_q(kwargs_epl['e1'], kwargs_epl['e2']) - npt.assert_almost_equal(phi, kwargs_multipole['phi_m']) + npt.assert_almost_equal(kwargs_multipole["m"], 4) + npt.assert_almost_equal(kwargs_multipole["center_x"], kwargs_epl["center_x"]) + npt.assert_almost_equal(kwargs_multipole["center_y"], kwargs_epl["center_y"]) + phi, _ = ellipticity2phi_q(kwargs_epl["e1"], kwargs_epl["e2"]) + npt.assert_almost_equal(phi, kwargs_multipole["phi_m"]) def test_multipole_fixed_shear(self): - param_class = PowerLawFixedShearMultipole(self.kwargs_multipole, 0.07) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_multipole, self.zlist_multipole, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, - foreground_rays=None, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = 
Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_multipole, + self.zlist_multipole, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_multipole, self.zlens, self.zsource, self.zlist_multipole, multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_multipole, + self.zlens, + self.zsource, + self.zlist_multipole, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) kwargs_shear = kwargs_final[1] - shear_out = np.hypot(kwargs_shear['gamma1'], kwargs_shear['gamma2']) + shear_out = np.hypot(kwargs_shear["gamma1"], kwargs_shear["gamma2"]) npt.assert_almost_equal(shear_out, 0.07) kwargs_epl = kwargs_final[0] kwargs_multipole = kwargs_final[2] - npt.assert_almost_equal(kwargs_multipole['m'], 4) - npt.assert_almost_equal(kwargs_multipole['center_x'], kwargs_epl['center_x']) - npt.assert_almost_equal(kwargs_multipole['center_y'], kwargs_epl['center_y']) - phi, _ = ellipticity2phi_q(kwargs_epl['e1'], kwargs_epl['e2']) - npt.assert_almost_equal(phi, kwargs_multipole['phi_m']) + npt.assert_almost_equal(kwargs_multipole["m"], 4) + npt.assert_almost_equal(kwargs_multipole["center_x"], kwargs_epl["center_x"]) + npt.assert_almost_equal(kwargs_multipole["center_y"], kwargs_epl["center_y"]) + phi, _ = ellipticity2phi_q(kwargs_epl["e1"], kwargs_epl["e2"]) + npt.assert_almost_equal(phi, kwargs_multipole["phi_m"]) def test_options(self): - param_class = PowerLawFixedShearMultipole(self.kwargs_multipole, 0.07) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_multipole, self.zlist_multipole, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, particle_swarm=False, - foreground_rays=None, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_multipole, + self.zlist_multipole, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + particle_swarm=False, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_multipole, self.zlens, self.zsource, self.zlist_multipole, multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_multipole, + self.zlens, + self.zsource, + self.zlist_multipole, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) kwargs_shear = kwargs_final[1] - shear_out = np.hypot(kwargs_shear['gamma1'], kwargs_shear['gamma2']) + shear_out = np.hypot(kwargs_shear["gamma1"], kwargs_shear["gamma2"]) npt.assert_almost_equal(shear_out, 0.07) foreground_rays = optimizer.fast_rayshooting._foreground_rays - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_multipole, 
self.zlist_multipole, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, particle_swarm=False, - re_optimize=True, re_optimize_scale=0.5, - foreground_rays=foreground_rays, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_multipole, + self.zlist_multipole, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + particle_swarm=False, + re_optimize=True, + re_optimize_scale=0.5, + foreground_rays=foreground_rays, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True) - lensmodel = LensModel(self.lens_model_list_multipole, self.zlens, self.zsource, self.zlist_multipole, - multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_multipole, + self.zlens, + self.zsource, + self.zlist_multipole, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) kwargs_shear = kwargs_final[1] - shear_out = np.hypot(kwargs_shear['gamma1'], kwargs_shear['gamma2']) + shear_out = np.hypot(kwargs_shear["gamma1"], kwargs_shear["gamma2"]) npt.assert_almost_equal(shear_out, 0.07) def test_multi_threading(self): - param_class = PowerLawFixedShearMultipole(self.kwargs_multipole, 0.07) - optimizer = Optimizer(self.x_image, self.y_image, self.lens_model_list_multipole, self.zlist_multipole, - self.zlens, self.zsource, param_class, pso_convergence_mean=50000, particle_swarm=True, - foreground_rays=None, tol_source=1e-5, tol_simplex_func=1e-3, simplex_n_iterations=400) + optimizer = Optimizer( + self.x_image, + self.y_image, + self.lens_model_list_multipole, + self.zlist_multipole, + self.zlens, + self.zsource, + param_class, + pso_convergence_mean=50000, + particle_swarm=True, + foreground_rays=None, + tol_source=1e-5, + tol_simplex_func=1e-3, + simplex_n_iterations=400, + ) kwargs_final, source = optimizer.optimize(50, 100, verbose=True, threadCount=5) - lensmodel = LensModel(self.lens_model_list_multipole, self.zlens, self.zsource, self.zlist_multipole, - multi_plane=True) - beta_x, beta_y = lensmodel.ray_shooting(self.x_image, self.y_image, kwargs_final) + lensmodel = LensModel( + self.lens_model_list_multipole, + self.zlens, + self.zsource, + self.zlist_multipole, + multi_plane=True, + ) + beta_x, beta_y = lensmodel.ray_shooting( + self.x_image, self.y_image, kwargs_final + ) npt.assert_almost_equal(np.sum(beta_x) - 4 * np.mean(beta_x), 0) npt.assert_almost_equal(np.sum(beta_y) - 4 * np.mean(beta_y), 0) -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Optimizer/test_param_classes.py b/test/test_LensModel/test_Optimizer/test_param_classes.py index b78fbb806..35578a02a 100644 --- a/test/test_LensModel/test_Optimizer/test_param_classes.py +++ b/test/test_LensModel/test_Optimizer/test_param_classes.py @@ -1,81 +1,125 @@ import pytest import numpy as np import numpy.testing as npt -from lenstronomy.LensModel.QuadOptimizer.param_manager import PowerLawFixedShear, \ - PowerLawFixedShearMultipole, PowerLawFreeShear, PowerLawFreeShearMultipole +from lenstronomy.LensModel.QuadOptimizer.param_manager import ( + PowerLawFixedShear, + PowerLawFixedShearMultipole, + 
PowerLawFreeShear, + PowerLawFreeShearMultipole, +) class TestParamClasses(object): - def setup_method(self): - self.zlens, self.zsource = 0.5, 1.5 - epl_kwargs = {'theta_E': 1., 'center_x': 0., 'center_y': 0., 'e1': 0.2, 'e2': 0.1, 'gamma': 2.05} - shear_kwargs = {'gamma1': 0.05, 'gamma2': -0.04} + epl_kwargs = { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.2, + "e2": 0.1, + "gamma": 2.05, + } + shear_kwargs = {"gamma1": 0.05, "gamma2": -0.04} kwargs_macro = [epl_kwargs, shear_kwargs] self.x_image = np.array([0.65043538, -0.31109505, 0.78906059, -0.86222271]) self.y_image = np.array([-0.89067493, 0.94851787, 0.52882605, -0.25403778]) - halo_list = ['SIS', 'SIS', 'SIS'] + halo_list = ["SIS", "SIS", "SIS"] halo_z = [self.zlens - 0.1, self.zlens, self.zlens + 0.4] - halo_kwargs = [{'theta_E': 0.1, 'center_x': 0.3, 'center_y': -0.9}, - {'theta_E': 0.15, 'center_x': 1.3, 'center_y': -0.5}, - {'theta_E': 0.06, 'center_x': -0.4, 'center_y': -0.4}] + halo_kwargs = [ + {"theta_E": 0.1, "center_x": 0.3, "center_y": -0.9}, + {"theta_E": 0.15, "center_x": 1.3, "center_y": -0.5}, + {"theta_E": 0.06, "center_x": -0.4, "center_y": -0.4}, + ] self.kwargs_epl = kwargs_macro + halo_kwargs self.zlist_epl = [self.zlens, self.zlens] + halo_z - self.lens_model_list_epl = ['EPL', 'SHEAR'] + halo_list + self.lens_model_list_epl = ["EPL", "SHEAR"] + halo_list - kwargs_multi = [{'m': 4, 'a_m': -0.04, 'phi_m': -0.2, 'center_x': 0.1, 'center_y': -0.1}] + kwargs_multi = [ + {"m": 4, "a_m": -0.04, "phi_m": -0.2, "center_x": 0.1, "center_y": -0.1} + ] self.kwargs_multipole = kwargs_macro + kwargs_multi + halo_kwargs self.zlist_multipole = [self.zlens, self.zlens, self.zlens] + halo_z - self.lens_model_list_multipole = ['EPL', 'SHEAR'] + ['MULTIPOLE'] + halo_list + self.lens_model_list_multipole = ["EPL", "SHEAR"] + ["MULTIPOLE"] + halo_list def test_param_penalty(self): - param_class = PowerLawFreeShear(self.kwargs_epl) args = param_class.kwargs_to_args(self.kwargs_epl) param_penalty = param_class.param_chi_square_penalty(args) npt.assert_almost_equal(0, param_penalty) def test_plaw_free_shear(self): - param_class = PowerLawFreeShear(self.kwargs_epl) - npt.assert_(param_class.to_vary_index==2) - kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05}, - {'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}] + npt.assert_(param_class.to_vary_index == 2) + kwargs_in = [ + { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.3, + "e1": 0.25, + "e2": 0.1, + "gamma": 2.05, + }, + {"gamma1": 0.05, "gamma2": -0.01}, + {"theta_E": -0.3, "center_x": 0.0, "center_y": 0.04}, + ] args_epl = param_class.kwargs_to_args(kwargs_in) npt.assert_almost_equal(args_epl, [1, 0, 0.3, 0.25, 0.1, 0.05, -0.01]) kwargs_out = param_class.args_to_kwargs(args_epl) - npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05) + npt.assert_almost_equal(kwargs_out[0]["gamma"], 2.05) for key in kwargs_out[-1].keys(): npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_epl[-1][key]) def test_plaw_fixed_shear(self): param_class = PowerLawFixedShear(self.kwargs_epl, 0.12) npt.assert_(param_class.to_vary_index == 2) - kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05}, - {'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}] + kwargs_in = [ + { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.3, + "e1": 0.25, + "e2": 0.1, + "gamma": 2.05, + }, + {"gamma1": 0.05, "gamma2": 
-0.01}, + {"theta_E": -0.3, "center_x": 0.0, "center_y": 0.04}, + ] args_epl = param_class.kwargs_to_args(kwargs_in) npt.assert_almost_equal(args_epl[0:5], [1, 0, 0.3, 0.25, 0.1]) kwargs_out = param_class.args_to_kwargs(args_epl) - npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05) - npt.assert_almost_equal(kwargs_out[1]['gamma1'] ** 2 + kwargs_out[1]['gamma2']**2, 0.12 ** 2) + npt.assert_almost_equal(kwargs_out[0]["gamma"], 2.05) + npt.assert_almost_equal( + kwargs_out[1]["gamma1"] ** 2 + kwargs_out[1]["gamma2"] ** 2, 0.12**2 + ) for key in kwargs_out[-1].keys(): npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_epl[-1][key]) def test_plawboxydisky_fixed_shear(self): - param_class = PowerLawFixedShearMultipole(self.kwargs_multipole, 0.12) npt.assert_(param_class.to_vary_index == 3) - kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05}, - {'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}] + kwargs_in = [ + { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.3, + "e1": 0.25, + "e2": 0.1, + "gamma": 2.05, + }, + {"gamma1": 0.05, "gamma2": -0.01}, + {"theta_E": -0.3, "center_x": 0.0, "center_y": 0.04}, + ] args_epl = param_class.kwargs_to_args(kwargs_in) npt.assert_almost_equal(args_epl[0:5], [1, 0, 0.3, 0.25, 0.1]) kwargs_out = param_class.args_to_kwargs(args_epl) - npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05) - npt.assert_almost_equal(kwargs_out[1]['gamma1'] ** 2 + kwargs_out[1]['gamma2']**2, 0.12 ** 2) + npt.assert_almost_equal(kwargs_out[0]["gamma"], 2.05) + npt.assert_almost_equal( + kwargs_out[1]["gamma1"] ** 2 + kwargs_out[1]["gamma2"] ** 2, 0.12**2 + ) for key in kwargs_out[-1].keys(): npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_multipole[-1][key]) @@ -83,15 +127,24 @@ def test_plawboxydisky_fixed_shear(self): npt.assert_almost_equal(kwargs_out[2][key], self.kwargs_multipole[2][key]) def test_plawboxydisky_fixed_shear(self): - param_class = PowerLawFreeShearMultipole(self.kwargs_multipole) npt.assert_(param_class.to_vary_index == 3) - kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05}, - {'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}] + kwargs_in = [ + { + "theta_E": 1.0, + "center_x": 0.0, + "center_y": 0.3, + "e1": 0.25, + "e2": 0.1, + "gamma": 2.05, + }, + {"gamma1": 0.05, "gamma2": -0.01}, + {"theta_E": -0.3, "center_x": 0.0, "center_y": 0.04}, + ] args_epl = param_class.kwargs_to_args(kwargs_in) npt.assert_almost_equal(args_epl, [1, 0, 0.3, 0.25, 0.1, 0.05, -0.01]) kwargs_out = param_class.args_to_kwargs(args_epl) - npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05) + npt.assert_almost_equal(kwargs_out[0]["gamma"], 2.05) for key in kwargs_out[-1].keys(): npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_multipole[-1][key]) @@ -99,10 +152,9 @@ def test_plawboxydisky_fixed_shear(self): npt.assert_almost_equal(kwargs_out[2][key], self.kwargs_multipole[2][key]) def test_bounds(self): - - param_names = ['theta_E', 'center_x', 'center_y', 'e1', 'e2'] + param_names = ["theta_E", "center_x", "center_y", "e1", "e2"] args = [self.kwargs_epl[0][param_name] for param_name in param_names] - param_names = ['gamma1', 'gamma2'] + param_names = ["gamma1", "gamma2"] args += [self.kwargs_epl[1][param_name] for param_name in param_names] shift = np.array([0.1, 0.05, 0.05, 0.1, 0.1, 0.025, 0.025]) @@ -125,5 +177,5 @@ def test_bounds(self): npt.assert_almost_equal(bounds[1], 
np.array(args) + shift) -if __name__ == '__main__': - pytest.main() \ No newline at end of file +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_arc_perturbations.py b/test/test_LensModel/test_Profiles/test_arc_perturbations.py index c757e9ff1..dae7528ff 100644 --- a/test/test_LensModel/test_Profiles/test_arc_perturbations.py +++ b/test/test_LensModel/test_Profiles/test_arc_perturbations.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.arc_perturbations import ArcPerturbations @@ -9,19 +9,24 @@ class TestArcPerturbations(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.model = ArcPerturbations() - self.kwargs_lens = {'coeff': 2, 'd_r': 0.2, 'd_phi': np.pi, 'center_x': 0, 'center_y': 0} + self.kwargs_lens = { + "coeff": 2, + "d_r": 0.2, + "d_phi": np.pi, + "center_x": 0, + "center_y": 0, + } def test_function(self): x, y = util.make_grid(numPix=100, deltapix=0.1) values = self.model.function(x, y, **self.kwargs_lens) - #import matplotlib.pyplot as plt - #plt.matshow(util.array2image(values)) - #plt.show() + # import matplotlib.pyplot as plt + # plt.matshow(util.array2image(values)) + # plt.show() npt.assert_almost_equal(values[0], 0, decimal=5) def test_derivatives(self): @@ -53,5 +58,5 @@ def test_hessian(self): npt.assert_almost_equal(f_xy_num, f_xy, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_base_profile.py b/test/test_LensModel/test_Profiles/test_base_profile.py index 86dc5964d..825c697e8 100644 --- a/test/test_LensModel/test_Profiles/test_base_profile.py +++ b/test/test_LensModel/test_Profiles/test_base_profile.py @@ -3,7 +3,6 @@ class TestBaseProfile(object): - def setup_method(self): pass @@ -14,7 +13,6 @@ def test_base_functions(self): class TestRaise(unittest.TestCase): - def test_raise(self): base = LensProfileBase() with self.assertRaises(ValueError): diff --git a/test/test_LensModel/test_Profiles/test_chameleon.py b/test/test_LensModel/test_Profiles/test_chameleon.py index 23bc5e10b..b434139c0 100644 --- a/test/test_LensModel/test_Profiles/test_chameleon.py +++ b/test/test_LensModel/test_Profiles/test_chameleon.py @@ -1,25 +1,34 @@ - import pytest import numpy as np import numpy.testing as npt from lenstronomy.LensModel.Profiles.nie import NIE -from lenstronomy.LensModel.Profiles.chameleon import Chameleon, DoubleChameleon, DoubleChameleonPointMass, TripleChameleon -from lenstronomy.LightModel.Profiles.chameleon import DoubleChameleon as DoubleChameleonLight -from lenstronomy.LightModel.Profiles.chameleon import TripleChameleon as TripleChameleonLight +from lenstronomy.LensModel.Profiles.chameleon import ( + Chameleon, + DoubleChameleon, + DoubleChameleonPointMass, + TripleChameleon, +) +from lenstronomy.LightModel.Profiles.chameleon import ( + DoubleChameleon as DoubleChameleonLight, +) +from lenstronomy.LightModel.Profiles.chameleon import ( + TripleChameleon as TripleChameleonLight, +) import lenstronomy.Util.param_util as param_util class TestChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): self.chameleon = Chameleon() self.nie = NIE() def test_theta_E_convert(self): w_c, w_t = 2, 1 - theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=w_c, w_t=w_t, e1=0, e2=0) + theta_E_convert, w_c, w_t, 
s_scale_1, s_scale_2 = self.chameleon.param_convert( + alpha_1=1, w_c=w_c, w_t=w_t, e1=0, e2=0 + ) assert w_c == 1 assert w_t == 2 assert theta_E_convert == 0 @@ -30,16 +39,28 @@ def test_function(self): :return: """ x = np.linspace(0.1, 10, 10) - w_c, w_t = 0.5, 1. + w_c, w_t = 0.5, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2) - kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2} - kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2} - f_ = self.chameleon.function(x=x, y=1., **kwargs_light) - f_1 = self.nie.function(x=x, y=1., **kwargs_1) - f_2 = self.nie.function(x=x, y=1., **kwargs_2) + kwargs_light = {"alpha_1": 1.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert( + alpha_1=1, w_c=0.5, w_t=1.0, e1=e1, e2=e2 + ) + kwargs_1 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_1, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_2, + "e1": e1, + "e2": e2, + } + f_ = self.chameleon.function(x=x, y=1.0, **kwargs_light) + f_1 = self.nie.function(x=x, y=1.0, **kwargs_1) + f_2 = self.nie.function(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(f_, (f_1 - f_2), decimal=5) def test_derivatives(self): @@ -48,19 +69,31 @@ def test_derivatives(self): :return: """ x = np.linspace(0.1, 10, 10) - w_c, w_t = 0.5, 1. + w_c, w_t = 0.5, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2) - kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2} - kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2} - f_x, f_y = self.chameleon.derivatives(x=x, y=1., **kwargs_light) - f_x_1, f_y_1 = self.nie.derivatives(x=x, y=1., **kwargs_1) - f_x_2, f_y_2 = self.nie.derivatives(x=x, y=1., **kwargs_2) + kwargs_light = {"alpha_1": 1.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert( + alpha_1=1, w_c=0.5, w_t=1.0, e1=e1, e2=e2 + ) + kwargs_1 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_1, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_2, + "e1": e1, + "e2": e2, + } + f_x, f_y = self.chameleon.derivatives(x=x, y=1.0, **kwargs_light) + f_x_1, f_y_1 = self.nie.derivatives(x=x, y=1.0, **kwargs_1) + f_x_2, f_y_2 = self.nie.derivatives(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(f_x, (f_x_1 - f_x_2), decimal=5) npt.assert_almost_equal(f_y, (f_y_1 - f_y_2), decimal=5) - f_x, f_y = self.chameleon.derivatives(x=1, y=0., **kwargs_light) + f_x, f_y = self.chameleon.derivatives(x=1, y=0.0, **kwargs_light) npt.assert_almost_equal(f_x, 1, decimal=1) def test_hessian(self): @@ -69,47 +102,58 @@ def test_hessian(self): :return: """ x = np.linspace(0.1, 10, 10) - w_c, w_t = 0.5, 1. 
+ w_c, w_t = 0.5, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2) - kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2} - kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2} - f_xx, f_xy, f_yx, f_yy = self.chameleon.hessian(x=x, y=1., **kwargs_light) - f_xx_1, f_xy_1, f_yx_1, f_yy_1 = self.nie.hessian(x=x, y=1., **kwargs_1) - f_xx_2, f_xy_2, f_yx_2, f_yy_2 = self.nie.hessian(x=x, y=1., **kwargs_2) + kwargs_light = {"alpha_1": 1.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert( + alpha_1=1, w_c=0.5, w_t=1.0, e1=e1, e2=e2 + ) + kwargs_1 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_1, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "theta_E": theta_E_convert, + "s_scale": s_scale_2, + "e1": e1, + "e2": e2, + } + f_xx, f_xy, f_yx, f_yy = self.chameleon.hessian(x=x, y=1.0, **kwargs_light) + f_xx_1, f_xy_1, f_yx_1, f_yy_1 = self.nie.hessian(x=x, y=1.0, **kwargs_1) + f_xx_2, f_xy_2, f_yx_2, f_yy_2 = self.nie.hessian(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(f_xx, (f_xx_1 - f_xx_2), decimal=5) npt.assert_almost_equal(f_yy, (f_yy_1 - f_yy_2), decimal=5) npt.assert_almost_equal(f_xy, (f_xy_1 - f_xy_2), decimal=5) npt.assert_almost_equal(f_yx, (f_yx_1 - f_yx_2), decimal=5) def test_static(self): - x, y = 1., 1. + x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} + kwargs_light = {"alpha_1": 1.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} f_ = self.chameleon.function(x, y, **kwargs_light) self.chameleon.set_static(**kwargs_light) f_static = self.chameleon.function(x, y, **kwargs_light) npt.assert_almost_equal(f_, f_static, decimal=8) self.chameleon.set_dynamic() - kwargs_light = {'alpha_1': 2., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} + kwargs_light = {"alpha_1": 2.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} f_dyn = self.chameleon.function(x, y, **kwargs_light) assert f_dyn != f_static class TestDoubleChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = DoubleChameleon() names = chameleon.param_names - assert names[0] == 'alpha_1' + assert names[0] == "alpha_1" def test_function(self): """ @@ -121,16 +165,39 @@ def test_function(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - theta_E = 1. - ratio = 2. + theta_E = 1.0 + ratio = 2.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - - kwargs_1 = {'alpha_1': theta_E / (1 + 1. 
/ ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - flux = doublechameleon.function(x=x, y=1., **kwargs_light) - flux1 = chameleon.function(x=x, y=1., **kwargs_1) - flux2 = chameleon.function(x=x, y=1., **kwargs_2) + kwargs_light = { + "alpha_1": 1.0, + "ratio": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + + kwargs_1 = { + "alpha_1": theta_E / (1 + 1.0 / ratio), + "w_c": 0.5, + "w_t": 1.0, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "alpha_1": theta_E / (1 + ratio), + "w_c": 0.1, + "w_t": 0.5, + "e1": e1, + "e2": e2, + } + flux = doublechameleon.function(x=x, y=1.0, **kwargs_light) + flux1 = chameleon.function(x=x, y=1.0, **kwargs_1) + flux2 = chameleon.function(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(flux, flux1 + flux2, decimal=8) def test_derivatives(self): @@ -143,16 +210,39 @@ def test_derivatives(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - theta_E = 1. - ratio = 2. + theta_E = 1.0 + ratio = 2.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - - kwargs_1 = {'alpha_1': theta_E / (1 + 1. / ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - f_x, f_y = doublechameleon.derivatives(x=x, y=1., **kwargs_light) - f_x1, f_y1 = chameleon.derivatives(x=x, y=1., **kwargs_1) - f_x2, f_y2 = chameleon.derivatives(x=x, y=1., **kwargs_2) + kwargs_light = { + "alpha_1": 1.0, + "ratio": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + + kwargs_1 = { + "alpha_1": theta_E / (1 + 1.0 / ratio), + "w_c": 0.5, + "w_t": 1.0, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "alpha_1": theta_E / (1 + ratio), + "w_c": 0.1, + "w_t": 0.5, + "e1": e1, + "e2": e2, + } + f_x, f_y = doublechameleon.derivatives(x=x, y=1.0, **kwargs_light) + f_x1, f_y1 = chameleon.derivatives(x=x, y=1.0, **kwargs_1) + f_x2, f_y2 = chameleon.derivatives(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(f_x, f_x1 + f_x2, decimal=8) npt.assert_almost_equal(f_y, f_y1 + f_y2, decimal=8) @@ -166,24 +256,60 @@ def test_hessian(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - theta_E = 1. - ratio = 2. 
+ theta_E = 1.0 + ratio = 2.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'alpha_1': theta_E, 'ratio': ratio, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - kwargs_light = {'amp': theta_E, 'ratio': ratio, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - - kwargs_1 = {'alpha_1': theta_E / (1 + 1./ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=x, y=1., **kwargs_lens) - f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1., **kwargs_1) - f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1., **kwargs_2) + kwargs_lens = { + "alpha_1": theta_E, + "ratio": ratio, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + kwargs_light = { + "amp": theta_E, + "ratio": ratio, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + + kwargs_1 = { + "alpha_1": theta_E / (1 + 1.0 / ratio), + "w_c": 0.5, + "w_t": 1.0, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "alpha_1": theta_E / (1 + ratio), + "w_c": 0.1, + "w_t": 0.5, + "e1": e1, + "e2": e2, + } + f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=x, y=1.0, **kwargs_lens) + f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1.0, **kwargs_1) + f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(f_xx, f_xx1 + f_xx2, decimal=8) npt.assert_almost_equal(f_yy, f_yy1 + f_yy2, decimal=8) npt.assert_almost_equal(f_xy, f_xy1 + f_xy2, decimal=8) npt.assert_almost_equal(f_yx, f_yx1 + f_yx2, decimal=8) light = DoubleChameleonLight() - f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens) - kappa = 1./2 * (f_xx + f_yy) + f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian( + x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens + ) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_norm = kappa / np.mean(kappa) flux = light.function(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_light) flux_norm = flux / np.mean(flux) @@ -191,31 +317,52 @@ def test_hessian(self): def test_static(self): doublechameleon = DoubleChameleon() - x, y = 1., 1. 
+ x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1, 'ratio': 0.5, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} + kwargs_light = { + "alpha_1": 1, + "ratio": 0.5, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } f_ = doublechameleon.function(x, y, **kwargs_light) doublechameleon.set_static(**kwargs_light) f_static = doublechameleon.function(x, y, **kwargs_light) npt.assert_almost_equal(f_, f_static, decimal=8) doublechameleon.set_dynamic() - kwargs_light = {'alpha_1': 2, 'ratio': 0.5, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} + kwargs_light = { + "alpha_1": 2, + "ratio": 0.5, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } f_dyn = doublechameleon.function(x, y, **kwargs_light) assert f_dyn != f_static class TestDoubleChameleonPointMass(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = DoubleChameleonPointMass() names = chameleon.param_names - assert names[0] == 'alpha_1' + assert names[0] == "alpha_1" def test_function(self): """ @@ -226,8 +373,20 @@ def test_function(self): phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - flux = doublechameleon.function(x=1, y=1., **kwargs_light) + kwargs_light = { + "alpha_1": 1.0, + "ratio_pointmass": 3, + "ratio_chameleon": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + flux = doublechameleon.function(x=1, y=1.0, **kwargs_light) npt.assert_almost_equal(flux, 1.2602247653486218, decimal=4) def test_derivatives(self): @@ -239,9 +398,20 @@ def test_derivatives(self): phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - f_x, f_y = doublechameleon.derivatives(x=1, y=1., **kwargs_light) + kwargs_light = { + "alpha_1": 1.0, + "ratio_pointmass": 3, + "ratio_chameleon": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + f_x, f_y = doublechameleon.derivatives(x=1, y=1.0, **kwargs_light) npt.assert_almost_equal(f_x, 0.43419725313692664, decimal=4) npt.assert_almost_equal(f_y, 0.4521464786719726, decimal=4) @@ -254,9 +424,20 @@ def test_hessian(self): phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=1, y=1., **kwargs_light) + kwargs_light = { + "alpha_1": 1.0, + "ratio_pointmass": 3, + "ratio_chameleon": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=1, y=1.0, **kwargs_light) npt.assert_almost_equal(f_xx, 0.06255816336369684, decimal=4) npt.assert_almost_equal(f_xy, 
-0.3986532840628945, decimal=4) npt.assert_almost_equal(f_yx, -0.3986532840628945, decimal=4) @@ -264,16 +445,15 @@ def test_hessian(self): class TestTripleChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = TripleChameleon() names = chameleon.param_names - assert names[0] == 'alpha_1' + assert names[0] == "alpha_1" def test_function(self): """ @@ -285,24 +465,37 @@ def test_function(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - ratio12 = 2. + ratio12 = 2.0 ratio13 = 3 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } - - amp1 = 1. / (1. + 1. / ratio12 + 1. / ratio13) + kwargs_light = { + "alpha_1": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } + + amp1 = 1.0 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 - kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - flux = triplechameleon.function(x=x, y=1., **kwargs_light) - flux1 = chameleon.function(x=x, y=1., **kwargs_1) - flux2 = chameleon.function(x=x, y=1., **kwargs_2) - flux3 = chameleon.function(x=x, y=1., **kwargs_3) + kwargs_1 = {"alpha_1": amp1, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + kwargs_2 = {"alpha_1": amp2, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + kwargs_3 = {"alpha_1": amp3, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + flux = triplechameleon.function(x=x, y=1.0, **kwargs_light) + flux1 = chameleon.function(x=x, y=1.0, **kwargs_1) + flux2 = chameleon.function(x=x, y=1.0, **kwargs_2) + flux3 = chameleon.function(x=x, y=1.0, **kwargs_3) npt.assert_almost_equal(flux, flux1 + flux2 + flux3, decimal=8) def test_derivatives(self): @@ -315,25 +508,37 @@ def test_derivatives(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - ratio12 = 2. + ratio12 = 2.0 ratio13 = 3 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } - - amp1 = 1. / (1. + 1. / ratio12 + 1. 
/ ratio13) + kwargs_light = { + "alpha_1": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } + + amp1 = 1.0 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 - kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - f_x, f_y = triplechameleon.derivatives(x=x, y=1., **kwargs_light) - f_x1, f_y1 = chameleon.derivatives(x=x, y=1., **kwargs_1) - f_x2, f_y2 = chameleon.derivatives(x=x, y=1., **kwargs_2) - f_x3, f_y3 = chameleon.derivatives(x=x, y=1., **kwargs_3) + kwargs_1 = {"alpha_1": amp1, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + kwargs_2 = {"alpha_1": amp2, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + kwargs_3 = {"alpha_1": amp3, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + f_x, f_y = triplechameleon.derivatives(x=x, y=1.0, **kwargs_light) + f_x1, f_y1 = chameleon.derivatives(x=x, y=1.0, **kwargs_1) + f_x2, f_y2 = chameleon.derivatives(x=x, y=1.0, **kwargs_2) + f_x3, f_y3 = chameleon.derivatives(x=x, y=1.0, **kwargs_3) npt.assert_almost_equal(f_x, f_x1 + f_x2 + f_x3, decimal=8) npt.assert_almost_equal(f_y, f_y1 + f_y2 + f_y3, decimal=8) @@ -347,39 +552,65 @@ def test_hessian(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - ratio12 = 2. + ratio12 = 2.0 ratio13 = 3 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } - - kwargs_light = {'amp': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } - - amp1 = 1. / (1. + 1. / ratio12 + 1. 
/ ratio13) + kwargs_lens = { + "alpha_1": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } + + kwargs_light = { + "amp": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } + + amp1 = 1.0 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 - kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - - f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian(x=x, y=1., **kwargs_lens) - f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1., **kwargs_1) - f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1., **kwargs_2) - f_xx3, f_xy3, f_yx3, f_yy3 = chameleon.hessian(x=x, y=1., **kwargs_3) + kwargs_1 = {"alpha_1": amp1, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + kwargs_2 = {"alpha_1": amp2, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + kwargs_3 = {"alpha_1": amp3, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + + f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian(x=x, y=1.0, **kwargs_lens) + f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1.0, **kwargs_1) + f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1.0, **kwargs_2) + f_xx3, f_xy3, f_yx3, f_yy3 = chameleon.hessian(x=x, y=1.0, **kwargs_3) npt.assert_almost_equal(f_xx, f_xx1 + f_xx2 + f_xx3, decimal=8) npt.assert_almost_equal(f_yy, f_yy1 + f_yy2 + f_yy3, decimal=8) npt.assert_almost_equal(f_xy, f_xy1 + f_xy2 + f_xy3, decimal=8) npt.assert_almost_equal(f_yx, f_yx1 + f_yx2 + f_yx3, decimal=8) light = TripleChameleonLight() - f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens) - kappa = 1./2 * (f_xx + f_yy) + f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian( + x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens + ) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_norm = kappa / np.mean(kappa) flux = light.function(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_light) flux_norm = flux / np.mean(flux) @@ -387,29 +618,53 @@ def test_hessian(self): def test_static(self): triplechameleon = TripleChameleon() - x, y = 1., 1. + x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 - ratio12 = 2. 
+ ratio12 = 2.0 ratio13 = 3 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } + kwargs_lens = { + "alpha_1": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } f_ = triplechameleon.function(x, y, **kwargs_lens) triplechameleon.set_static(**kwargs_lens) f_static = triplechameleon.function(x, y, **kwargs_lens) npt.assert_almost_equal(f_, f_static, decimal=8) triplechameleon.set_dynamic() - kwargs_lens = {'alpha_1': 2., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, - 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2 - } + kwargs_lens = { + "alpha_1": 2.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.1, + "w_t3": 0.5, + "e13": e1, + "e23": e2, + } f_dyn = triplechameleon.function(x, y, **kwargs_lens) assert f_dyn != f_static -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cnfw.py b/test/test_LensModel/test_Profiles/test_cnfw.py index d64987e0b..1de198ea7 100644 --- a/test/test_LensModel/test_Profiles/test_cnfw.py +++ b/test/test_LensModel/test_Profiles/test_cnfw.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman', 'sibirrer' +__author__ = "dgilman", "sibirrer" from lenstronomy.LensModel.Profiles.cnfw import CNFW from lenstronomy.LensModel.Profiles.nfw import NFW @@ -9,11 +9,9 @@ class Testcnfw(object): - """ - tests the Gaussian methods - """ - def setup_method(self): + """Tests the Gaussian methods.""" + def setup_method(self): self.cn = CNFW() self.n = NFW() @@ -21,19 +19,17 @@ def test_pot(self): # this test requires that the CNFW profile with a very small core results in the potential of the NFW profile pot1 = self.cn.function(x=2, y=0, Rs=1, alpha_Rs=1, r_core=0.001) pot2 = self.n.function(x=2, y=0, Rs=1, alpha_Rs=1) - npt.assert_almost_equal(pot1/pot2, 1, decimal=3) + npt.assert_almost_equal(pot1 / pot2, 1, decimal=3) def _kappa_integrand(self, x, y, Rs, m0, r_core): - - return 2*np.pi*x * self.cn.density_2d(x, y, Rs, m0, r_core) + return 2 * np.pi * x * self.cn.density_2d(x, y, Rs, m0, r_core) def test_derivatives(self): + Rs = 10.0 + rho0 = 1.0 + r_core = 7.0 - Rs = 10. - rho0 = 1. - r_core = 7. - - R = np.linspace(0.1*Rs, 4*Rs, 1000) + R = np.linspace(0.1 * Rs, 4 * Rs, 1000) alpha_Rs = self.cn._rho2alpha(rho0, Rs, r_core) alpha = self.cn.alpha_r(R, Rs, rho0, r_core) @@ -41,13 +37,13 @@ def test_derivatives(self): alpha_derivatives = self.cn.derivatives(R, 0, Rs, alpha_Rs, r_core)[0] npt.assert_almost_equal(alpha_derivatives / alpha_theory, 1) - npt.assert_almost_equal(alpha/alpha_theory, 1) - npt.assert_almost_equal(alpha/alpha_derivatives, 1) + npt.assert_almost_equal(alpha / alpha_theory, 1) + npt.assert_almost_equal(alpha / alpha_derivatives, 1) def test_mass_3d(self): - Rs = 10. - rho0 = 1. - r_core = 7. 
+ Rs = 10.0 + rho0 = 1.0 + r_core = 7.0 R = np.linspace(0.1 * Rs, 4 * Rs, 1000) alpha_Rs = self.cn._rho2alpha(rho0, Rs, r_core) @@ -56,10 +52,9 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) def test_mproj(self): - - Rs = 10. - r_core = 0.7*Rs - Rmax = np.linspace(0.6*Rs, 1.1*Rs, 1000) + Rs = 10.0 + r_core = 0.7 * Rs + Rmax = np.linspace(0.6 * Rs, 1.1 * Rs, 1000) dr = Rmax[1] - Rmax[0] m0 = 1 @@ -72,29 +67,27 @@ def test_mproj(self): npt.assert_almost_equal(mean_diff, 0, decimal=3) def test_GF(self): - x_array = np.array([0.5, 0.8, 1.2]) b = 0.7 Garray = self.cn._G(x_array, b) Farray = self.cn._F(x_array, b) for i in range(0, len(x_array)): npt.assert_almost_equal(Farray[i], self.cn._F(x_array[i], b)) - npt.assert_almost_equal(Garray[i], self.cn._G(x_array[i],b)) + npt.assert_almost_equal(Garray[i], self.cn._G(x_array[i], b)) def test_gamma(self): + Rs = 10.0 + rho0 = 1.0 + r_core = 0.7 * Rs - Rs = 10. - rho0 = 1. - r_core = 0.7*Rs - - R = np.array([0.5*Rs, 0.8*Rs, 1.1*Rs]) + R = np.array([0.5 * Rs, 0.8 * Rs, 1.1 * Rs]) - g1_array, g2_array = self.cn.cnfwGamma(R, Rs, rho0, r_core, R, 0.6*Rs) + g1_array, g2_array = self.cn.cnfwGamma(R, Rs, rho0, r_core, R, 0.6 * Rs) for i in range(0, len(R)): - g1, g2 = self.cn.cnfwGamma(R[i], Rs, rho0, r_core, R[i], 0.6*Rs) + g1, g2 = self.cn.cnfwGamma(R[i], Rs, rho0, r_core, R[i], 0.6 * Rs) npt.assert_almost_equal(g1_array[i], g1) npt.assert_almost_equal(g2_array[i], g2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cnfw_ellipse.py b/test/test_LensModel/test_Profiles/test_cnfw_ellipse.py index 5aa7e993b..57b65cfeb 100644 --- a/test/test_LensModel/test_Profiles/test_cnfw_ellipse.py +++ b/test/test_LensModel/test_Profiles/test_cnfw_ellipse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.cnfw import CNFW @@ -9,10 +9,10 @@ import numpy.testing as npt import pytest + class TestCNFWELLIPSE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.nfw = CNFW() self.nfw_e = CNFW_ELLIPSE() @@ -20,9 +20,9 @@ def setup_method(self): def test_function(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. + Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 r_core = 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) @@ -32,7 +32,7 @@ def test_function(self): x = np.array([0]) y = np.array([0]) - q = .8 + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw_e.function(x, y, Rs, alpha_Rs, r_core, e1, e2) @@ -48,9 +48,9 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. + Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 r_core = 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) @@ -65,10 +65,10 @@ def test_derivatives(self): npt.assert_almost_equal(f_x[0], 0, decimal=5) npt.assert_almost_equal(f_y[0], 0, decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - alpha_Rs = 1. - q = .8 + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + alpha_Rs = 1.0 + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw_e.derivatives(x, y, Rs, alpha_Rs, r_core, e1, e2) @@ -80,22 +80,24 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. 
+ Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 r_core = 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.nfw.hessian(x, y, Rs, alpha_Rs, r_core) - f_xx_e, f_xy_e, f_yx_e, f_yy_e = self.nfw_e.hessian(x, y, Rs, alpha_Rs, r_core, e1, e2) + f_xx_e, f_xy_e, f_yx_e, f_yy_e = self.nfw_e.hessian( + x, y, Rs, alpha_Rs, r_core, e1, e2 + ) npt.assert_almost_equal(f_xx[0], f_xx_e[0], decimal=5) npt.assert_almost_equal(f_yy[0], f_yy_e[0], decimal=5) npt.assert_almost_equal(f_xy[0], f_xy_e[0], decimal=5) npt.assert_almost_equal(f_yx[0], f_yx_e[0], decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - q = .8 + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw_e.hessian(x, y, Rs, alpha_Rs, r_core, e1, e2) @@ -107,9 +109,9 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.14605247788956888, decimal=5) def test_mass_3d(self): - Rs = 10. - rho0 = 1. - r_core = 7. + Rs = 10.0 + rho0 = 1.0 + r_core = 7.0 R = np.linspace(0.1 * Rs, 4 * Rs, 1000) alpha_Rs = self.nfw._rho2alpha(rho0, Rs, r_core) @@ -118,5 +120,5 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_const_mag.py b/test/test_LensModel/test_Profiles/test_const_mag.py index 504e76f7e..5294d4e2b 100644 --- a/test/test_LensModel/test_Profiles/test_const_mag.py +++ b/test/test_LensModel/test_Profiles/test_const_mag.py @@ -1,4 +1,4 @@ -__author__ = 'gipagano' +__author__ = "gipagano" import numpy as np @@ -7,295 +7,294 @@ from lenstronomy.Util import util from lenstronomy.LensModel.Profiles.const_mag import ConstMag + class TestCONST_MAG(object): - """ - tests the CONST_MAG profile for different rotations - """ - - def setup_method(self): + """Tests the CONST_MAG profile for different rotations.""" + def setup_method(self): self.const_mag = ConstMag() - + def test_function(self): - y = np.array([1., 2]) - x = np.array([0., 0.]) - - mu_r = 1. - mu_t = 10. - - - # positive parity - + y = np.array([1.0, 2]) + x = np.array([0.0, 0.0]) + + mu_r = 1.0 + mu_t = 10.0 + + # positive parity + parity = 1 - + ############ # rotation 1 ############ - + phi_G = np.pi - - values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) + + values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) delta_pot = values[1] - values[0] - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f_ = self.const_mag.function(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back - + delta_pot_rot = f_[1] - f_[0] - + # compare npt.assert_almost_equal(delta_pot, delta_pot_rot, decimal=4) - + ############ # rotation 2 ############ - - phi_G = np.pi/3. 
- - values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) + + phi_G = np.pi / 3.0 + + values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) delta_pot = values[1] - values[0] - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f_ = self.const_mag.function(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back - + delta_pot_rot = f_[1] - f_[0] - + # compare npt.assert_almost_equal(delta_pot, delta_pot_rot, decimal=4) - - #=========================================================== - - # negative parity - + + # =========================================================== + + # negative parity + parity = -1 - + ############ # rotation 1 ############ - + phi_G = np.pi - - values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) + + values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) delta_pot = values[1] - values[0] - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f_ = self.const_mag.function(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back - + delta_pot_rot = f_[1] - f_[0] - + # compare npt.assert_almost_equal(delta_pot, delta_pot_rot, decimal=4) - + ############ # rotation 2 ############ - - phi_G = np.pi/3. - - values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) + + phi_G = np.pi / 3.0 + + values = self.const_mag.function(x, y, mu_r, mu_t, parity, phi_G) delta_pot = values[1] - values[0] - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f_ = self.const_mag.function(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back - + delta_pot_rot = f_[1] - f_[0] - + # compare npt.assert_almost_equal(delta_pot, delta_pot_rot, decimal=4) - + def test_derivatives(self): x = np.array([1]) y = np.array([2]) - - mu_r = 1. - mu_t = 10. - - - # positive parity - + + mu_r = 1.0 + mu_t = 10.0 + + # positive parity + parity = 1 - + ############ # rotation 1 ############ - + phi_G = np.pi - + f_x, f_y = self.const_mag.derivatives(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f__x, f__y = self.const_mag.derivatives(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back f_x_rot, f_y_rot = util.rotate(f__x, f__y, -phi_G) - + # compare npt.assert_almost_equal(f_x, f_x_rot, decimal=4) npt.assert_almost_equal(f_y, f_y_rot, decimal=4) - + ############ # rotation 2 ############ - - phi_G = np.pi/3. - + + phi_G = np.pi / 3.0 + f_x, f_y = self.const_mag.derivatives(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f__x, f__y = self.const_mag.derivatives(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back f_x_rot, f_y_rot = util.rotate(f__x, f__y, -phi_G) - + # compare npt.assert_almost_equal(f_x, f_x_rot, decimal=4) npt.assert_almost_equal(f_y, f_y_rot, decimal=4) - - #=========================================================== - - # negative parity - + + # =========================================================== + + # negative parity + parity = -1 - + ############ # rotation 1 ############ - + phi_G = np.pi - + f_x, f_y = self.const_mag.derivatives(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f__x, f__y = self.const_mag.derivatives(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back f_x_rot, f_y_rot = util.rotate(f__x, f__y, -phi_G) - + # compare npt.assert_almost_equal(f_x, f_x_rot, decimal=4) npt.assert_almost_equal(f_y, f_y_rot, decimal=4) - + ############ # rotation 2 ############ - - phi_G = np.pi/3. 
- + + phi_G = np.pi / 3.0 + f_x, f_y = self.const_mag.derivatives(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate f__x, f__y = self.const_mag.derivatives(x__, y__, mu_r, mu_t, parity, 0.0) - + # rotate back f_x_rot, f_y_rot = util.rotate(f__x, f__y, -phi_G) - + # compare npt.assert_almost_equal(f_x, f_x_rot, decimal=4) npt.assert_almost_equal(f_y, f_y_rot, decimal=4) - + def test_hessian(self): x = np.array([1]) y = np.array([2]) - - mu_r = 1. - mu_t = 10. - - - # positive parity - + + mu_r = 1.0 + mu_t = 10.0 + + # positive parity + parity = 1 - + ############ # rotation 1 ############ - + phi_G = np.pi - + f_xx, f_xy, f_yx, f_yy = self.const_mag.hessian(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate - f__xx, f__xy, f__yx, f__yy = self.const_mag.hessian(x__, y__, mu_r, mu_t, parity, 0.0) - + f__xx, f__xy, f__yx, f__yy = self.const_mag.hessian( + x__, y__, mu_r, mu_t, parity, 0.0 + ) + # rotate back - kappa = 1./2 * (f__xx + f__yy) - gamma1__ = 1./2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_G) * gamma1__ - np.sin(2 * phi_G) * gamma2__ gamma2 = +np.sin(2 * phi_G) * gamma1__ + np.cos(2 * phi_G) * gamma2__ f_xx_rot = kappa + gamma1 f_yy_rot = kappa - gamma1 f_xy_rot = gamma2 - + # compare npt.assert_almost_equal(f_xx, f_xx_rot, decimal=4) npt.assert_almost_equal(f_yy, f_yy_rot, decimal=4) npt.assert_almost_equal(f_xy, f_xy_rot, decimal=4) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - + ############ # rotation 2 ############ - - phi_G = np.pi/3. - + + phi_G = np.pi / 3.0 + f_xx, f_xy, f_yx, f_yy = self.const_mag.hessian(x, y, mu_r, mu_t, parity, phi_G) - + # rotate x__, y__ = util.rotate(x, y, phi_G) - + # evaluate - f__xx, f__xy, f__yx, f__yy = self.const_mag.hessian(x__, y__, mu_r, mu_t, parity, 0.0) - + f__xx, f__xy, f__yx, f__yy = self.const_mag.hessian( + x__, y__, mu_r, mu_t, parity, 0.0 + ) + # rotate back - kappa = 1./2 * (f__xx + f__yy) - gamma1__ = 1./2 * (f__xx - f__yy) + kappa = 1.0 / 2 * (f__xx + f__yy) + gamma1__ = 1.0 / 2 * (f__xx - f__yy) gamma2__ = f__xy gamma1 = np.cos(2 * phi_G) * gamma1__ - np.sin(2 * phi_G) * gamma2__ gamma2 = +np.sin(2 * phi_G) * gamma1__ + np.cos(2 * phi_G) * gamma2__ f_xx_rot = kappa + gamma1 f_yy_rot = kappa - gamma1 f_xy_rot = gamma2 - + # compare npt.assert_almost_equal(f_xx, f_xx_rot, decimal=4) npt.assert_almost_equal(f_yy, f_yy_rot, decimal=4) npt.assert_almost_equal(f_xy, f_xy_rot, decimal=4) npt.assert_almost_equal(f_yx, f_xy_rot, decimal=4) - - -if __name__ == '__main__': + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_constant_shift.py b/test/test_LensModel/test_Profiles/test_constant_shift.py index 1e7d7005c..3046c2b87 100644 --- a/test/test_LensModel/test_Profiles/test_constant_shift.py +++ b/test/test_LensModel/test_Profiles/test_constant_shift.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.constant_shift import Shift @@ -9,14 +9,13 @@ class TestShift(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.shift = Shift() - alpha_x, alpha_y = 10., 0.1 - self.kwargs_lens = {'alpha_x': alpha_x, 'alpha_y': alpha_y} + alpha_x, alpha_y = 10.0, 0.1 + self.kwargs_lens = {"alpha_x": alpha_x, "alpha_y": alpha_y} def test_function(self): x = np.array([1]) @@ -31,7 +30,7 @@ def 
test_function(self): x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) values = self.shift.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(values[0], 0, decimal=5) + npt.assert_almost_equal(values[0], 0, decimal=5) npt.assert_almost_equal(values[1], 0, decimal=5) def test_derivatives(self): @@ -57,5 +56,5 @@ def test_hessian(self): npt.assert_almost_equal(f_yx, 0, decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_convergence.py b/test/test_LensModel/test_Profiles/test_convergence.py index b272268fd..d5e7105f2 100644 --- a/test/test_LensModel/test_Profiles/test_convergence.py +++ b/test/test_LensModel/test_Profiles/test_convergence.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.convergence import Convergence @@ -9,18 +9,17 @@ class TestConvergence(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.profile = Convergence() - self.kwargs_lens = {'kappa': 0.1} + self.kwargs_lens = {"kappa": 0.1} def test_function(self): x = np.array([1]) y = np.array([0]) values = self.profile.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(values[0], self.kwargs_lens['kappa']/2, decimal=5) + npt.assert_almost_equal(values[0], self.kwargs_lens["kappa"] / 2, decimal=5) x = np.array([0]) y = np.array([0]) values = self.profile.function(x, y, **self.kwargs_lens) @@ -29,7 +28,7 @@ def test_function(self): x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) values = self.profile.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(values[0], 0.25, decimal=5) + npt.assert_almost_equal(values[0], 0.25, decimal=5) npt.assert_almost_equal(values[1], 0.5, decimal=5) def test_derivatives(self): @@ -54,13 +53,13 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, 0, decimal=5) npt.assert_almost_equal(f_yx, 0, decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.profile.hessian(x, y, **self.kwargs_lens) npt.assert_almost_equal(values[0], 0.1, decimal=5) npt.assert_almost_equal(values[3], 0.1, decimal=5) npt.assert_almost_equal(values[1], 0, decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_coreBurkert.py b/test/test_LensModel/test_Profiles/test_coreBurkert.py index 1d0417c9d..7caef9a26 100644 --- a/test/test_LensModel/test_Profiles/test_coreBurkert.py +++ b/test/test_LensModel/test_Profiles/test_coreBurkert.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from lenstronomy.LensModel.Profiles.coreBurkert import CoreBurkert @@ -9,22 +9,18 @@ class TestcBurk(object): - """ - tests the Gaussian methods - """ - def setup_method(self): + """Tests the Gaussian methods.""" + def setup_method(self): self.cb = CoreBurkert() def _kappa_integrand(self, x, y, Rs, m0, r_core): - - return 2*np.pi*x * self.cb.density_2d(x, y, Rs, m0, r_core) + return 2 * np.pi * x * self.cb.density_2d(x, y, Rs, m0, r_core) def test_mproj(self): - Rs = 10 - r_core = 0.7*Rs - Rmax = np.linspace(0.5*Rs, 1.5*Rs, 1000000) + r_core = 0.7 * Rs + Rmax = np.linspace(0.5 * Rs, 1.5 * Rs, 1000000) dr = Rmax[1] - Rmax[0] m0 = 1 @@ -34,32 +30,29 @@ def test_mproj(self): npt.assert_almost_equal(integrand, kappa_integrand, decimal=3) def test_potential(self): - Rs = 10 rho0 = 1 - r_core = 0.6*Rs - R = np.linspace(0.1*Rs, 2*Rs, 1000000) + r_core = 0.6 * Rs + R = 
np.linspace(0.1 * Rs, 2 * Rs, 1000000) potential = self.cb.function(R, 0, Rs, rho0, r_core) alpha_num = np.gradient(potential, R[1] - R[0]) alpha = self.cb.derivatives(R, 0, Rs, rho0, r_core)[0] npt.assert_almost_equal(alpha_num, alpha, decimal=4) def test_derivatives(self): - Rs = 10 rho0 = 1 r_core = 7 - R = np.linspace(0.1*Rs, 4*Rs, 1000) + R = np.linspace(0.1 * Rs, 4 * Rs, 1000) alpha = self.cb.coreBurkAlpha(R, Rs, rho0, r_core, R, 0)[0] alpha_theory = self.cb.mass_2d(R, Rs, rho0, r_core) / np.pi / R - npt.assert_almost_equal(alpha/alpha_theory, 1) + npt.assert_almost_equal(alpha / alpha_theory, 1) def test_rho_angle_transform(self): - Rs = float(10) rho0 = float(1) r_core = float(7) @@ -67,11 +60,11 @@ def test_rho_angle_transform(self): alpha_Rs = self.cb._rho2alpha(rho0, Rs, r_core) alpha_Rs_2 = self.cb.coreBurkAlpha(Rs, Rs, rho0, r_core, Rs, 0)[0] - npt.assert_almost_equal(alpha_Rs*alpha_Rs_2**-1,1) + npt.assert_almost_equal(alpha_Rs * alpha_Rs_2**-1, 1) rho0_2 = self.cb._alpha2rho0(alpha_Rs, Rs, r_core) npt.assert_almost_equal(rho0, rho0_2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cored_density.py b/test/test_LensModel/test_Profiles/test_cored_density.py index 884595d28..0117c053e 100644 --- a/test/test_LensModel/test_Profiles/test_cored_density.py +++ b/test/test_LensModel/test_Profiles/test_cored_density.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.cored_density import CoredDensity @@ -8,9 +8,8 @@ class TestCoredDensity(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.model = CoredDensity() @@ -31,9 +30,9 @@ def test_derivatives(self): def test_dalpha_dr(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) sigma0 = 0.1 - r_core = 7. 
+ r_core = 7.0 dalpha_dr = self.model.d_alpha_dr(r, sigma0, r_core) alpha_r = self.model.alpha_r(r, sigma0, r_core) delta = 0.00001 @@ -48,7 +47,7 @@ def test_hessian(self): sigma0 = 0.1 r_core = 7 f_xx, f_xy, f_yx, f_yy = self.model.hessian(x, y, sigma0, r_core) - kappa = 1./2 * (f_xx + f_yy) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_direct = self.model.kappa_r(r, sigma0, r_core) npt.assert_almost_equal(kappa, kappa_direct, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) @@ -56,7 +55,7 @@ def test_hessian(self): def test_mass_3d(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) sigma0 = 0.1 r_core = 7 m3d = self.model.mass_3d(r, sigma0, r_core) @@ -64,5 +63,5 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cored_density_2.py b/test/test_LensModel/test_Profiles/test_cored_density_2.py index 087e2842d..e82cde29c 100644 --- a/test/test_LensModel/test_Profiles/test_cored_density_2.py +++ b/test/test_LensModel/test_Profiles/test_cored_density_2.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.cored_density_2 import CoredDensity2 @@ -8,9 +8,8 @@ class TestCoredDensity(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.model = CoredDensity2() @@ -25,7 +24,7 @@ def test_function(self): f_x, _ = self.model.derivatives(r, 0, sigma0, r_core) npt.assert_almost_equal(f_x_num, f_x, decimal=3) - #test single value vs list of outputs + # test single value vs list of outputs f_ = self.model.function(1, 0, sigma0, r_core) f_list = self.model.function(np.array([1]), 0, sigma0, r_core) npt.assert_almost_equal(f_, f_list[0], decimal=8) @@ -34,11 +33,11 @@ def test_derivatives(self): pass def test_dalpha_dr(self): - x = np.array([1., 3., 4.]) - y = np.array([2., 1., 1.]) - r = np.sqrt(x ** 2 + y ** 2) + x = np.array([1.0, 3.0, 4.0]) + y = np.array([2.0, 1.0, 1.0]) + r = np.sqrt(x**2 + y**2) sigma0 = 0.1 - r_core = 7. + r_core = 7.0 dalpha_dr = self.model.d_alpha_dr(r, sigma0, r_core) alpha_r = self.model.alpha_r(r, sigma0, r_core) delta = 0.00001 @@ -47,22 +46,21 @@ def test_dalpha_dr(self): npt.assert_almost_equal(dalpha_dr, d_alpha_dr_num) def test_hessian(self): - x = np.linspace(start=0.1, stop=10, num=100) y = 0 r = np.sqrt(x**2 + y**2) sigma0 = 0.1 - r_core = 2. 
+ r_core = 2.0 f_xx, f_xy, f_yx, f_yy = self.model.hessian(x, y, sigma0, r_core) - kappa = 1./2 * (f_xx + f_yy) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_direct = self.model.kappa_r(r, sigma0, r_core) - npt.assert_almost_equal(kappa/kappa_direct, 1, decimal=5) + npt.assert_almost_equal(kappa / kappa_direct, 1, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) def test_mass_3d(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) sigma0 = 0.1 r_core = 7 m3d = self.model.mass_3d(r, sigma0, r_core) @@ -70,5 +68,5 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cored_density_exp.py b/test/test_LensModel/test_Profiles/test_cored_density_exp.py index a6e2d34a8..41730563a 100644 --- a/test/test_LensModel/test_Profiles/test_cored_density_exp.py +++ b/test/test_LensModel/test_Profiles/test_cored_density_exp.py @@ -1,4 +1,4 @@ -__author__ = 'lucateo' +__author__ = "lucateo" from lenstronomy.LensModel.Profiles.cored_density_exp import CoredDensityExp @@ -8,9 +8,8 @@ class TestCoredDensityExp(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.model = CoredDensityExp() @@ -28,18 +27,18 @@ def test_function(self): theta_c_large = 13 f_reference = self.model.function(0, 0, kappa_0, theta_c_large) f_large = self.model.function(r, 0, kappa_0, theta_c_large) - f_MSD = 0.5* kappa_0 * r**2 + f_MSD = 0.5 * kappa_0 * r**2 npt.assert_almost_equal(f_large - f_reference, f_MSD, decimal=3) def test_derivatives(self): x = 0.5 y = 0.8 r = np.sqrt(x**2 + y**2) - kappa_0, theta_c = 0.2, 9 # Trying MSD limit - f_x, f_y = self.model.derivatives( x, y, kappa_0, theta_c) + kappa_0, theta_c = 0.2, 9 # Trying MSD limit + f_x, f_y = self.model.derivatives(x, y, kappa_0, theta_c) alpha_MSD = kappa_0 * r - npt.assert_almost_equal(f_x, alpha_MSD * x/r, decimal=3) - npt.assert_almost_equal(f_y, alpha_MSD * y/r, decimal=3) + npt.assert_almost_equal(f_x, alpha_MSD * x / r, decimal=3) + npt.assert_almost_equal(f_y, alpha_MSD * y / r, decimal=3) def test_hessian(self): x = np.linspace(start=0.01, stop=100, num=100) @@ -48,7 +47,7 @@ def test_hessian(self): kappa_0 = 0.12 theta_c = 6 f_xx, f_xy, f_yx, f_yy = self.model.hessian(x, y, kappa_0, theta_c) - kappa = 1./2 * (f_xx + f_yy) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_direct = self.model.kappa_r(r, kappa_0, theta_c) npt.assert_almost_equal(kappa, kappa_direct, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) @@ -56,7 +55,7 @@ def test_hessian(self): def test_mass_3d(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) kappa_0 = 0.1 theta_c = 7 m3d = self.model.mass_3d(r, kappa_0, theta_c) @@ -64,5 +63,5 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cored_profile_mst.py b/test/test_LensModel/test_Profiles/test_cored_profile_mst.py index 698a63e27..449807d9a 100644 --- a/test/test_LensModel/test_Profiles/test_cored_profile_mst.py +++ b/test/test_LensModel/test_Profiles/test_cored_profile_mst.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.cored_density_mst import CoredDensityMST @@ -10,59 +10,86 @@ class TestMassSheet(object): - """ - 
tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): - self.profile1 = CoredDensityMST(profile_type='CORED_DENSITY') - self.profile2 = CoredDensityMST(profile_type='CORED_DENSITY_2') - self.profile3 = CoredDensityMST(profile_type='CORED_DENSITY_EXP') - self.profile4 = CoredDensityMST(profile_type='CORED_DENSITY_ULDM') - self.kwargs_lens = {'lambda_approx': 0.9, 'r_core': 100, 'center_x': 0, 'center_y': 0} + self.profile1 = CoredDensityMST(profile_type="CORED_DENSITY") + self.profile2 = CoredDensityMST(profile_type="CORED_DENSITY_2") + self.profile3 = CoredDensityMST(profile_type="CORED_DENSITY_EXP") + self.profile4 = CoredDensityMST(profile_type="CORED_DENSITY_ULDM") + self.kwargs_lens = { + "lambda_approx": 0.9, + "r_core": 100, + "center_x": 0, + "center_y": 0, + } def test_function(self): x = np.array([0.01, 1]) y = np.array([0, 0]) f_ = self.profile1.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_[0] - f_[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_[0] - f_[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_ = self.profile2.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_[0] - f_[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_[0] - f_[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_ = self.profile3.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_[0] - f_[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_[0] - f_[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_ = self.profile4.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_[0] - f_[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_[0] - f_[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero def test_derivatives(self): x = np.array([0.01, 1]) y = np.array([0, 0]) f_x, f_y = self.profile1.derivatives(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_x[0] - f_x[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_x[0] - f_x[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_x, f_y = self.profile2.derivatives(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_x[0] - f_x[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_x[0] - f_x[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_x, f_y = self.profile3.derivatives(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_x[0] - f_x[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_x[0] - f_x[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_x, f_y = self.profile4.derivatives(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_x[0] - f_x[1], 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_x[0] - f_x[1], 0, decimal=3 + ) # test to demand that the profile is (almost) zero def test_hessian(self): x = np.array([0.01, 1]) y = np.array([0, 0]) f_xx, f_xy, f_yx, f_yy = self.profile1.hessian(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_xx, 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_xx, 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_xx, f_xy, f_yx, 
f_yy = self.profile2.hessian(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_xx, 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_xx, 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_xx, f_xy, f_yx, f_yy = self.profile3.hessian(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_xx, 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_xx, 0, decimal=3 + ) # test to demand that the profile is (almost) zero f_xx, f_xy, f_yx, f_yy = self.profile4.hessian(x, y, **self.kwargs_lens) - npt.assert_almost_equal(f_xx, 0, decimal=3) # test to demand that the profile is (almost) zero + npt.assert_almost_equal( + f_xx, 0, decimal=3 + ) # test to demand that the profile is (almost) zero class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - CoredDensityMST(profile_type='WRONG_PROFILE') + CoredDensityMST(profile_type="WRONG_PROFILE") -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_cored_steep_ellipsoid.py b/test/test_LensModel/test_Profiles/test_cored_steep_ellipsoid.py index b0bd111e9..385248524 100644 --- a/test/test_LensModel/test_Profiles/test_cored_steep_ellipsoid.py +++ b/test/test_LensModel/test_Profiles/test_cored_steep_ellipsoid.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -8,59 +8,64 @@ class TestCSP(object): - """ - tests the cored steep ellipsoid (CSE) - """ + """Tests the cored steep ellipsoid (CSE)""" + def setup_method(self): from lenstronomy.LensModel.Profiles.cored_steep_ellipsoid import CSE - self.CSP = CSE(axis='product_avg') - def test_function(self): + self.CSP = CSE(axis="product_avg") - kwargs = {'a': 2, 's': 1, 'e1': 0., 'e2': 0., 'center_x': 0, 'center_y': 0} + def test_function(self): + kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0} - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) f_ = self.CSP.function(x, y, **kwargs) npt.assert_almost_equal(f_, [1.09016, 0.96242], decimal=5) def test_derivatives(self): - kwargs = {'a': 2, 's': 1, 'e1': 0., 'e2': 0., 'center_x': 0, 'center_y': 0} + kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0} - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) f_x, f_y = self.CSP.derivatives(x, y, **kwargs) npt.assert_almost_equal(f_x, [0.2367, 0.55279], decimal=5) - npt.assert_almost_equal(f_y, [0.4734, 0.], decimal=5) + npt.assert_almost_equal(f_y, [0.4734, 0.0], decimal=5) def test_hessian(self): - kwargs = {'a': 2, 's': 1, 'e1': 0., 'e2': 0., 'center_x': 0, 'center_y': 0} + kwargs = {"a": 2, "s": 1, "e1": 0.0, "e2": 0.0, "center_x": 0, "center_y": 0} - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs) npt.assert_almost_equal(f_xy, f_yx, decimal=5) npt.assert_almost_equal(f_xx, [0.16924, -0.09751], decimal=5) - npt.assert_almost_equal(f_xy, [-0.13493, -0.], decimal=5) - npt.assert_almost_equal(f_yy, [-0.03315, 0.27639], decimal=5) + npt.assert_almost_equal(f_xy, [-0.13493, -0.0], decimal=5) + npt.assert_almost_equal(f_yy, [-0.03315, 0.27639], decimal=5) def test_ellipticity(self): - """ - test the definition of the ellipticity normalization (along major axis or product averaged axes) - """ + """Test the definition of the ellipticity normalization (along major axis or + product averaged axes)""" x, y = 
np.linspace(start=0.001, stop=10, num=100), np.zeros(100) - kwargs_round = {'a': 2, 's': 1, 'e1': 0., 'e2': 0., 'center_x': 0, 'center_y': 0} + kwargs_round = { + "a": 2, + "s": 1, + "e1": 0.0, + "e2": 0.0, + "center_x": 0, + "center_y": 0, + } phi_q, q = param_util.ellipticity2phi_q(0.3, 0) - kwargs = {'a': 2, 's': 1, 'e1': 0.3, 'e2': 0., 'center_x': 0, 'center_y': 0} + kwargs = {"a": 2, "s": 1, "e1": 0.3, "e2": 0.0, "center_x": 0, "center_y": 0} f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs_round) - kappa_round = 1. / 2 * (f_xx + f_yy) + kappa_round = 1.0 / 2 * (f_xx + f_yy) f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(x, y, **kwargs) - kappa_major = 1. / 2 * (f_xx + f_yy) + kappa_major = 1.0 / 2 * (f_xx + f_yy) f_xx, f_xy, f_yx, f_yy = self.CSP.hessian(y, x, **kwargs) - kappa_minor = 1. / 2 * (f_xx + f_yy) + kappa_minor = 1.0 / 2 * (f_xx + f_yy) # import matplotlib.pyplot as plt # plt.plot(x, kappa_major/kappa_round, ',-', label='major/round', alpha=0.5) @@ -70,7 +75,10 @@ def test_ellipticity(self): # plt.legend() # plt.show() - npt.assert_almost_equal(kappa_round,np.sqrt(kappa_minor*kappa_major), decimal=1) + npt.assert_almost_equal( + kappa_round, np.sqrt(kappa_minor * kappa_major), decimal=1 + ) + -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_curved_arc_const.py b/test/test_LensModel/test_Profiles/test_curved_arc_const.py index 20dd011bb..8cf44d683 100644 --- a/test/test_LensModel/test_Profiles/test_curved_arc_const.py +++ b/test/test_LensModel/test_Profiles/test_curved_arc_const.py @@ -1,47 +1,52 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt import pytest from lenstronomy.LensModel.Profiles.curved_arc_sis_mst import CurvedArcSISMST -from lenstronomy.LensModel.Profiles.curved_arc_const import CurvedArcConstMST, CurvedArcConst +from lenstronomy.LensModel.Profiles.curved_arc_const import ( + CurvedArcConstMST, + CurvedArcConst, +) from lenstronomy.Util import util class TestCurvedArcConst(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): self.arc_sis = CurvedArcSISMST() self.arc_const = CurvedArcConst() def test_function(self): - kwargs_arc = {'tangential_stretch': 5, - #'radial_stretch': 1, - 'curvature': 1. 
/ 10, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } - npt.assert_raises(Exception, self.arc_const.function, 0., 0., **kwargs_arc) + kwargs_arc = { + "tangential_stretch": 5, + #'radial_stretch': 1, + "curvature": 1.0 / 10, + "direction": 0, + "center_x": 0, + "center_y": 0, + } + npt.assert_raises(Exception, self.arc_const.function, 0.0, 0.0, **kwargs_arc) def test_derivatives(self): - kwargs_arc = {'tangential_stretch': 3, - #'radial_stretch': 1., - 'curvature': 0.8, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } - - kwargs_arc_sis = {'tangential_stretch': 3, - 'radial_stretch': 1., - 'curvature': 0.8, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } + kwargs_arc = { + "tangential_stretch": 3, + #'radial_stretch': 1., + "curvature": 0.8, + "direction": 0, + "center_x": 0, + "center_y": 0, + } + + kwargs_arc_sis = { + "tangential_stretch": 3, + "radial_stretch": 1.0, + "curvature": 0.8, + "direction": 0, + "center_x": 0, + "center_y": 0, + } x, y = util.make_grid(numPix=100, deltapix=0.01) f_x_sis, f_y_sis = self.arc_sis.derivatives(x, y, **kwargs_arc_sis) beta_x_sis = x - f_x_sis @@ -51,12 +56,15 @@ def test_derivatives(self): beta_y_const = y - f_y_const from lenstronomy.LightModel.light_model import LightModel - gauss = LightModel(['GAUSSIAN']) - kwargs_source = [{'amp': 1, 'sigma': 0.05, 'center_x': 0, 'center_y': 0}] + + gauss = LightModel(["GAUSSIAN"]) + kwargs_source = [{"amp": 1, "sigma": 0.05, "center_x": 0, "center_y": 0}] flux_sis = gauss.surface_brightness(beta_x_sis, beta_y_sis, kwargs_source) flux_const = gauss.surface_brightness(beta_x_const, beta_y_const, kwargs_source) - npt.assert_almost_equal((flux_const - flux_sis) / np.max(flux_const), 0, decimal=2) + npt.assert_almost_equal( + (flux_const - flux_sis) / np.max(flux_const), 0, decimal=2 + ) # check for stability outside the defined bounds of curvature f_x_const, f_y_const = self.arc_const.derivatives(x=0, y=1000, **kwargs_arc) @@ -64,20 +72,25 @@ def test_derivatives(self): npt.assert_almost_equal(f_y_const, 0) def test_hessian(self): - kwargs_arc = {'tangential_stretch': 5, - #'radial_stretch': 1, - 'curvature': 1. / 10, - 'direction': 0.5, - 'center_x': 0, - 'center_y': 0 - } - x, y = 0., 1. + kwargs_arc = { + "tangential_stretch": 5, + #'radial_stretch': 1, + "curvature": 1.0 / 10, + "direction": 0.5, + "center_x": 0, + "center_y": 0, + } + x, y = 0.0, 1.0 f_xx, f_xy, f_yx, f_yy = self.arc_const.hessian(x, y, **kwargs_arc) alpha_ra, alpha_dec = self.arc_const.derivatives(x, y, **kwargs_arc) diff = 0.0000001 - alpha_ra_dx, alpha_dec_dx = self.arc_const.derivatives(x + diff, y, **kwargs_arc) - alpha_ra_dy, alpha_dec_dy = self.arc_const.derivatives(x, y + diff, **kwargs_arc) + alpha_ra_dx, alpha_dec_dx = self.arc_const.derivatives( + x + diff, y, **kwargs_arc + ) + alpha_ra_dy, alpha_dec_dy = self.arc_const.derivatives( + x, y + diff, **kwargs_arc + ) f_xx_num = (alpha_ra_dx - alpha_ra) / diff f_xy_num = (alpha_ra_dy - alpha_ra) / diff @@ -95,30 +108,31 @@ def test_hessian(self): class TestCurvedArcConstMST(object): - def setup_method(self): self.arc_const = CurvedArcConstMST() def test_function(self): - kwargs_arc = {'tangential_stretch': 5, - 'radial_stretch': 1, - 'curvature': 1. 
/ 10, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } - npt.assert_raises(Exception, self.arc_const.function, 0., 0., **kwargs_arc) + kwargs_arc = { + "tangential_stretch": 5, + "radial_stretch": 1, + "curvature": 1.0 / 10, + "direction": 0, + "center_x": 0, + "center_y": 0, + } + npt.assert_raises(Exception, self.arc_const.function, 0.0, 0.0, **kwargs_arc) def test_hessian(self): - kwargs_arc = {'tangential_stretch': 5, - 'radial_stretch': 1, - 'curvature': 1. / 10, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } - #npt.assert_raises(Exception, self.arc_const.hessian, 0., 0., **kwargs_arc) - - -if __name__ == '__main__': + kwargs_arc = { + "tangential_stretch": 5, + "radial_stretch": 1, + "curvature": 1.0 / 10, + "direction": 0, + "center_x": 0, + "center_y": 0, + } + # npt.assert_raises(Exception, self.arc_const.hessian, 0., 0., **kwargs_arc) + + +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_Profiles/test_curved_arc_sis_mst.py b/test/test_LensModel/test_Profiles/test_curved_arc_sis_mst.py index 45fd395ea..505b060ae 100644 --- a/test/test_LensModel/test_Profiles/test_curved_arc_sis_mst.py +++ b/test/test_LensModel/test_Profiles/test_curved_arc_sis_mst.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import pytest @@ -10,9 +10,8 @@ class TestCurvedArcSISMST(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): self.model = CurvedArcSISMST() self.sis = SIS() @@ -22,97 +21,184 @@ def test_spp2stretch(self): center_x, center_y = 1, 1 theta_E = 1 kappa = 0.1 - center_x_spp, center_y_spp = 0., 0 + center_x_spp, center_y_spp = 0.0, 0 - tangential_stretch, radial_stretch, curvature, direction = self.model.sis_mst2stretch(theta_E, kappa, center_x_spp, center_y_spp, center_x, center_y) - theta_E_new, kappa_new, center_x_spp_new, center_y_spp_new = self.model.stretch2sis_mst(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.sis_mst2stretch( + theta_E, kappa, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + kappa_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(kappa_new, kappa, decimal=8) center_x, center_y = -1, 1 - tangential_stretch, radial_stretch, curvature, direction = self.model.sis_mst2stretch(theta_E, kappa, - center_x_spp, center_y_spp, - center_x, center_y) - theta_E_new, kappa_new, center_x_spp_new, center_y_spp_new = self.model.stretch2sis_mst(tangential_stretch, - radial_stretch, curvature, - direction, center_x, - center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.sis_mst2stretch( + theta_E, kappa, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + kappa_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, 
theta_E, decimal=8) npt.assert_almost_equal(kappa_new, kappa, decimal=8) center_x, center_y = 0, 0.5 - tangential_stretch, radial_stretch, curvature, direction = self.model.sis_mst2stretch(theta_E, kappa, - center_x_spp, center_y_spp, - center_x, center_y) - theta_E_new, kappa_new, center_x_spp_new, center_y_spp_new = self.model.stretch2sis_mst(tangential_stretch, - radial_stretch, curvature, - direction, center_x, - center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.sis_mst2stretch( + theta_E, kappa, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + kappa_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(kappa_new, kappa, decimal=8) center_x, center_y = 0, -1.5 - tangential_stretch, radial_stretch, r_curvature, direction = self.model.sis_mst2stretch(theta_E, kappa, - center_x_spp, center_y_spp, - center_x, center_y) + ( + tangential_stretch, + radial_stretch, + r_curvature, + direction, + ) = self.model.sis_mst2stretch( + theta_E, kappa, center_x_spp, center_y_spp, center_x, center_y + ) print(tangential_stretch, radial_stretch, r_curvature, direction) - theta_E_new, kappa_new, center_x_spp_new, center_y_spp_new = self.model.stretch2sis_mst(tangential_stretch, - radial_stretch, r_curvature, - direction, center_x, - center_y) + ( + theta_E_new, + kappa_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2sis_mst( + tangential_stretch, + radial_stretch, + r_curvature, + direction, + center_x, + center_y, + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(kappa_new, kappa, decimal=8) def test_function(self): - center_x, center_y = 0., 0. 
+ center_x, center_y = 0.0, 0.0 x, y = 1, 1 radial_stretch = 1 - output = self.model.function(x, y, tangential_stretch=2, radial_stretch=radial_stretch, curvature=1./2, direction=0, center_x=center_x, center_y=center_y) - theta_E, kappa_ext, center_x_sis, center_y_sis = self.model.stretch2sis_mst(tangential_stretch=2, radial_stretch=radial_stretch, curvature=1./2, direction=0, - center_x=center_x, center_y=center_y) - f_sis_out = self.sis.function(1, 1, theta_E, center_x_sis, center_y_sis) # - self.sis.function(0, 0, theta_E, center_x_sis, center_y_sis) - alpha_x, alpha_y = self.sis.derivatives(center_x, center_y, theta_E, center_x_sis, center_y_sis) + output = self.model.function( + x, + y, + tangential_stretch=2, + radial_stretch=radial_stretch, + curvature=1.0 / 2, + direction=0, + center_x=center_x, + center_y=center_y, + ) + theta_E, kappa_ext, center_x_sis, center_y_sis = self.model.stretch2sis_mst( + tangential_stretch=2, + radial_stretch=radial_stretch, + curvature=1.0 / 2, + direction=0, + center_x=center_x, + center_y=center_y, + ) + f_sis_out = self.sis.function( + 1, 1, theta_E, center_x_sis, center_y_sis + ) # - self.sis.function(0, 0, theta_E, center_x_sis, center_y_sis) + alpha_x, alpha_y = self.sis.derivatives( + center_x, center_y, theta_E, center_x_sis, center_y_sis + ) f_sis_0_out = alpha_x * (x - center_x) + alpha_y * (y - center_y) f_mst_out = self.mst.function(x, y, kappa_ext, ra_0=center_x, dec_0=center_y) - lambda_mst = 1. / radial_stretch + lambda_mst = 1.0 / radial_stretch f_out = lambda_mst * (f_sis_out - f_sis_0_out) + f_mst_out npt.assert_almost_equal(output, f_out, decimal=8) def test_derivatives(self): tangential_stretch = 5 radial_stretch = 1 - curvature = 1./10 + curvature = 1.0 / 10 direction = 0.3 center_x = 0 center_y = 0 x, y = 1, 1 - theta_E, kappa, center_x_spp, center_y_spp = self.model.stretch2sis_mst(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) - f_x_sis, f_y_sis = self.sis.derivatives(x, y, theta_E, center_x_spp, center_y_spp) - f_x_mst, f_y_mst = self.mst.derivatives(x, y, kappa, ra_0=center_x, dec_0=center_y) - f_x0, f_y0 = self.sis.derivatives(center_x, center_y, theta_E, center_x_spp, center_y_spp) - f_x_new, f_y_new = self.model.derivatives(x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) + theta_E, kappa, center_x_spp, center_y_spp = self.model.stretch2sis_mst( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_x_sis, f_y_sis = self.sis.derivatives( + x, y, theta_E, center_x_spp, center_y_spp + ) + f_x_mst, f_y_mst = self.mst.derivatives( + x, y, kappa, ra_0=center_x, dec_0=center_y + ) + f_x0, f_y0 = self.sis.derivatives( + center_x, center_y, theta_E, center_x_spp, center_y_spp + ) + f_x_new, f_y_new = self.model.derivatives( + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ) npt.assert_almost_equal(f_x_new, f_x_sis + f_x_mst - f_x0, decimal=8) npt.assert_almost_equal(f_y_new, f_y_sis + f_y_mst - f_y0, decimal=8) def test_hessian(self): - lens = LensModel(lens_model_list=['CURVED_ARC_SIS_MST']) + lens = LensModel(lens_model_list=["CURVED_ARC_SIS_MST"]) center_x, center_y = 0, 0 tangential_stretch = 10 radial_stretch = 1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + 
"curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -120,8 +206,15 @@ def test_hessian(self): tangential_stretch = 10 radial_stretch = 1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -129,9 +222,15 @@ def test_hessian(self): tangential_stretch = 5 radial_stretch = 1.2 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1. / 10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -139,20 +238,32 @@ def test_hessian(self): tangential_stretch = 3 radial_stretch = -1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) - print(tangential_stretch, radial_stretch, 'stretches') + print(tangential_stretch, radial_stretch, "stretches") npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) center_x, center_y = 0, 0 tangential_stretch = -3 radial_stretch = -1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -160,39 +271,74 @@ def test_hessian(self): tangential_stretch = 10.4 radial_stretch = 0.6 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) def test_curved_arc_recovery(self): - """ - test whether the curved arc parameters are satisfied in differential form - """ + """Test whether the curved arc parameters are satisfied in differential form.""" - 
ext = LensModelExtensions(LensModel(lens_model_list=['CURVED_ARC_SIS_MST'])) - center_x, center_y = 1, 1. # test works except at (0,0) where the direction angle is not well defined - tangential_stretch = 10. + ext = LensModelExtensions(LensModel(lens_model_list=["CURVED_ARC_SIS_MST"])) + center_x, center_y = ( + 1, + 1.0, + ) # test works except at (0,0) where the direction angle is not well defined + tangential_stretch = 10.0 radial_stretch = 1.2 curvature, direction = 0.02, 0.5 - kwargs_lens = {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, - 'curvature': curvature, 'direction': direction, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": curvature, + "direction": direction, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) def _test_curved_arc_recovery(self, kwargs_arc_init): - ext = LensModelExtensions(LensModel(lens_model_list=['CURVED_ARC_SIS_MST'])) - center_x, center_y = kwargs_arc_init['center_x'], kwargs_arc_init['center_y'] + ext = LensModelExtensions(LensModel(lens_model_list=["CURVED_ARC_SIS_MST"])) + center_x, center_y = kwargs_arc_init["center_x"], kwargs_arc_init["center_y"] kwargs_arc = ext.curved_arc_estimate(center_x, center_y, [kwargs_arc_init]) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials(center_x, center_y, [kwargs_arc_init]) - npt.assert_almost_equal(kwargs_arc['tangential_stretch'], kwargs_arc_init['tangential_stretch'], decimal=3) - npt.assert_almost_equal(kwargs_arc['radial_stretch'], kwargs_arc_init['radial_stretch'], decimal=3) - npt.assert_almost_equal(kwargs_arc['curvature'], kwargs_arc_init['curvature'], decimal=3) - npt.assert_almost_equal(dphi_tan_dtan, kwargs_arc_init['curvature'], decimal=3) - npt.assert_almost_equal(kwargs_arc['direction'], kwargs_arc_init['direction'], decimal=3) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(center_x, center_y, [kwargs_arc_init]) + npt.assert_almost_equal( + kwargs_arc["tangential_stretch"], + kwargs_arc_init["tangential_stretch"], + decimal=3, + ) + npt.assert_almost_equal( + kwargs_arc["radial_stretch"], kwargs_arc_init["radial_stretch"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_arc["curvature"], kwargs_arc_init["curvature"], decimal=3 + ) + npt.assert_almost_equal(dphi_tan_dtan, kwargs_arc_init["curvature"], decimal=3) + npt.assert_almost_equal( + kwargs_arc["direction"], kwargs_arc_init["direction"], decimal=3 + ) npt.assert_almost_equal(dlambda_tan_dtan, 0, decimal=3) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_Profiles/test_curved_arc_spp.py b/test/test_LensModel/test_Profiles/test_curved_arc_spp.py index 65790db55..cab2ab66e 100644 --- a/test/test_LensModel/test_Profiles/test_curved_arc_spp.py +++ b/test/test_LensModel/test_Profiles/test_curved_arc_spp.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -9,9 +9,8 @@ class TestCurvedArc(object): - """ - tests the source model routines - """ + """Tests the source model 
routines.""" + def setup_method(self): self.model = CurvedArcSPP() self.spp = SPP() @@ -20,63 +19,126 @@ def test_spp2stretch(self): center_x, center_y = 1, 1 theta_E = 1 gamma = 1.9 - center_x_spp, center_y_spp = 0., 0 + center_x_spp, center_y_spp = 0.0, 0 - tangential_stretch, radial_stretch, curvature, direction = self.model.spp2stretch(theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y) - theta_E_new, gamma_new, center_x_spp_new, center_y_spp_new = self.model.stretch2spp(tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.spp2stretch( + theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + gamma_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(gamma_new, gamma, decimal=8) center_x, center_y = -1, 1 - tangential_stretch, radial_stretch, curvature, direction = self.model.spp2stretch(theta_E, gamma, - center_x_spp, center_y_spp, - center_x, center_y) - theta_E_new, gamma_new, center_x_spp_new, center_y_spp_new = self.model.stretch2spp(tangential_stretch, - radial_stretch, curvature, - direction, center_x, - center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.spp2stretch( + theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + gamma_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(gamma_new, gamma, decimal=8) center_x, center_y = 0, 0.5 - tangential_stretch, radial_stretch, curvature, direction = self.model.spp2stretch(theta_E, gamma, - center_x_spp, center_y_spp, - center_x, center_y) - theta_E_new, gamma_new, center_x_spp_new, center_y_spp_new = self.model.stretch2spp(tangential_stretch, - radial_stretch, curvature, - direction, center_x, - center_y) + ( + tangential_stretch, + radial_stretch, + curvature, + direction, + ) = self.model.spp2stretch( + theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y + ) + ( + theta_E_new, + gamma_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(gamma_new, gamma, decimal=8) center_x, center_y = 0, -1.5 - tangential_stretch, radial_stretch, r_curvature, direction = self.model.spp2stretch(theta_E, gamma, - center_x_spp, center_y_spp, - center_x, center_y) + ( + tangential_stretch, + radial_stretch, + r_curvature, + direction, + ) = self.model.spp2stretch( + theta_E, gamma, center_x_spp, center_y_spp, center_x, center_y + ) print(tangential_stretch, radial_stretch, r_curvature, direction) - theta_E_new, gamma_new, center_x_spp_new, center_y_spp_new = 
self.model.stretch2spp(tangential_stretch, - radial_stretch, r_curvature, - direction, center_x, - center_y) + ( + theta_E_new, + gamma_new, + center_x_spp_new, + center_y_spp_new, + ) = self.model.stretch2spp( + tangential_stretch, + radial_stretch, + r_curvature, + direction, + center_x, + center_y, + ) npt.assert_almost_equal(center_x_spp_new, center_x_spp, decimal=8) npt.assert_almost_equal(center_y_spp_new, center_y_spp, decimal=8) npt.assert_almost_equal(theta_E_new, theta_E, decimal=8) npt.assert_almost_equal(gamma_new, gamma, decimal=8) def test_function(self): - center_x, center_y = 0., 0. + center_x, center_y = 0.0, 0.0 x, y = 1, 1 - output = self.model.function(x, y, tangential_stretch=2, radial_stretch=1, curvature=1./2, direction=0, center_x=0, center_y=0) - theta_E, gamma, center_x_spp, center_y_spp = self.model.stretch2spp(tangential_stretch=2, radial_stretch=1, curvature=1./2, direction=0, - center_x=0, center_y=0) + output = self.model.function( + x, + y, + tangential_stretch=2, + radial_stretch=1, + curvature=1.0 / 2, + direction=0, + center_x=0, + center_y=0, + ) + theta_E, gamma, center_x_spp, center_y_spp = self.model.stretch2spp( + tangential_stretch=2, + radial_stretch=1, + curvature=1.0 / 2, + direction=0, + center_x=0, + center_y=0, + ) out_spp = self.spp.function(1, 1, theta_E, gamma, center_x_spp, center_y_spp) - alpha_x, alpha_y = self.spp.derivatives(center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp) + alpha_x, alpha_y = self.spp.derivatives( + center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp + ) f_0 = alpha_x * (x - center_x) + alpha_y * (y - center_y) npt.assert_almost_equal(output, out_spp - f_0, decimal=8) @@ -84,28 +146,48 @@ def test_function(self): def test_derivatives(self): tangential_stretch = 5 radial_stretch = 1 - curvature = 1./10 + curvature = 1.0 / 10 direction = 0.3 center_x = 0 center_y = 0 x, y = 1, 1 - theta_E, gamma, center_x_spp, center_y_spp = self.model.stretch2spp(tangential_stretch, - radial_stretch, curvature, - direction, center_x, center_y) - f_x, f_y = self.spp.derivatives(x, y, theta_E, gamma, center_x_spp, center_y_spp) - f_x0, f_y0 = self.spp.derivatives(center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp) - f_x_new, f_y_new = self.model.derivatives(x, y, tangential_stretch, radial_stretch, curvature, direction, center_x, center_y) + theta_E, gamma, center_x_spp, center_y_spp = self.model.stretch2spp( + tangential_stretch, radial_stretch, curvature, direction, center_x, center_y + ) + f_x, f_y = self.spp.derivatives( + x, y, theta_E, gamma, center_x_spp, center_y_spp + ) + f_x0, f_y0 = self.spp.derivatives( + center_x, center_y, theta_E, gamma, center_x_spp, center_y_spp + ) + f_x_new, f_y_new = self.model.derivatives( + x, + y, + tangential_stretch, + radial_stretch, + curvature, + direction, + center_x, + center_y, + ) npt.assert_almost_equal(f_x_new, f_x - f_x0, decimal=8) npt.assert_almost_equal(f_y_new, f_y - f_y0, decimal=8) def test_hessian(self): - lens = LensModel(lens_model_list=['CURVED_ARC_SPP']) + lens = LensModel(lens_model_list=["CURVED_ARC_SPP"]) center_x, center_y = 0, 0 tangential_stretch = 10 radial_stretch = 1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] 
mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -113,8 +195,15 @@ def test_hessian(self): tangential_stretch = 10 radial_stretch = 1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -122,9 +211,15 @@ def test_hessian(self): tangential_stretch = 3 radial_stretch = -1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -132,9 +227,15 @@ def test_hessian(self): tangential_stretch = -3 radial_stretch = -1 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) @@ -142,12 +243,18 @@ def test_hessian(self): tangential_stretch = 10.4 radial_stretch = 0.6 kwargs_lens = [ - {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': 1./10.5, - 'direction': 0., - 'center_x': center_x, 'center_y': center_y}] + { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": 1.0 / 10.5, + "direction": 0.0, + "center_x": center_x, + "center_y": center_y, + } + ] mag = lens.magnification(center_x, center_y, kwargs=kwargs_lens) npt.assert_almost_equal(mag, tangential_stretch * radial_stretch, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_Profiles/test_curved_arc_spt.py b/test/test_LensModel/test_Profiles/test_curved_arc_spt.py index ee2707ef8..0f5788d20 100644 --- a/test/test_LensModel/test_Profiles/test_curved_arc_spt.py +++ b/test/test_LensModel/test_Profiles/test_curved_arc_spt.py @@ -7,39 +7,39 @@ class TestCurvedArcSPT(object): - def setup_method(self): self._curve_spt = CurvedArcSPT() self._curve_regular = CurvedArcSISMST() def test_function(self): - kwargs_arc = {'tangential_stretch': 5, - 'radial_stretch': 1, - 'curvature': 1. 
/ 10, - 'direction': 0, - 'center_x': 0, - 'center_y': 0, - 'gamma1': 0, - 'gamma2': 0 - } - npt.assert_raises(Exception, self._curve_spt.function, 0., 0., **kwargs_arc) + kwargs_arc = { + "tangential_stretch": 5, + "radial_stretch": 1, + "curvature": 1.0 / 10, + "direction": 0, + "center_x": 0, + "center_y": 0, + "gamma1": 0, + "gamma2": 0, + } + npt.assert_raises(Exception, self._curve_spt.function, 0.0, 0.0, **kwargs_arc) def test_spt_mapping(self): - e1, e2 = 0.1, -0.2 - kwargs_arc_sis_mst = {'tangential_stretch': 3, - 'radial_stretch': 1.2, - 'curvature': 0.8, - 'direction': 0, - 'center_x': 0, - 'center_y': 0 - } + kwargs_arc_sis_mst = { + "tangential_stretch": 3, + "radial_stretch": 1.2, + "curvature": 0.8, + "direction": 0, + "center_x": 0, + "center_y": 0, + } # inverse reduced shear transform as SPT kwargs_arc_spt = copy.deepcopy(kwargs_arc_sis_mst) - kwargs_arc_spt['gamma1'] = -e1 - kwargs_arc_spt['gamma2'] = -e2 + kwargs_arc_spt["gamma1"] = -e1 + kwargs_arc_spt["gamma2"] = -e2 x, y = util.make_grid(numPix=100, deltapix=0.01) f_x_sis, f_y_sis = self._curve_regular.derivatives(x, y, **kwargs_arc_sis_mst) @@ -50,15 +50,18 @@ def test_spt_mapping(self): beta_y_spt = y - f_y_spt from lenstronomy.LightModel.light_model import LightModel - gauss = LightModel(['GAUSSIAN_ELLIPSE']) - kwargs_source = [{'amp': 1, 'sigma': 0.05, 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0}] + + gauss = LightModel(["GAUSSIAN_ELLIPSE"]) + kwargs_source = [ + {"amp": 1, "sigma": 0.05, "center_x": 0, "center_y": 0, "e1": 0, "e2": 0} + ] kwargs_source_spt = copy.deepcopy(kwargs_source) - kwargs_source_spt[0]['e1'] = e1 - kwargs_source_spt[0]['e2'] = e2 + kwargs_source_spt[0]["e1"] = e1 + kwargs_source_spt[0]["e2"] = e2 flux_sis = gauss.surface_brightness(beta_x_sis, beta_y_sis, kwargs_source) flux_spt = gauss.surface_brightness(beta_x_spt, beta_y_spt, kwargs_source_spt) npt.assert_almost_equal(flux_sis, flux_spt) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestCurvedArcSPT") diff --git a/test/test_LensModel/test_Profiles/test_curved_arc_tan_diff.py b/test/test_LensModel/test_Profiles/test_curved_arc_tan_diff.py index ec22b7973..15089606d 100644 --- a/test/test_LensModel/test_Profiles/test_curved_arc_tan_diff.py +++ b/test/test_LensModel/test_Profiles/test_curved_arc_tan_diff.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -11,141 +11,263 @@ class TestCurvedArcTanDiff(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): self.model = CurvedArcTanDiff() self.sie = SIE() self.mst = Convergence() def test_curved_arc_round_recovery(self): - """ - test whether the curved arc parameters are satisfied in differential form - """ + """Test whether the curved arc parameters are satisfied in differential form.""" - center_x, center_y = 2, 0. # test works except at (0,0) where the direction angle is not well defined - tangential_stretch = 4. - radial_stretch = 1. 
+ center_x, center_y = ( + 2, + 0.0, + ) # test works except at (0,0) where the direction angle is not well defined + tangential_stretch = 4.0 + radial_stretch = 1.0 curvature, direction = 0.5, 0.5 dtan_dtan = 0 - kwargs_lens = {'tangential_stretch': tangential_stretch, 'radial_stretch': radial_stretch, 'curvature': curvature, - 'direction': direction, 'dtan_dtan': dtan_dtan, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": tangential_stretch, + "radial_stretch": radial_stretch, + "curvature": curvature, + "direction": direction, + "dtan_dtan": dtan_dtan, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) - kwargs_lens = {'tangential_stretch': tangential_stretch, 'radial_stretch': 1.1, - 'curvature': curvature, - 'direction': 0.01, 'dtan_dtan': dtan_dtan, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": tangential_stretch, + "radial_stretch": 1.1, + "curvature": curvature, + "direction": 0.01, + "dtan_dtan": dtan_dtan, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) - kwargs_lens = {'tangential_stretch': 10, 'radial_stretch': 1., - 'curvature': 0.2, - 'direction': 0.01, 'dtan_dtan': 0., 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": 10, + "radial_stretch": 1.0, + "curvature": 0.2, + "direction": 0.01, + "dtan_dtan": 0.0, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) def test_curved_arc_recovery(self): - """ - test whether the curved arc parameters are satisfied in differential form - """ + """Test whether the curved arc parameters are satisfied in differential form.""" - center_x, center_y = 3, 0 # test works except at (0,0) where the direction angle is not well defined + center_x, center_y = ( + 3, + 0, + ) # test works except at (0,0) where the direction angle is not well defined - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1., - 'curvature': 0.3, - 'direction': 0.001, 'dtan_dtan': 0.1, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.0, + "curvature": 0.3, + "direction": 0.001, + "dtan_dtan": 0.1, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) # and here we change directions - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1., - 'curvature': 0.3, - 'direction': 0.5, 'dtan_dtan': 0.1, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.0, + "curvature": 0.3, + "direction": 0.5, + "dtan_dtan": 0.1, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) # and here we have the radial stretch != 1, thus applying an MST - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1.1, - 'curvature': 0.3, - 'direction': 0.5, 'dtan_dtan': 0.1, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.1, + "curvature": 0.3, + "direction": 0.5, + "dtan_dtan": 0.1, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1.1, - 'curvature': 0.3, - 'direction': 0.5, 'dtan_dtan': -0.1, 'center_x': center_x, 'center_y': center_y} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.1, + "curvature": 0.3, + "direction": 0.5, + "dtan_dtan": 
-0.1, + "center_x": center_x, + "center_y": center_y, + } self._test_curved_arc_recovery(kwargs_lens) def _test_in_out_scaling(self): # some scaling tests with plots that are going to be ignored - ext = LensModelExtensions(LensModel(lens_model_list=['CURVED_ARC_TAN_DIFF'])) + ext = LensModelExtensions(LensModel(lens_model_list=["CURVED_ARC_TAN_DIFF"])) # change in dtan_dtan for fixed other components input vs output comparison dtan_dtan_list = np.linspace(start=-0.5, stop=0.5, num=21) dtan_dtan_out_list = [] - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1., - 'curvature': 0.2, - 'direction': 0.001, 'dtan_dtan': 0.1, 'center_x': 1, 'center_y': 0} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.0, + "curvature": 0.2, + "direction": 0.001, + "dtan_dtan": 0.1, + "center_x": 1, + "center_y": 0, + } for dtan_dtan in dtan_dtan_list: - kwargs_lens['dtan_dtan'] = dtan_dtan - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - 1, 0, [kwargs_lens]) + kwargs_lens["dtan_dtan"] = dtan_dtan + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(1, 0, [kwargs_lens]) dtan_dtan_out_list.append(dlambda_tan_dtan) dtan_dtan_out_list = np.array(dtan_dtan_out_list) import matplotlib.pyplot as plt + plt.plot(dtan_dtan_list, dtan_dtan_out_list) - plt.xlabel('dtan_in fixed lens') - plt.ylabel('dtan_out') + plt.xlabel("dtan_in fixed lens") + plt.ylabel("dtan_out") plt.show() - #npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) + # npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) # change in tangential stretch lambda_tan_list = np.linspace(start=2, stop=20, num=21) dtan_dtan_out_list = [] - kwargs_lens = {'tangential_stretch': 2., 'radial_stretch': 1., - 'curvature': 0.2, - 'direction': 0.001, 'dtan_dtan': 0.1, 'center_x': 1, 'center_y': 0} + kwargs_lens = { + "tangential_stretch": 2.0, + "radial_stretch": 1.0, + "curvature": 0.2, + "direction": 0.001, + "dtan_dtan": 0.1, + "center_x": 1, + "center_y": 0, + } for lambda_tan in lambda_tan_list: - kwargs_lens['tangential_stretch'] = lambda_tan - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - 1, 0, [kwargs_lens]) + kwargs_lens["tangential_stretch"] = lambda_tan + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(1, 0, [kwargs_lens]) dtan_dtan_out_list.append(dlambda_tan_dtan) dtan_dtan_out_list = np.array(dtan_dtan_out_list) import matplotlib.pyplot as plt + plt.plot(lambda_tan_list, dtan_dtan_out_list) - plt.xlabel('lambda_tan') - plt.ylabel('dtan_out') + plt.xlabel("lambda_tan") + plt.ylabel("dtan_out") plt.show() - #npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) + # npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) # change in curvature radius curvature_list = np.linspace(start=0.1, stop=1, num=21) dtan_dtan_out_list = [] - kwargs_lens = {'tangential_stretch': 5., 'radial_stretch': 1., - 
'curvature': 0.2, - 'direction': 0.001, 'dtan_dtan': 0.1, 'center_x': 1, 'center_y': 0} + kwargs_lens = { + "tangential_stretch": 5.0, + "radial_stretch": 1.0, + "curvature": 0.2, + "direction": 0.001, + "dtan_dtan": 0.1, + "center_x": 1, + "center_y": 0, + } for curvatrue in curvature_list: - kwargs_lens['curvature'] = curvatrue - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - 1, 0, [kwargs_lens]) + kwargs_lens["curvature"] = curvatrue + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(1, 0, [kwargs_lens]) dtan_dtan_out_list.append(dlambda_tan_dtan) dtan_dtan_out_list = np.array(dtan_dtan_out_list) import matplotlib.pyplot as plt + plt.plot(lambda_tan_list, dtan_dtan_out_list) - plt.xlabel('curvature') - plt.ylabel('dtan_out') + plt.xlabel("curvature") + plt.ylabel("dtan_out") plt.show() - #npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) + # npt.assert_almost_equal(dtan_dtan_out_list, dtan_dtan_list) def _test_curved_arc_recovery(self, kwargs_arc_init): - ext = LensModelExtensions(LensModel(lens_model_list=['CURVED_ARC_TAN_DIFF'])) - center_x, center_y = kwargs_arc_init['center_x'], kwargs_arc_init['center_y'] + ext = LensModelExtensions(LensModel(lens_model_list=["CURVED_ARC_TAN_DIFF"])) + center_x, center_y = kwargs_arc_init["center_x"], kwargs_arc_init["center_y"] kwargs_arc = ext.curved_arc_estimate(center_x, center_y, [kwargs_arc_init]) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials(center_x, center_y, [kwargs_arc_init]) - print(lambda_tan, dlambda_tan_dtan, kwargs_arc_init['dtan_dtan']) - npt.assert_almost_equal(kwargs_arc['tangential_stretch'] / kwargs_arc_init['tangential_stretch'], 1, decimal=3) - npt.assert_almost_equal(kwargs_arc['radial_stretch'], kwargs_arc_init['radial_stretch'], decimal=3) - npt.assert_almost_equal(kwargs_arc['curvature'], kwargs_arc_init['curvature'], decimal=2) - npt.assert_almost_equal(dphi_tan_dtan, kwargs_arc_init['curvature'], decimal=2) - npt.assert_almost_equal(kwargs_arc['direction'], kwargs_arc_init['direction'], decimal=3) - npt.assert_almost_equal(dlambda_tan_dtan / lambda_tan, kwargs_arc_init['dtan_dtan'], decimal=2) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(center_x, center_y, [kwargs_arc_init]) + print(lambda_tan, dlambda_tan_dtan, kwargs_arc_init["dtan_dtan"]) + npt.assert_almost_equal( + kwargs_arc["tangential_stretch"] / kwargs_arc_init["tangential_stretch"], + 1, + decimal=3, + ) + npt.assert_almost_equal( + kwargs_arc["radial_stretch"], kwargs_arc_init["radial_stretch"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_arc["curvature"], kwargs_arc_init["curvature"], decimal=2 + ) + npt.assert_almost_equal(dphi_tan_dtan, kwargs_arc_init["curvature"], decimal=2) + npt.assert_almost_equal( + kwargs_arc["direction"], kwargs_arc_init["direction"], decimal=3 + ) + npt.assert_almost_equal( + dlambda_tan_dtan / 
lambda_tan, kwargs_arc_init["dtan_dtan"], decimal=2 + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_Profiles/test_dipole.py b/test/test_LensModel/test_Profiles/test_dipole.py index 607e504fa..72579db1e 100644 --- a/test/test_LensModel/test_Profiles/test_dipole.py +++ b/test/test_LensModel/test_Profiles/test_dipole.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.dipole import Dipole, DipoleUtil @@ -7,10 +7,10 @@ import numpy.testing as npt import pytest + class TestDipole(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.dipole = Dipole() self.dipole_util = DipoleUtil() @@ -22,24 +22,26 @@ def test_function(self): center1_y = 0 center2_x = 1 center2_y = -1 - c = 1. + c = 1.0 Fm = 0.5 - com_x, com_y = self.dipole_util.com(center1_x, center1_y, center2_x, center2_y, Fm) + com_x, com_y = self.dipole_util.com( + center1_x, center1_y, center2_x, center2_y, Fm + ) phi_dipole = self.dipole_util.angle(center1_x, center1_y, center2_x, center2_y) values = self.dipole.function(x, y, com_x, com_y, phi_dipole, c) - #npt.assert_almost_equal(values[0], 0, decimal=5) + # npt.assert_almost_equal(values[0], 0, decimal=5) x = np.array([0]) y = np.array([0]) values = self.dipole.function(x, y, com_x, com_y, phi_dipole, c) - #npt.assert_almost_equal(values[0], 0, decimal=5) + # npt.assert_almost_equal(values[0], 0, decimal=5) - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values = self.dipole.function(x, y, com_x, com_y, phi_dipole, c) - #npt.assert_almost_equal(values[0], 0, decimal=5) - #npt.assert_almost_equal(values[1], 0, decimal=5) - #npt.assert_almost_equal(values[2], 0, decimal=5) + # npt.assert_almost_equal(values[0], 0, decimal=5) + # npt.assert_almost_equal(values[1], 0, decimal=5) + # npt.assert_almost_equal(values[2], 0, decimal=5) def test_derivatives(self): x = np.array([1]) @@ -48,16 +50,18 @@ def test_derivatives(self): center1_y = 0 center2_x = 1 center2_y = -1 - c = 1. + c = 1.0 Fm = 0.5 - com_x, com_y = self.dipole_util.com(center1_x, center1_y, center2_x, center2_y, Fm) + com_x, com_y = self.dipole_util.com( + center1_x, center1_y, center2_x, center2_y, Fm + ) phi_dipole = self.dipole_util.angle(center1_x, center1_y, center2_x, center2_y) f_x, f_y = self.dipole.derivatives(x, y, com_x, com_y, phi_dipole, c) npt.assert_almost_equal(f_x[0], -0.43412157106222954, decimal=5) npt.assert_almost_equal(f_y[0], 0.43412157106222948, decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.dipole.derivatives(x, y, com_x, com_y, phi_dipole, c) npt.assert_almost_equal(values[0][0], -0.43412157106222954, decimal=5) npt.assert_almost_equal(values[1][0], 0.43412157106222948, decimal=5) @@ -71,17 +75,19 @@ def test_hessian(self): center1_y = 0 center2_x = 1 center2_y = -1 - c = 1. 
+ c = 1.0 Fm = 0.5 - com_x, com_y = self.dipole_util.com(center1_x, center1_y, center2_x, center2_y, Fm) + com_x, com_y = self.dipole_util.com( + center1_x, center1_y, center2_x, center2_y, Fm + ) phi_dipole = self.dipole_util.angle(center1_x, center1_y, center2_x, center2_y) f_xx, f_xy, f_yx, f_yy = self.dipole.hessian(x, y, com_x, com_y, phi_dipole, c) npt.assert_almost_equal(f_xx[0], 0.29625219299960942, decimal=5) npt.assert_almost_equal(f_yy[0], -0.064402650652089, decimal=5) npt.assert_almost_equal(f_xy[0], -0.1159247711737602, decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.dipole.hessian(x, y, com_x, com_y, phi_dipole, c) npt.assert_almost_equal(values[0][0], 0.29625219299960942, decimal=5) npt.assert_almost_equal(values[3][0], -0.064402650652089, decimal=5) @@ -91,9 +97,9 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.16965871600449295, decimal=5) def test_mass_ratio(self): - ratio = self.dipole_util.mass_ratio(theta_E=1., theta_E_sub=0.1) + ratio = self.dipole_util.mass_ratio(theta_E=1.0, theta_E_sub=0.1) assert ratio == 100 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_elliptical_density_slice.py b/test/test_LensModel/test_Profiles/test_elliptical_density_slice.py index 33a07ae61..aba68216e 100644 --- a/test/test_LensModel/test_Profiles/test_elliptical_density_slice.py +++ b/test/test_LensModel/test_Profiles/test_elliptical_density_slice.py @@ -1,4 +1,4 @@ -__author__ = 'lynevdv' +__author__ = "lynevdv" from lenstronomy.LensModel.Profiles.elliptical_density_slice import ElliSLICE @@ -7,87 +7,87 @@ import pytest import numpy.testing as npt + class TestElliSLICE(object): - """ - tests the elliptical slice lens model - """ + """Tests the elliptical slice lens model.""" + def setup_method(self): self.ElliSLICE = ElliSLICE() def test_function(self): x = 0.5 y = 0.1 - a = 2. - b = 1. - psi = 30*np.pi/180. - sigma_0 = 5. + a = 2.0 + b = 1.0 + psi = 30 * np.pi / 180.0 + sigma_0 = 5.0 values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0) npt.assert_almost_equal(values, 4.532482297, decimal=4) - x = 3.*np.sqrt(3)/2. - y = 3./2. + x = 3.0 * np.sqrt(3) / 2.0 + y = 3.0 / 2.0 values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0) npt.assert_almost_equal(values, 15.52885056, decimal=4) x = np.array([0]) y = np.array([0]) values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0) - npt.assert_almost_equal(values[0], 4.054651081,decimal=5) + npt.assert_almost_equal(values[0], 4.054651081, decimal=5) - x = np.array([np.sqrt(3), np.sqrt(3)+0.000000001, np.sqrt(3)-0.000000001]) + x = np.array([np.sqrt(3), np.sqrt(3) + 0.000000001, np.sqrt(3) - 0.000000001]) y = np.array([1, 1.000000001, 0.999999999]) - values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0) + values = self.ElliSLICE.function(x, y, a, b, psi, sigma_0) npt.assert_almost_equal(values[0], values[1], decimal=5) npt.assert_almost_equal(values[1], values[2], decimal=5) def test_derivatives(self): x = 0.5 y = 0.1 - a = 2. - b = 1. - psi = 30 * np.pi / 180. - sigma_0 = 5. + a = 2.0 + b = 1.0 + psi = 30 * np.pi / 180.0 + sigma_0 = 5.0 f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0) npt.assert_almost_equal(f_x, 1.938995765, decimal=6) npt.assert_almost_equal(f_y, -0.13835403, decimal=6) x = 4 - y = 0. 
- f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, 0., sigma_0) + y = 0.0 + f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, 0.0, sigma_0) npt.assert_almost_equal(f_x, 2.629658164, decimal=6) - npt.assert_almost_equal(f_y, 0., decimal=6) + npt.assert_almost_equal(f_y, 0.0, decimal=6) x = np.array([0.5]) y = np.array([0.1]) f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0) - npt.assert_almost_equal(f_x, 1.938995765, decimal=6) - npt.assert_almost_equal(f_y, -0.13835403, decimal=6) + npt.assert_almost_equal(f_x, 1.938995765, decimal=6) + npt.assert_almost_equal(f_y, -0.13835403, decimal=6) x = np.array([np.sqrt(3), np.sqrt(3) + 0.000000001, np.sqrt(3) - 0.000000001]) y = np.array([1, 1.000000001, 0.999999999]) - f_x,f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0) + f_x, f_y = self.ElliSLICE.derivatives(x, y, a, b, psi, sigma_0) npt.assert_almost_equal(f_x[0], f_x[1], decimal=5) npt.assert_almost_equal(f_y[1], f_y[2], decimal=5) def test_hessian(self): x = 0.5 y = 0.1 - a = 2. - b = 1. - psi = 30 * np.pi / 180. - sigma_0 = 5. + a = 2.0 + b = 1.0 + psi = 30 * np.pi / 180.0 + sigma_0 = 5.0 f_xx, f_xy, f_yx, f_yy = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0) - npt.assert_almost_equal((f_xx+f_yy)/2., 5., decimal=6) + npt.assert_almost_equal((f_xx + f_yy) / 2.0, 5.0, decimal=6) x = np.array([1]) y = np.array([2]) npt.assert_almost_equal(f_xy, f_yx, decimal=7) f_xx, f_xy, f_yx, f_yy = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0) - npt.assert_almost_equal((f_xx+f_yy)/2., 0., decimal=6) - x = np.array([1,3,0.]) - y = np.array([2,1,0.5]) + npt.assert_almost_equal((f_xx + f_yy) / 2.0, 0.0, decimal=6) + x = np.array([1, 3, 0.0]) + y = np.array([2, 1, 0.5]) values = self.ElliSLICE.hessian(x, y, a, b, psi, sigma_0) - npt.assert_almost_equal((values[0][2]+values[3][2])/2., 5., decimal=6) + npt.assert_almost_equal((values[0][2] + values[3][2]) / 2.0, 5.0, decimal=6) -if __name__ == '__main__': - pytest.main() +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_epl.py b/test/test_LensModel/test_Profiles/test_epl.py index 2a524c146..130f952f3 100644 --- a/test/test_LensModel/test_Profiles/test_epl.py +++ b/test/test_LensModel/test_Profiles/test_epl.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -9,52 +9,56 @@ try: import fastell4py + fastell4py_bool = True except: - print("Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!") + print( + "Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!" + ) fastell4py_bool = False class TestEPLvsNIE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): from lenstronomy.LensModel.Profiles.epl import EPL + self.EPL = EPL() from lenstronomy.LensModel.Profiles.nie import NIE + self.NIE = NIE() def test_function(self): - phi_E = 1. - gamma = 2. + phi_E = 1.0 + gamma = 2.0 q = 0.999 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) - values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.) 
+ values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.0) delta_f = values[0] - values[1] delta_f_nie = values_nie[0] - values_nie[1] npt.assert_almost_equal(delta_f, delta_f_nie, decimal=5) q = 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) - values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.) + values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.0) delta_f = values[0] - values[1] delta_f_nie = values_nie[0] - values_nie[1] npt.assert_almost_equal(delta_f, delta_f_nie, decimal=5) q = 0.4 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) - values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.) + values_nie = self.NIE.function(x, y, phi_E, e1, e2, 0.0) delta_f = values[0] - values[1] delta_f_nie = values_nie[0] - values_nie[1] npt.assert_almost_equal(delta_f, delta_f_nie, decimal=5) @@ -62,34 +66,36 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. - gamma = 2. - q = 1. - phi_G = 1. + phi_E = 1.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) - f_x_nie, f_y_nie = self.NIE.derivatives(x, y, phi_E, e1, e2, 0.) + f_x_nie, f_y_nie = self.NIE.derivatives(x, y, phi_E, e1, e2, 0.0) npt.assert_almost_equal(f_x, f_x_nie, decimal=4) npt.assert_almost_equal(f_y, f_y_nie, decimal=4) q = 0.7 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) - f_x_nie, f_y_nie = self.NIE.derivatives(x, y, phi_E, e1, e2, 0.) + f_x_nie, f_y_nie = self.NIE.derivatives(x, y, phi_E, e1, e2, 0.0) npt.assert_almost_equal(f_x, f_x_nie, decimal=4) npt.assert_almost_equal(f_y, f_y_nie, decimal=4) def test_hessian(self): - x = np.array([1.]) - y = np.array([2.]) - phi_E = 1. - gamma = 2. + x = np.array([1.0]) + y = np.array([2.0]) + phi_E = 1.0 + gamma = 2.0 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.EPL.hessian(x, y, phi_E, gamma, e1, e2) - f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.NIE.hessian(x, y, phi_E, e1, e2, 0.) + f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.NIE.hessian( + x, y, phi_E, e1, e2, 0.0 + ) npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4) npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4) npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4) @@ -97,96 +103,106 @@ def test_hessian(self): def test_density_lens(self): r = 1 - kwargs = {'theta_E': 1, 'gamma': 2, 'e1': 0, 'e2': 0} + kwargs = {"theta_E": 1, "gamma": 2, "e1": 0, "e2": 0} rho = self.EPL.density_lens(r, **kwargs) from lenstronomy.LensModel.Profiles.spep import SPEP + spep = SPEP() rho_spep = spep.density_lens(r, **kwargs) npt.assert_almost_equal(rho, rho_spep, decimal=7) def test_mass_3d_lens(self): r = 1 - kwargs = {'theta_E': 1, 'gamma': 2, 'e1': 0, 'e2': 0} + kwargs = {"theta_E": 1, "gamma": 2, "e1": 0, "e2": 0} rho = self.EPL.mass_3d_lens(r, **kwargs) from lenstronomy.LensModel.Profiles.spep import SPEP + spep = SPEP() rho_spep = spep.mass_3d_lens(r, **kwargs) npt.assert_almost_equal(rho, rho_spep, decimal=7) def test_static(self): - x, y = 1., 1. 
+ x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'theta_E': 1., 'gamma': 1.5, 'e1': e1, 'e2': e2} + kwargs_lens = {"theta_E": 1.0, "gamma": 1.5, "e1": e1, "e2": e2} f_ = self.EPL.function(x, y, **kwargs_lens) self.EPL.set_static(**kwargs_lens) f_static = self.EPL.function(x, y, **kwargs_lens) npt.assert_almost_equal(f_, f_static, decimal=8) self.EPL.set_dynamic() - kwargs_lens = {'theta_E': 2., 'gamma': 1.9, 'e1': e1, 'e2': e2} + kwargs_lens = {"theta_E": 2.0, "gamma": 1.9, "e1": e1, "e2": e2} f_dyn = self.EPL.function(x, y, **kwargs_lens) assert f_dyn != f_static def test_regularization(self): - - phi_E = 1. - gamma = 2. - q = 1. - phi_G = 1. + phi_E = 1.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) - npt.assert_almost_equal(f_x, 0.) - npt.assert_almost_equal(f_y, 0.) + npt.assert_almost_equal(f_x, 0.0) + npt.assert_almost_equal(f_y, 0.0) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f_xx, f_xy, f_yx, f_yy = self.EPL.hessian(x, y, phi_E, gamma, e1, e2) - assert f_xx > 10 ** 5 - assert f_yy > 10 ** 5 - #npt.assert_almost_equal(f_xx, 10**10) - #npt.assert_almost_equal(f_yy, 10**10) + assert f_xx > 10**5 + assert f_yy > 10**5 + # npt.assert_almost_equal(f_xx, 10**10) + # npt.assert_almost_equal(f_yy, 10**10) npt.assert_almost_equal(f_xy, 0) npt.assert_almost_equal(f_yx, 0) class TestEPLvsPEMD(object): - """ - Test EPL model vs PEMD with FASTELL - This tests get only executed if fastell is installed - """ + """Test EPL model vs PEMD with FASTELL This tests get only executed if fastell is + installed.""" + def setup_method(self): try: import fastell4py + self._fastell4py_bool = True except: - print("Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!") + print( + "Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!" 
+ ) self._fastell4py_bool = False from lenstronomy.LensModel.Profiles.epl import EPL + self.epl = EPL() from lenstronomy.LensModel.Profiles.pemd import PEMD + self.pemd = PEMD(suppress_fastell=True) def test_epl_pemd_convention(self): - """ - tests convention of EPL and PEMD model on the deflection angle basis - """ + """Tests convention of EPL and PEMD model on the deflection angle basis.""" if self._fastell4py_bool is False: assert True else: x, y = util.make_grid(numPix=10, deltapix=0.2) theta_E_list = [0.5, 1, 2] - gamma_list = [1.8, 2., 2.2] - e1_list = [-0.2, 0., 0.2] - e2_list = [-0.2, 0., 0.2] + gamma_list = [1.8, 2.0, 2.2] + e1_list = [-0.2, 0.0, 0.2] + e2_list = [-0.2, 0.0, 0.2] for gamma in gamma_list: for e1 in e1_list: for e2 in e2_list: for theta_E in theta_E_list: - kwargs = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0} + kwargs = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } f_x, f_y = self.epl.derivatives(x, y, **kwargs) f_x_pemd, f_y_pemd = self.pemd.derivatives(x, y, **kwargs) @@ -194,5 +210,5 @@ def test_epl_pemd_convention(self): npt.assert_almost_equal(f_y, f_y_pemd, decimal=4) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_epl_boxydisky.py b/test/test_LensModel/test_Profiles/test_epl_boxydisky.py index 786f04a0e..8aea98404 100644 --- a/test/test_LensModel/test_Profiles/test_epl_boxydisky.py +++ b/test/test_LensModel/test_Profiles/test_epl_boxydisky.py @@ -1,4 +1,4 @@ -__author__ = 'Maverick-Oh' +__author__ = "Maverick-Oh" import numpy as np import pytest @@ -6,23 +6,26 @@ import lenstronomy.Util.param_util as param_util from lenstronomy.Util import util + class TestEPL_BOXYDISKY(object): - """ - Test EPL_BOXYDISKY vs EPL + MULTIPOLE values. 
- """ + """Test EPL_BOXYDISKY vs EPL + MULTIPOLE values.""" + def setup(self): from lenstronomy.LensModel.Profiles.epl import EPL + self.epl = EPL() from lenstronomy.LensModel.Profiles.multipole import Multipole + self.multipole = Multipole() from lenstronomy.LensModel.Profiles.epl_boxydisky import EPL_BOXYDISKY + self.epl_boxydisky = EPL_BOXYDISKY() self.x, self.y = util.make_grid(numPix=10, deltapix=0.2) self.theta_E_list = [0.5, 1, 2] - self.gamma_list = [1.8, 2., 2.2] - self.e1_list = [-0.2, 0., 0.2] - self.e2_list = [-0.2, 0., 0.2] + self.gamma_list = [1.8, 2.0, 2.2] + self.e1_list = [-0.2, 0.0, 0.2] + self.e2_list = [-0.2, 0.0, 0.2] self.a_m_list = [0.0, 0.05, -0.05] def test_function(self): @@ -31,12 +34,29 @@ def test_function(self): for e2 in self.e2_list: for theta_E in self.theta_E_list: for a_m in self.a_m_list: - kwargs_epl = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2} + kwargs_epl = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + } phi, _ = param_util.ellipticity2phi_q(e1, e2) - kwargs_multipole = {'m': 4, 'a_m': a_m, 'phi_m': phi} - kwargs_epl_boxydisky = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'a_m': a_m} - value1 = self.epl.function(self.x, self.y, **kwargs_epl) + self.multipole.function(self.x, self.y, **kwargs_multipole) - value2 = self.epl_boxydisky.function(self.x, self.y, **kwargs_epl_boxydisky) + kwargs_multipole = {"m": 4, "a_m": a_m, "phi_m": phi} + kwargs_epl_boxydisky = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "a_m": a_m, + } + value1 = self.epl.function( + self.x, self.y, **kwargs_epl + ) + self.multipole.function( + self.x, self.y, **kwargs_multipole + ) + value2 = self.epl_boxydisky.function( + self.x, self.y, **kwargs_epl_boxydisky + ) npt.assert_almost_equal(value1, value2, decimal=10) def test_derivatives(self): @@ -45,15 +65,33 @@ def test_derivatives(self): for e2 in self.e2_list: for theta_E in self.theta_E_list: for a_m in self.a_m_list: - kwargs_epl = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2} + kwargs_epl = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + } phi, _ = param_util.ellipticity2phi_q(e1, e2) - kwargs_multipole = {'m': 4, 'a_m': a_m, 'phi_m': phi} - kwargs_epl_boxydisky = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'a_m': a_m} + kwargs_multipole = {"m": 4, "a_m": a_m, "phi_m": phi} + kwargs_epl_boxydisky = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "a_m": a_m, + } - f_x1, f_y1 = self.epl.derivatives(self.x, self.y, **kwargs_epl) - f_x2, f_y2 = self.multipole.derivatives(self.x, self.y, **kwargs_multipole) - f_x = f_x1 + f_x2; f_y = f_y1 + f_y2 - f_x_, f_y_ = self.epl_boxydisky.derivatives(self.x, self.y, **kwargs_epl_boxydisky) + f_x1, f_y1 = self.epl.derivatives( + self.x, self.y, **kwargs_epl + ) + f_x2, f_y2 = self.multipole.derivatives( + self.x, self.y, **kwargs_multipole + ) + f_x = f_x1 + f_x2 + f_y = f_y1 + f_y2 + f_x_, f_y_ = self.epl_boxydisky.derivatives( + self.x, self.y, **kwargs_epl_boxydisky + ) npt.assert_almost_equal(f_x, f_x_, decimal=10) npt.assert_almost_equal(f_y, f_y_, decimal=10) @@ -64,18 +102,39 @@ def test_hessian(self): for e2 in self.e2_list: for theta_E in self.theta_E_list: for a_m in self.a_m_list: - kwargs_epl = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2} + kwargs_epl = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + } phi, _ = param_util.ellipticity2phi_q(e1, e2) - kwargs_multipole = {'m': 4, 'a_m': a_m, 'phi_m': phi} - 
kwargs_epl_boxydisky = {'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'a_m': a_m} - f_xx1, f_xy1, f_yx1, f_yy1 = self.epl.hessian(self.x, self.y, **kwargs_epl) - f_xx2, f_xy2, f_yx2, f_yy2 = self.multipole.hessian(self.x, self.y, **kwargs_multipole) - f_xx = f_xx1 + f_xx2; f_xy = f_xy1 + f_xy2; f_yx = f_yx1 + f_yx2; f_yy = f_yy1 + f_yy2 - f_xx_, f_xy_, f_yx_, f_yy_ = self.epl_boxydisky.hessian(self.x, self.y, **kwargs_epl_boxydisky) + kwargs_multipole = {"m": 4, "a_m": a_m, "phi_m": phi} + kwargs_epl_boxydisky = { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "a_m": a_m, + } + f_xx1, f_xy1, f_yx1, f_yy1 = self.epl.hessian( + self.x, self.y, **kwargs_epl + ) + f_xx2, f_xy2, f_yx2, f_yy2 = self.multipole.hessian( + self.x, self.y, **kwargs_multipole + ) + f_xx = f_xx1 + f_xx2 + f_xy = f_xy1 + f_xy2 + f_yx = f_yx1 + f_yx2 + f_yy = f_yy1 + f_yy2 + f_xx_, f_xy_, f_yx_, f_yy_ = self.epl_boxydisky.hessian( + self.x, self.y, **kwargs_epl_boxydisky + ) npt.assert_almost_equal(f_xx, f_xx_, decimal=10) npt.assert_almost_equal(f_xy, f_xy_, decimal=10) npt.assert_almost_equal(f_yx, f_yx_, decimal=10) npt.assert_almost_equal(f_yy, f_yy_, decimal=10) -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_epl_numba.py b/test/test_LensModel/test_Profiles/test_epl_numba.py index d531a12e9..d5c94d138 100644 --- a/test/test_LensModel/test_Profiles/test_epl_numba.py +++ b/test/test_LensModel/test_Profiles/test_epl_numba.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -8,22 +8,23 @@ class TestEPL_numba(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): from lenstronomy.LensModel.Profiles.epl import EPL + self.EPL = EPL() from lenstronomy.LensModel.Profiles.epl_numba import EPL_numba + self.EPL_numba = EPL_numba() def test_function(self): - phi_E = 1. - gamma = 2. + phi_E = 1.0 + gamma = 2.0 q = 0.999 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) values_nb = self.EPL_numba.function(x, y, phi_E, gamma, e1, e2) @@ -33,7 +34,7 @@ def test_function(self): q = 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) values_nb = self.EPL_numba.function(x, y, phi_E, gamma, e1, e2) @@ -43,7 +44,7 @@ def test_function(self): q = 0.4 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.EPL.function(x, y, phi_E, gamma, e1, e2) values_nb = self.EPL_numba.function(x, y, phi_E, gamma, e1, e2) @@ -54,10 +55,10 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.8 - q = 1. - phi_G = 1. + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) f_x_nb, f_y_nb = self.EPL_numba.derivatives(x, y, phi_E, gamma, e1, e2) @@ -65,7 +66,7 @@ def test_derivatives(self): npt.assert_almost_equal(f_y, f_y_nb, decimal=10) q = 0.7 - phi_G = 1. 
+ phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) f_x_nb, f_y_nb = self.EPL_numba.derivatives(x, y, phi_E, gamma, e1, e2) @@ -73,59 +74,64 @@ def test_derivatives(self): npt.assert_almost_equal(f_y, f_y_nb, decimal=10) def test_hessian(self): - x = np.array([1.]) - y = np.array([2.]) - phi_E = 1. + x = np.array([1.0]) + y = np.array([2.0]) + phi_E = 1.0 gamma = 2.2 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.EPL.hessian(x, y, phi_E, gamma, e1, e2) - f_xx_nb, f_xy_nb, f_yx_nb, f_yy_nb = self.EPL_numba.hessian(x, y, phi_E, gamma, e1, e2) + f_xx_nb, f_xy_nb, f_yx_nb, f_yy_nb = self.EPL_numba.hessian( + x, y, phi_E, gamma, e1, e2 + ) npt.assert_almost_equal(f_xx, f_xx_nb, decimal=10) npt.assert_almost_equal(f_yy, f_yy_nb, decimal=10) npt.assert_almost_equal(f_xy, f_xy_nb, decimal=10) def test_regularization(self): - - phi_E = 1. - gamma = 2. - q = 1. - phi_G = 1. + phi_E = 1.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f_x, f_y = self.EPL_numba.derivatives(x, y, phi_E, gamma, e1, e2) - npt.assert_almost_equal(f_x, 0.) - npt.assert_almost_equal(f_y, 0.) + npt.assert_almost_equal(f_x, 0.0) + npt.assert_almost_equal(f_y, 0.0) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma, e1, e2) - npt.assert_almost_equal(f_x, 0.) - npt.assert_almost_equal(f_y, 0.) + npt.assert_almost_equal(f_x, 0.0) + npt.assert_almost_equal(f_y, 0.0) - x = 0. - y = 0. - f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma+0.1, e1, e2) - npt.assert_almost_equal(f_x, 0.) - npt.assert_almost_equal(f_y, 0.) + x = 0.0 + y = 0.0 + f_x, f_y = self.EPL.derivatives(x, y, phi_E, gamma + 0.1, e1, e2) + npt.assert_almost_equal(f_x, 0.0) + npt.assert_almost_equal(f_y, 0.0) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f = self.EPL_numba.function(x, y, phi_E, gamma, e1, e2) - npt.assert_almost_equal(f, 0.) + npt.assert_almost_equal(f, 0.0) - x = 0. - y = 0. 
+ x = 0.0 + y = 0.0 f_xx, f_xy, f_yx, f_yy = self.EPL_numba.hessian(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(f_xx, 1e10, decimal=10) npt.assert_almost_equal(f_yy, 0, decimal=10) - npt.assert_almost_equal(f_xy, 0, decimal=5) # floating point cancellation, so less precise + npt.assert_almost_equal( + f_xy, 0, decimal=5 + ) # floating point cancellation, so less precise # Magnification: - npt.assert_almost_equal(1/((1-f_xx)*(1-f_yy)-f_xy**2), 0., decimal=10) + npt.assert_almost_equal( + 1 / ((1 - f_xx) * (1 - f_yy) - f_xy**2), 0.0, decimal=10 + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_flexion.py b/test/test_LensModel/test_Profiles/test_flexion.py index 96ef49cb9..1fa7ab9bc 100644 --- a/test/test_LensModel/test_Profiles/test_flexion.py +++ b/test/test_LensModel/test_Profiles/test_flexion.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.flexion import Flexion @@ -10,14 +10,13 @@ class TestExternalShear(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.flex = Flexion() g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04 - self.kwargs_lens = {'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4} + self.kwargs_lens = {"g1": g1, "g2": g2, "g3": g3, "g4": g4} def test_function(self): x = np.array([1]) @@ -32,7 +31,7 @@ def test_function(self): x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) values = self.flex.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(values[0], 0.09, decimal=5) + npt.assert_almost_equal(values[0], 0.09, decimal=5) npt.assert_almost_equal(values[1], 0.18666666666666668, decimal=5) def test_derivatives(self): @@ -57,8 +56,8 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, 0.08, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.flex.hessian(x, y, **self.kwargs_lens) npt.assert_almost_equal(values[0][0], 0.05, decimal=5) npt.assert_almost_equal(values[3][0], 0.11, decimal=5) @@ -68,23 +67,23 @@ def test_hessian(self): def test_flexion(self): x = np.array(0) y = np.array(2) - flex = LensModel(['FLEXION']) + flex = LensModel(["FLEXION"]) f_xxx, f_xxy, f_xyy, f_yyy = flex.flexion(x, y, [self.kwargs_lens]) - npt.assert_almost_equal(f_xxx, self.kwargs_lens['g1'], decimal=9) - npt.assert_almost_equal(f_xxy, self.kwargs_lens['g2'], decimal=9) - npt.assert_almost_equal(f_xyy, self.kwargs_lens['g3'], decimal=9) - npt.assert_almost_equal(f_yyy, self.kwargs_lens['g4'], decimal=9) + npt.assert_almost_equal(f_xxx, self.kwargs_lens["g1"], decimal=9) + npt.assert_almost_equal(f_xxy, self.kwargs_lens["g2"], decimal=9) + npt.assert_almost_equal(f_xyy, self.kwargs_lens["g3"], decimal=9) + npt.assert_almost_equal(f_yyy, self.kwargs_lens["g4"], decimal=9) def test_magnification(self): ra_0, dec_0 = 1, -1 - flex = LensModel(['FLEXION']) + flex = LensModel(["FLEXION"]) g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04 - kwargs = {'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4, 'ra_0': ra_0, 'dec_0': dec_0} + kwargs = {"g1": g1, "g2": g2, "g3": g3, "g4": g4, "ra_0": ra_0, "dec_0": dec_0} mag = flex.magnification(ra_0, dec_0, [kwargs]) npt.assert_almost_equal(mag, 1, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_flexionfg.py b/test/test_LensModel/test_Profiles/test_flexionfg.py index 604012cd3..d83d461e5 
100644 --- a/test/test_LensModel/test_Profiles/test_flexionfg.py +++ b/test/test_LensModel/test_Profiles/test_flexionfg.py @@ -1,4 +1,4 @@ -__author__ = 'ylilan' +__author__ = "ylilan" from lenstronomy.LensModel.Profiles.flexionfg import Flexionfg @@ -8,18 +8,18 @@ import numpy.testing as npt import pytest + class TestFlexionfg(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.flex = Flexionfg() - F1, F2, G1, G2= 0.02, 0.03, -0.04, -0.05 - self.kwargs_lens = {'F1': F1, 'F2': F2, 'G1': G1, 'G2': G2} + F1, F2, G1, G2 = 0.02, 0.03, -0.04, -0.05 + self.kwargs_lens = {"F1": F1, "F2": F2, "G1": G1, "G2": G2} def test_transform_fg(self): - values=self.flex.transform_fg(**self.kwargs_lens) - g1,g2,g3,g4 = 0.01, 0.02, 0.03, 0.04 + values = self.flex.transform_fg(**self.kwargs_lens) + g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04 npt.assert_almost_equal(values[0], g1, decimal=5) npt.assert_almost_equal(values[1], g2, decimal=5) npt.assert_almost_equal(values[2], g3, decimal=5) @@ -73,9 +73,9 @@ def test_hessian(self): def test_flexion(self): x = np.array(0) y = np.array(2) - flex = LensModel(['FLEXIONFG']) + flex = LensModel(["FLEXIONFG"]) f_xxx, f_xxy, f_xyy, f_yyy = flex.flexion(x, y, [self.kwargs_lens]) - _g1,_g2,_g3,_g4 = self.flex.transform_fg(**self.kwargs_lens) + _g1, _g2, _g3, _g4 = self.flex.transform_fg(**self.kwargs_lens) npt.assert_almost_equal(f_xxx, _g1, decimal=9) npt.assert_almost_equal(f_xxy, _g2, decimal=9) npt.assert_almost_equal(f_xyy, _g3, decimal=9) @@ -84,12 +84,12 @@ def test_flexion(self): def test_magnification(self): ra_0, dec_0 = 1, -1 - flex = LensModel(['FLEXIONFG']) + flex = LensModel(["FLEXIONFG"]) F1, F2, G1, G2 = 0.02, 0.03, -0.04, -0.05 - kwargs = {'F1': F1, 'F2': F2, 'G1': G1, 'G2': G2, 'ra_0': ra_0, 'dec_0': dec_0} + kwargs = {"F1": F1, "F2": F2, "G1": G1, "G2": G2, "ra_0": ra_0, "dec_0": dec_0} mag = flex.magnification(ra_0, dec_0, [kwargs]) npt.assert_almost_equal(mag, 1, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_gauss_decomposition.py b/test/test_LensModel/test_Profiles/test_gauss_decomposition.py index fd09d9c3f..3137a43b0 100644 --- a/test/test_LensModel/test_Profiles/test_gauss_decomposition.py +++ b/test/test_LensModel/test_Profiles/test_gauss_decomposition.py @@ -1,4 +1,4 @@ -__author__ = 'ajshajib' +__author__ = "ajshajib" from lenstronomy.LensModel.Profiles.gauss_decomposition import SersicEllipseGaussDec from lenstronomy.LensModel.Profiles.gauss_decomposition import CTNFWGaussDec @@ -11,153 +11,179 @@ class TestSersicEllipseGaussDec(object): - """ - This class tests the methods for Gauss-decomposed elliptic Sersic - convergence. - """ + """This class tests the methods for Gauss-decomposed elliptic Sersic convergence.""" + def setup_method(self): self.sersic_gauss = SersicEllipseGaussDec() self.sersic_light = SersicElliptic(sersic_major_axis=False) self.sersic_sphere = Sersic(sersic_major_axis=False) def test_function(self): - """ - Test the potential function of Gauss-decomposed elliptical Sersic by - asserting that the numerical derivative of the computed potential - matches with the analytical derivative values. + """Test the potential function of Gauss-decomposed elliptical Sersic by + asserting that the numerical derivative of the computed potential matches with + the analytical derivative values. :return: :rtype: """ - k_eff = 1. - R_sersic = 1. - n_sersic = 1. 
+ k_eff = 1.0 + R_sersic = 1.0 + n_sersic = 1.0 e1 = 0.2 e2 = 0.2 - center_x = 0. - center_y = 0. + center_x = 0.0 + center_y = 0.0 - diff = 1.e-6 + diff = 1.0e-6 n = 5 - xs = np.linspace(0.5 * R_sersic, 2. * R_sersic, n) - ys = np.linspace(0.5 * R_sersic, 2. * R_sersic, n) + xs = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) + ys = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) for x, y in zip(xs, ys): - func = self.sersic_gauss.function(x, y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) - - func_dx = self.sersic_gauss.function(x+diff, y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) - - func_dy = self.sersic_gauss.function(x, y+diff, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) + func = self.sersic_gauss.function( + x, + y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) + + func_dx = self.sersic_gauss.function( + x + diff, + y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) + + func_dy = self.sersic_gauss.function( + x, + y + diff, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) f_x_num = (func_dx - func) / diff f_y_num = (func_dy - func) / diff - f_x, f_y = self.sersic_gauss.derivatives(x, y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) + f_x, f_y = self.sersic_gauss.derivatives( + x, + y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) npt.assert_almost_equal(f_x_num, f_x, decimal=4) npt.assert_almost_equal(f_y_num, f_y, decimal=4) def test_derivatives(self): - """ - Test the derivative function of Gauss-decomposed elliptical Sersic by + """Test the derivative function of Gauss-decomposed elliptical Sersic by matching with the spherical case. :return: :rtype: """ - k_eff = 1. - R_sersic = 1. - n_sersic = 1. - e1 = 5.e-5 - e2 = 0. - center_x = 0. - center_y = 0. + k_eff = 1.0 + R_sersic = 1.0 + n_sersic = 1.0 + e1 = 5.0e-5 + e2 = 0.0 + center_x = 0.0 + center_y = 0.0 n = 10 - x = np.linspace(0.5*R_sersic, 2.*R_sersic, n) - y = np.linspace(0.5*R_sersic, 2.*R_sersic, n) + x = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) + y = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) X, Y = np.meshgrid(x, y) - f_x_s, f_y_s = self.sersic_sphere.derivatives(X, Y, center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) - f_x, f_y = self.sersic_gauss.derivatives(X, Y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff - ) - - npt.assert_allclose(f_x, f_x_s, rtol=1e-3, atol=0.) - npt.assert_allclose(f_y, f_y_s, rtol=1e-3, atol=0.) 
+ f_x_s, f_y_s = self.sersic_sphere.derivatives( + X, + Y, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) + f_x, f_y = self.sersic_gauss.derivatives( + X, + Y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) + + npt.assert_allclose(f_x, f_x_s, rtol=1e-3, atol=0.0) + npt.assert_allclose(f_y, f_y_s, rtol=1e-3, atol=0.0) npt.assert_almost_equal(f_x, f_x_s, decimal=3) npt.assert_almost_equal(f_y, f_y_s, decimal=3) def test_hessian(self): - """ - Test the Hessian function of Gauss-decomposed elliptical Sersic by - matching with the spherical case. + """Test the Hessian function of Gauss-decomposed elliptical Sersic by matching + with the spherical case. :return: :rtype: """ - k_eff = 1. - R_sersic = 1. - n_sersic = 1. + k_eff = 1.0 + R_sersic = 1.0 + n_sersic = 1.0 e1 = 5e-5 - e2 = 0. - center_x = 0. - center_y = 0. + e2 = 0.0 + center_x = 0.0 + center_y = 0.0 n = 10 - x = np.linspace(0.5 * R_sersic, 2. * R_sersic, n) - y = np.linspace(0.5 * R_sersic, 2. * R_sersic, n) + x = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) + y = np.linspace(0.5 * R_sersic, 2.0 * R_sersic, n) X, Y = np.meshgrid(x, y) - f_xx_s, f_xy_s, f_yx_s, f_yy_s = self.sersic_sphere.hessian(X, Y, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff) - f_xx, f_xy, f_yx, f_yy = self.sersic_gauss.hessian(X, Y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff) + f_xx_s, f_xy_s, f_yx_s, f_yy_s = self.sersic_sphere.hessian( + X, + Y, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) + f_xx, f_xy, f_yx, f_yy = self.sersic_gauss.hessian( + X, + Y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) npt.assert_almost_equal(f_xx_s, f_xx, decimal=3) npt.assert_almost_equal(f_yy_s, f_yy, decimal=3) @@ -165,91 +191,103 @@ def test_hessian(self): npt.assert_almost_equal(f_xy_s, f_yx_s, decimal=3) def test_density_2d(self): - """ - Test the density function of Gauss-decomposed elliptical Sersic by - checking with the spherical case. + """Test the density function of Gauss-decomposed elliptical Sersic by checking + with the spherical case. :return: :rtype: """ - k_eff = 1. - R_sersic = 1. - n_sersic = 1. + k_eff = 1.0 + R_sersic = 1.0 + n_sersic = 1.0 e1 = 0.2 e2 = 0.2 - center_x = 0. - center_y = 0. 
+ center_x = 0.0 + center_y = 0.0 n = 100 - x = np.logspace(-1., 1., n) - y = np.logspace(-1., 1., n) + x = np.logspace(-1.0, 1.0, n) + y = np.logspace(-1.0, 1.0, n) X, Y = np.meshgrid(x, y) - sersic_analytic = self.sersic_light.function(X, Y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - amp=k_eff) - - sersic_gauss = self.sersic_gauss.density_2d(X, Y, e1=e1, e2=e2, - center_x=center_x, - center_y=center_y, - n_sersic=n_sersic, - R_sersic=R_sersic, - k_eff=k_eff) + sersic_analytic = self.sersic_light.function( + X, + Y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + amp=k_eff, + ) + + sersic_gauss = self.sersic_gauss.density_2d( + X, + Y, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + n_sersic=n_sersic, + R_sersic=R_sersic, + k_eff=k_eff, + ) print(np.abs(sersic_analytic - sersic_gauss) / np.sqrt(sersic_analytic)) - assert np.all(np.abs(sersic_analytic - sersic_gauss) / np.sqrt(sersic_analytic) * 100. < 1.) + assert np.all( + np.abs(sersic_analytic - sersic_gauss) / np.sqrt(sersic_analytic) * 100.0 + < 1.0 + ) def test_gauss_decompose_sersic(self): - """ - Test that `gauss_decompose_sersic()` decomposes the Sersic profile within 1% + """Test that `gauss_decompose_sersic()` decomposes the Sersic profile within 1% Poission noise at R_sersic. :return: :rtype: """ - y = np.logspace(-1., 1., 100) + y = np.logspace(-1.0, 1.0, 100) - k_eff = 1. - R_sersic = 1. - n_sersic = 1. + k_eff = 1.0 + R_sersic = 1.0 + n_sersic = 1.0 - amps, sigmas = self.sersic_gauss.gauss_decompose(n_sersic=n_sersic, - R_sersic=R_sersic, k_eff=k_eff) + amps, sigmas = self.sersic_gauss.gauss_decompose( + n_sersic=n_sersic, R_sersic=R_sersic, k_eff=k_eff + ) - sersic = self.sersic_gauss.get_kappa_1d(y, n_sersic=n_sersic, - R_sersic=R_sersic, k_eff=k_eff) + sersic = self.sersic_gauss.get_kappa_1d( + y, n_sersic=n_sersic, R_sersic=R_sersic, k_eff=k_eff + ) back_sersic = np.zeros_like(y) for a, s in zip(amps, sigmas): - back_sersic += a * np.exp(-y ** 2 / 2. / s ** 2) + back_sersic += a * np.exp(-(y**2) / 2.0 / s**2) - assert np.all(np.abs(sersic-back_sersic)/np.sqrt(sersic)*100. < 1.) + assert np.all(np.abs(sersic - back_sersic) / np.sqrt(sersic) * 100.0 < 1.0) class TestCTNFWGaussDec(object): - """ - This class tests the methods for Gauss-decomposed spherical - cored-truncated NFW profile. - """ + """This class tests the methods for Gauss-decomposed spherical cored-truncated NFW + profile.""" + def setup_method(self): self.ctnfw_gauss = CTNFWGaussDec(n_sigma=15) def test_gauss_decompose_ctnfw(self): - """ - Test the Gaussian decomposition of core-truncated NFW profile. + """Test the Gaussian decomposition of core-truncated NFW profile. + :return: :rtype: """ - rho_s = 5. - r_s = 5. + rho_s = 5.0 + r_s = 5.0 r_core = 0.3 - r_trunc = 10. + r_trunc = 10.0 a = 2 r = np.logspace(-1, 1, 1000) * r_s @@ -259,23 +297,27 @@ def test_gauss_decompose_ctnfw(self): x = r / r_s - true_values = rho_s * (tau * tau / (tau * tau + x * x)) / (x**a + - beta ** a) ** ( 1. / a) / (1. 
+ x) ** 2 + true_values = ( + rho_s + * (tau * tau / (tau * tau + x * x)) + / (x**a + beta**a) ** (1.0 / a) + / (1.0 + x) ** 2 + ) - amps, sigmas = self.ctnfw_gauss.gauss_decompose(r_s=r_s, - r_core=r_core, - r_trunc=r_trunc, - rho_s=rho_s, a=a) + amps, sigmas = self.ctnfw_gauss.gauss_decompose( + r_s=r_s, r_core=r_core, r_trunc=r_trunc, rho_s=rho_s, a=a + ) print(len(sigmas)) gauss_dec_values = np.zeros_like(x) for a, s in zip(amps, sigmas): - gauss_dec_values += a / np.sqrt(2*np.pi) / s * np.exp( - -r**2/2./s**2) + gauss_dec_values += ( + a / np.sqrt(2 * np.pi) / s * np.exp(-(r**2) / 2.0 / s**2) + ) # test if the approximation is valid within 2% npt.assert_allclose(true_values, true_values, rtol=0.02) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_gaussian.py b/test/test_LensModel/test_Profiles/test_gaussian.py index 1f6a5e583..ed4e5d10e 100644 --- a/test/test_LensModel/test_Profiles/test_gaussian.py +++ b/test/test_LensModel/test_Profiles/test_gaussian.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.gaussian_potential import Gaussian from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa @@ -7,76 +7,79 @@ import numpy.testing as npt import pytest + class TestGaussian(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.Gaussian = Gaussian() def test_function(self): x = 1 y = 2 - amp = 1.*2*np.pi - center_x = 1. - center_y = 1. - sigma_x = 1. - sigma_y = 1. + amp = 1.0 * 2 * np.pi + center_x = 1.0 + center_y = 1.0 + sigma_x = 1.0 + sigma_y = 1.0 values = self.Gaussian.function(x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values == np.exp(-1./2) - x = np.array([2,3,4]) - y = np.array([1,1,1]) + assert values == np.exp(-1.0 / 2) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values = self.Gaussian.function(x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values[0] == np.exp(-1./2) - assert values[1] == np.exp(-2.**2/2) - assert values[2] == np.exp(-3.**2/2) + assert values[0] == np.exp(-1.0 / 2) + assert values[1] == np.exp(-(2.0**2) / 2) + assert values[2] == np.exp(-(3.0**2) / 2) def test_derivatives(self): x = 1 y = 2 - amp = 1.*2*np.pi - center_x = 1. - center_y = 1. - sigma_x = 1. - sigma_y = 1. - values = self.Gaussian.derivatives( x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values[0] == 0. - assert values[1] == -np.exp(-1./2) - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.Gaussian.derivatives( x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values[0][0] == -np.exp(-1./2) - assert values[1][0] == 0. - assert values[0][1] == -2*np.exp(-2.**2/2) - assert values[1][1] == 0. + amp = 1.0 * 2 * np.pi + center_x = 1.0 + center_y = 1.0 + sigma_x = 1.0 + sigma_y = 1.0 + values = self.Gaussian.derivatives( + x, y, amp, center_x, center_y, sigma_x, sigma_y + ) + assert values[0] == 0.0 + assert values[1] == -np.exp(-1.0 / 2) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.Gaussian.derivatives( + x, y, amp, center_x, center_y, sigma_x, sigma_y + ) + assert values[0][0] == -np.exp(-1.0 / 2) + assert values[1][0] == 0.0 + assert values[0][1] == -2 * np.exp(-(2.0**2) / 2) + assert values[1][1] == 0.0 def test_hessian(self): x = 1 y = 2 - amp = 1.*2*np.pi - center_x = 1. - center_y = 1. - sigma_x = 1. - sigma_y = 1. 
- values = self.Gaussian.hessian( x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values[0] == -np.exp(-1./2) - assert values[3] == 0. - assert values[1] == 0. - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.Gaussian.hessian( x, y, amp, center_x, center_y, sigma_x, sigma_y) - assert values[0][0] == 0. - assert values[3][0] == -np.exp(-1./2) - assert values[1][0] == 0. + amp = 1.0 * 2 * np.pi + center_x = 1.0 + center_y = 1.0 + sigma_x = 1.0 + sigma_y = 1.0 + values = self.Gaussian.hessian(x, y, amp, center_x, center_y, sigma_x, sigma_y) + assert values[0] == -np.exp(-1.0 / 2) + assert values[3] == 0.0 + assert values[1] == 0.0 + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.Gaussian.hessian(x, y, amp, center_x, center_y, sigma_x, sigma_y) + assert values[0][0] == 0.0 + assert values[3][0] == -np.exp(-1.0 / 2) + assert values[1][0] == 0.0 npt.assert_almost_equal(values[0][1], 0.40600584970983811, decimal=9) npt.assert_almost_equal(values[3][1], -0.1353352832366127, decimal=9) npt.assert_almost_equal(values[1][1], 0, decimal=9) class TestGaussianKappa(object): - """ - test the Gaussian with Gaussian kappa - """ + """Test the Gaussian with Gaussian kappa.""" + def setup_method(self): self.gaussian_kappa = GaussianKappa() self.gaussian = Gaussian() @@ -84,10 +87,10 @@ def setup_method(self): def test_derivatives(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = 1.*2*np.pi - center_x = 0. - center_y = 0. - sigma = 1. + amp = 1.0 * 2 * np.pi + center_x = 0.0 + center_y = 0.0 + sigma = 1.0 f_x, f_y = self.gaussian_kappa.derivatives(x, y, amp, sigma, center_x, center_y) npt.assert_almost_equal(f_x[2], 0.63813558702212059, decimal=8) npt.assert_almost_equal(f_y[2], 0.63813558702212059, decimal=8) @@ -95,13 +98,15 @@ def test_derivatives(self): def test_hessian(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = 1.*2*np.pi - center_x = 0. - center_y = 0. - sigma = 1. + amp = 1.0 * 2 * np.pi + center_x = 0.0 + center_y = 0.0 + sigma = 1.0 - f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian(x, y, amp, sigma, center_x, center_y) - kappa = 1./2 * (f_xx + f_yy) + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian( + x, y, amp, sigma, center_x, center_y + ) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_true = self.gaussian.function(x, y, amp, sigma, sigma, center_x, center_y) print(kappa_true) print(kappa) @@ -111,29 +116,35 @@ def test_hessian(self): def test_density_2d(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = 1.*2*np.pi - center_x = 0. - center_y = 0. - sigma = 1. - f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian(x, y, amp, sigma, center_x, center_y) - kappa = 1./2 * (f_xx + f_yy) + amp = 1.0 * 2 * np.pi + center_x = 0.0 + center_y = 0.0 + sigma = 1.0 + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian( + x, y, amp, sigma, center_x, center_y + ) + kappa = 1.0 / 2 * (f_xx + f_yy) amp_3d = self.gaussian_kappa._amp2d_to_3d(amp, sigma, sigma) - density_2d = self.gaussian_kappa.density_2d(x, y, amp_3d, sigma, center_x, center_y) + density_2d = self.gaussian_kappa.density_2d( + x, y, amp_3d, sigma, center_x, center_y + ) npt.assert_almost_equal(kappa[1], density_2d[1], decimal=5) npt.assert_almost_equal(kappa[2], density_2d[2], decimal=5) def test_3d_2d_convention(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = 1.*2*np.pi - center_x = 0. - center_y = 0. - sigma = 1. 
+ amp = 1.0 * 2 * np.pi + center_x = 0.0 + center_y = 0.0 + sigma = 1.0 amp_3d = self.gaussian_kappa._amp2d_to_3d(amp, sigma, sigma) - density_2d_gauss = self.gaussian_kappa.density_2d(x, y, amp_3d, sigma, center_x, center_y) + density_2d_gauss = self.gaussian_kappa.density_2d( + x, y, amp_3d, sigma, center_x, center_y + ) density_2d = self.gaussian.function(x, y, amp, sigma, sigma, center_x, center_y) npt.assert_almost_equal(density_2d_gauss[1], density_2d[1], decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_gaussian_ellipse_kappa.py b/test/test_LensModel/test_Profiles/test_gaussian_ellipse_kappa.py index 6b790cc1c..827ec4244 100644 --- a/test/test_LensModel/test_Profiles/test_gaussian_ellipse_kappa.py +++ b/test/test_LensModel/test_Profiles/test_gaussian_ellipse_kappa.py @@ -1,4 +1,4 @@ -__author__ = 'ajshajib' +__author__ = "ajshajib" from lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import GaussianEllipseKappa from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa @@ -11,9 +11,8 @@ class TestGaussianEllipseKappa(object): - """ - This class tests the methods for elliptical Gaussian convergence. - """ + """This class tests the methods for elliptical Gaussian convergence.""" + def setup_method(self): """ :return: @@ -23,135 +22,148 @@ def setup_method(self): self.gaussian_kappa_ellipse = GaussianEllipseKappa() def test_function(self): - """ - Test the `function()` method at the spherical limit. + """Test the `function()` method at the spherical limit. :return: :rtype: """ # almost spherical case - x = 1. - y = 1. - e1, e2 = 5e-5, 0. - sigma = 1. - amp = 2. + x = 1.0 + y = 1.0 + e1, e2 = 5e-5, 0.0 + sigma = 1.0 + amp = 2.0 f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2) - r2 = x*x + y*y - f_sphere = amp/(2.*np.pi*sigma**2) * sigma**2 * (np.euler_gamma - - expi(-r2/2./sigma**2) + np.log(r2/2./sigma**2)) + r2 = x * x + y * y + f_sphere = ( + amp + / (2.0 * np.pi * sigma**2) + * sigma**2 + * ( + np.euler_gamma + - expi(-r2 / 2.0 / sigma**2) + + np.log(r2 / 2.0 / sigma**2) + ) + ) npt.assert_almost_equal(f_, f_sphere, decimal=4) # spherical case - e1, e2 = 0., 0. + e1, e2 = 0.0, 0.0 f_ = self.gaussian_kappa_ellipse.function(x, y, amp, sigma, e1, e2) npt.assert_almost_equal(f_, f_sphere, decimal=4) def test_derivatives(self): - """ - Test the `derivatives()` method at the spherical limit. + """Test the `derivatives()` method at the spherical limit. :return: :rtype: """ # almost spherical case - x = 1. - y = 1. - e1, e2 = 5e-5, 0. - sigma = 1. - amp = 2. - - f_x, f_y = self.gaussian_kappa_ellipse.derivatives(x, y, amp, sigma, - e1, e2) - f_x_sphere, f_y_sphere = self.gaussian_kappa.derivatives(x, y, amp=amp, - sigma=sigma) + x = 1.0 + y = 1.0 + e1, e2 = 5e-5, 0.0 + sigma = 1.0 + amp = 2.0 + + f_x, f_y = self.gaussian_kappa_ellipse.derivatives(x, y, amp, sigma, e1, e2) + f_x_sphere, f_y_sphere = self.gaussian_kappa.derivatives( + x, y, amp=amp, sigma=sigma + ) npt.assert_almost_equal(f_x, f_x_sphere, decimal=4) npt.assert_almost_equal(f_y, f_y_sphere, decimal=4) # spherical case - e1, e2 = 0., 0. - f_x, f_y = self.gaussian_kappa_ellipse.derivatives(x, y, amp, sigma, - e1, e2) + e1, e2 = 0.0, 0.0 + f_x, f_y = self.gaussian_kappa_ellipse.derivatives(x, y, amp, sigma, e1, e2) npt.assert_almost_equal(f_x, f_x_sphere, decimal=4) npt.assert_almost_equal(f_y, f_y_sphere, decimal=4) def test_hessian(self): - """ - Test the `hessian()` method at the spherical limit. 
+ """Test the `hessian()` method at the spherical limit. :return: :rtype: """ # almost spherical case - x = 1. - y = 1. - e1, e2 = 5e-5, 0. - sigma = 1. - amp = 2. - - f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp, - sigma, e1, e2) - f_xx_sphere, f_xy_sphere, f_yx_sphere, f_yy_sphere = self.gaussian_kappa.hessian(x, - y, amp=amp, sigma=sigma) + x = 1.0 + y = 1.0 + e1, e2 = 5e-5, 0.0 + sigma = 1.0 + amp = 2.0 + + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian( + x, y, amp, sigma, e1, e2 + ) + ( + f_xx_sphere, + f_xy_sphere, + f_yx_sphere, + f_yy_sphere, + ) = self.gaussian_kappa.hessian(x, y, amp=amp, sigma=sigma) npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4) npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4) npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4) npt.assert_almost_equal(f_yx, f_xy, decimal=8) # spherical case - e1, e2 = 0., 0. - f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp, sigma, e1, e2) + e1, e2 = 0.0, 0.0 + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian( + x, y, amp, sigma, e1, e2 + ) npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4) npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4) npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4) def test_density_2d(self): - """ - Test the `density_2d()` method at the spherical limit. + """Test the `density_2d()` method at the spherical limit. :return: :rtype: """ # almost spherical case - x = 1. - y = 1. - e1, e2 = 5e-5, 0. - sigma = 1. - amp = 2. + x = 1.0 + y = 1.0 + e1, e2 = 5e-5, 0.0 + sigma = 1.0 + amp = 2.0 f_ = self.gaussian_kappa_ellipse.density_2d(x, y, amp, sigma, e1, e2) - f_sphere = amp / (2.*np.pi*sigma**2) * np.exp(-(x*x+y*y)/2./sigma**2) + f_sphere = ( + amp + / (2.0 * np.pi * sigma**2) + * np.exp(-(x * x + y * y) / 2.0 / sigma**2) + ) npt.assert_almost_equal(f_, f_sphere, decimal=4) def test_w_f_approx(self): - """ - Test the `w_f_approx()` method with values computed using + """Test the `w_f_approx()` method with values computed using `scipy.special.wofz()`. :return: :rtype: """ - x = np.logspace(-3., 3., 100) - y = np.logspace(-3., 3., 100) + x = np.logspace(-3.0, 3.0, 100) + y = np.logspace(-3.0, 3.0, 100) X, Y = np.meshgrid(x, y) - w_f_app = self.gaussian_kappa_ellipse.w_f_approx(X+1j*Y) - w_f_scipy = wofz(X+1j*Y) + w_f_app = self.gaussian_kappa_ellipse.w_f_approx(X + 1j * Y) + w_f_scipy = wofz(X + 1j * Y) npt.assert_allclose(w_f_app.real, w_f_scipy.real, rtol=4e-5, atol=0) npt.assert_allclose(w_f_app.imag, w_f_scipy.imag, rtol=4e-5, atol=0) # check `derivatives()` method with and without `scipy.special.wofz()` - x = 1. - y = 1. + x = 1.0 + y = 1.0 e1, e2 = 5e-5, 0 - sigma = 1. - amp = 2. 
+ sigma = 1.0 + amp = 2.0 # with `scipy.special.wofz()` gauss_scipy = GaussianEllipseKappa(use_scipy_wofz=True) @@ -165,5 +177,5 @@ def test_w_f_approx(self): npt.assert_almost_equal(f_y_sp, f_y_ap, decimal=4) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_gaussian_ellipse_potential.py b/test/test_LensModel/test_Profiles/test_gaussian_ellipse_potential.py index 041582d14..76fb5872b 100644 --- a/test/test_LensModel/test_Profiles/test_gaussian_ellipse_potential.py +++ b/test/test_LensModel/test_Profiles/test_gaussian_ellipse_potential.py @@ -1,6 +1,8 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" -from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import GaussianEllipsePotential +from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import ( + GaussianEllipsePotential, +) from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa import numpy as np @@ -9,9 +11,8 @@ class TestGaussianKappaPot(object): - """ - test the Gaussian with Gaussian kappa - """ + """Test the Gaussian with Gaussian kappa.""" + def setup_method(self): self.gaussian_kappa = GaussianKappa() self.ellipse = GaussianEllipsePotential() @@ -33,7 +34,9 @@ def test_derivatives(self): sigma = 1 amp = 1 f_x, f_y = self.ellipse.derivatives(x, y, amp, sigma, e1, e2) - f_x_sphere, f_y_sphere = self.gaussian_kappa.derivatives(x, y, amp=amp, sigma=sigma) + f_x_sphere, f_y_sphere = self.gaussian_kappa.derivatives( + x, y, amp=amp, sigma=sigma + ) npt.assert_almost_equal(f_x, f_x_sphere, decimal=8) npt.assert_almost_equal(f_y, f_y_sphere, decimal=8) @@ -44,7 +47,12 @@ def test_hessian(self): sigma = 1 amp = 1 f_xx, f_xy, f_yx, f_yy = self.ellipse.hessian(x, y, amp, sigma, e1, e2) - f_xx_sphere, f_xy_sphere, f_yx_sphere, f_yy_sphere = self.gaussian_kappa.hessian(x, y, amp=amp, sigma=sigma) + ( + f_xx_sphere, + f_xy_sphere, + f_yx_sphere, + f_yy_sphere, + ) = self.gaussian_kappa.hessian(x, y, amp=amp, sigma=sigma) npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=5) npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=5) npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=5) @@ -79,5 +87,5 @@ def test_mass_2d_lens(self): npt.assert_almost_equal(f_, f_sphere, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_gnfw.py b/test/test_LensModel/test_Profiles/test_gnfw.py index 9fe3ec335..735f0aa5e 100644 --- a/test/test_LensModel/test_Profiles/test_gnfw.py +++ b/test/test_LensModel/test_Profiles/test_gnfw.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" import unittest from lenstronomy.LensModel.Profiles.general_nfw import GNFW @@ -11,29 +11,46 @@ class TestGNFW(object): - def setup_method(self): self.gnfw = GNFW() self.splcore = SPLCORE() - self.kwargs_lens = {'alpha_Rs': 2.1, 'Rs': 1.5, 'gamma_inner': 1.0, 'gamma_outer': 3.0,'center_x': 0.04, 'center_y': -1.0} + self.kwargs_lens = { + "alpha_Rs": 2.1, + "Rs": 1.5, + "gamma_inner": 1.0, + "gamma_outer": 3.0, + "center_x": 0.04, + "center_y": -1.0, + } def test_alphaRs(self): - - alpha_rs = self.gnfw.derivatives(self.kwargs_lens['Rs'], 0.0, self.kwargs_lens['Rs'], self.kwargs_lens['alpha_Rs'], - self.kwargs_lens['gamma_inner'], self.kwargs_lens['gamma_outer'])[0] - npt.assert_almost_equal(alpha_rs, self.kwargs_lens['alpha_Rs'], 8) + alpha_rs = self.gnfw.derivatives( + self.kwargs_lens["Rs"], + 0.0, + self.kwargs_lens["Rs"], + self.kwargs_lens["alpha_Rs"], + 
self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + )[0] + npt.assert_almost_equal(alpha_rs, self.kwargs_lens["alpha_Rs"], 8) def test_alphaRs_rho0_conversion(self): - - rho0 = self.gnfw.alpha2rho0(self.kwargs_lens['alpha_Rs'], self.kwargs_lens['Rs'], - self.kwargs_lens['gamma_inner'], self.kwargs_lens['gamma_outer']) - alpha_Rs = self.gnfw.rho02alpha(rho0, self.kwargs_lens['Rs'], self.kwargs_lens['gamma_inner'], - self.kwargs_lens['gamma_outer']) - npt.assert_almost_equal(alpha_Rs, self.kwargs_lens['alpha_Rs'], 5) + rho0 = self.gnfw.alpha2rho0( + self.kwargs_lens["alpha_Rs"], + self.kwargs_lens["Rs"], + self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + ) + alpha_Rs = self.gnfw.rho02alpha( + rho0, + self.kwargs_lens["Rs"], + self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + ) + npt.assert_almost_equal(alpha_Rs, self.kwargs_lens["alpha_Rs"], 5) def test_lensing_quantities(self): - - lensmodel = LensModel(['GNFW']) + lensmodel = LensModel(["GNFW"]) f_x, f_y = self.gnfw.derivatives(1.0, 1.5, **self.kwargs_lens) f_x_, f_y_ = lensmodel.alpha(1.0, 1.5, [self.kwargs_lens]) npt.assert_almost_equal(f_x, f_x_, 5) @@ -46,26 +63,50 @@ def test_lensing_quantities(self): npt.assert_almost_equal(f_xy, f_xy_, 5) def test_mass2d(self): - - rho0 = self.gnfw.alpha2rho0(self.kwargs_lens['alpha_Rs'], self.kwargs_lens['Rs'], - self.kwargs_lens['gamma_inner'], self.kwargs_lens['gamma_outer']) - m2d = self.gnfw.mass_2d(10.0, self.kwargs_lens['Rs'], rho0, self.kwargs_lens['gamma_inner'], - self.kwargs_lens['gamma_outer']) - integrand = lambda x: 2 * 3.14159265 * x * self.gnfw.density_2d(x, 0.0, self.kwargs_lens['Rs'], rho0, self.kwargs_lens['gamma_inner'], - self.kwargs_lens['gamma_outer']) - m2d_num = quad(integrand, 0, 10.)[0] - npt.assert_almost_equal(m2d_num/m2d, 1.0, 5) + rho0 = self.gnfw.alpha2rho0( + self.kwargs_lens["alpha_Rs"], + self.kwargs_lens["Rs"], + self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + ) + m2d = self.gnfw.mass_2d( + 10.0, + self.kwargs_lens["Rs"], + rho0, + self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + ) + integrand = ( + lambda x: 2 + * 3.14159265 + * x + * self.gnfw.density_2d( + x, + 0.0, + self.kwargs_lens["Rs"], + rho0, + self.kwargs_lens["gamma_inner"], + self.kwargs_lens["gamma_outer"], + ) + ) + m2d_num = quad(integrand, 0, 10.0)[0] + npt.assert_almost_equal(m2d_num / m2d, 1.0, 5) def test_spl_core_match(self): - rs = 1.5 - kwargs_spl = {'sigma0': 1e13, 'gamma': 3.0, 'r_core': 0.00000001} + kwargs_spl = {"sigma0": 1e13, "gamma": 3.0, "r_core": 0.00000001} alpha_rs = self.splcore.derivatives(rs, 0.0, **kwargs_spl)[0] - kwargs_gnfw = {'alpha_Rs': alpha_rs, 'Rs':rs, 'gamma_inner': 2.99999, 'gamma_outer': 3.00001} - m3d_gnfw = self.gnfw.mass_3d_lens(5*rs,**kwargs_gnfw) - m3d_splcore = self.splcore.mass_3d_lens(5*rs, **kwargs_spl) - npt.assert_almost_equal(m3d_gnfw/m3d_splcore, 0.935, 3) + kwargs_gnfw = { + "alpha_Rs": alpha_rs, + "Rs": rs, + "gamma_inner": 2.99999, + "gamma_outer": 3.00001, + } + m3d_gnfw = self.gnfw.mass_3d_lens(5 * rs, **kwargs_gnfw) + m3d_splcore = self.splcore.mass_3d_lens(5 * rs, **kwargs_spl) + npt.assert_almost_equal(m3d_gnfw / m3d_splcore, 0.935, 3) # approximate match to splcore with similar properties -if __name__ == '__main__': - pytest.main() \ No newline at end of file + +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_hernquist.py b/test/test_LensModel/test_Profiles/test_hernquist.py index 
520fc6138..d6ffff83d 100644 --- a/test/test_LensModel/test_Profiles/test_hernquist.py +++ b/test/test_LensModel/test_Profiles/test_hernquist.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -10,27 +10,26 @@ class TestHernquist(object): - def setup_method(self): self.profile = Hernquist() def test_function(self): x = np.array([1]) y = np.array([2]) - Rs = 1. + Rs = 1.0 sigma0 = 0.5 values = self.profile.function(x, y, sigma0, Rs) npt.assert_almost_equal(values[0], 0.66514613455415028, decimal=8) x = np.array([0]) y = np.array([0]) - Rs = 1. + Rs = 1.0 sigma0 = 0.5 values = self.profile.function(x, y, sigma0, Rs) npt.assert_almost_equal(values[0], 0, decimal=6) - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.profile.function( x, y, sigma0, Rs) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.profile.function(x, y, sigma0, Rs) npt.assert_almost_equal(values[0], 0.66514613455415028, decimal=8) npt.assert_almost_equal(values[1], 0.87449395673649566, decimal=8) npt.assert_almost_equal(values[2], 1.0549139073851708, decimal=8) @@ -38,21 +37,21 @@ def test_function(self): def test_derivatives(self): x = 1 y = 2 - Rs = 1. + Rs = 1.0 sigma0 = 0.5 - f_x, f_y = self.profile.derivatives( x, y, sigma0, Rs) + f_x, f_y = self.profile.derivatives(x, y, sigma0, Rs) npt.assert_almost_equal(f_x, 0.11160641027573866, decimal=8) npt.assert_almost_equal(f_y, 0.22321282055147731, decimal=8) x = np.array([0]) y = np.array([0]) - f_x, f_y = self.profile.derivatives( x, y, sigma0, Rs) + f_x, f_y = self.profile.derivatives(x, y, sigma0, Rs) npt.assert_almost_equal(f_x, 0, decimal=8) npt.assert_almost_equal(f_y, 0, decimal=8) def test_hessian(self): x = np.array([1]) y = np.array([2]) - Rs = 1. + Rs = 1.0 sigma0 = 0.5 f_xx, f_xy, f_yx, f_yy = self.profile.hessian(x, y, sigma0, Rs) npt.assert_almost_equal(f_xx[0], 0.0779016004481825, decimal=6) @@ -74,22 +73,21 @@ def test_grav_pot(self): npt.assert_almost_equal(grav_pot, 42.411500823462205, decimal=8) def test_sigma0_definition(self): - Rs = 2. + Rs = 2.0 sigma0 = 0.5 f_x, f_y = self.profile.derivatives(Rs, 0, sigma0, Rs) alpha = f_x - npt.assert_almost_equal(alpha, 2/3. * sigma0 * Rs, decimal=5) + npt.assert_almost_equal(alpha, 2 / 3.0 * sigma0 * Rs, decimal=5) class TestHernquistEllipse(object): - def setup_method(self): self.profile = Hernquist_Ellipse() def test_function(self): x = np.array([1]) y = np.array([2]) - Rs = 1. + Rs = 1.0 sigma0 = 0.5 q, phi_G = 0.8, 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) @@ -97,14 +95,14 @@ def test_function(self): npt.assert_almost_equal(values[0], 0.6431256503990406, decimal=6) x = np.array([0]) y = np.array([0]) - Rs = 1. + Rs = 1.0 sigma0 = 0.5 values = self.profile.function(x, y, sigma0, Rs, e1, e2) npt.assert_almost_equal(values[0], 0, decimal=6) - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.profile.function( x, y, sigma0, Rs, e1, e2) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.profile.function(x, y, sigma0, Rs, e1, e2) npt.assert_almost_equal(values[0], 0.5983066087852915, decimal=6) npt.assert_almost_equal(values[1], 0.8014743038748537, decimal=6) npt.assert_almost_equal(values[2], 0.9806423979121521, decimal=6) @@ -112,11 +110,11 @@ def test_function(self): def test_derivatives(self): x = 1 y = 2 - Rs = 1. 
+ Rs = 1.0 sigma0 = 0.5 q, phi_G = 0.8, 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - f_x, f_y = self.profile.derivatives( x, y, sigma0, Rs, e1, e2) + f_x, f_y = self.profile.derivatives(x, y, sigma0, Rs, e1, e2) npt.assert_almost_equal(f_x, 0.06066651014748484, decimal=6) npt.assert_almost_equal(f_y, 0.24321226850316485, decimal=6) x = np.array([0]) @@ -126,9 +124,9 @@ def test_derivatives(self): npt.assert_almost_equal(f_y, 0, decimal=8) def test_hessian(self): - x = np.array([1.]) - y = np.array([2.]) - Rs = 1. + x = np.array([1.0]) + y = np.array([2.0]) + Rs = 1.0 sigma0 = 0.5 q, phi_G = 0.8, 0.5 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) @@ -139,5 +137,5 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, f_yx, decimal=6) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_hernquist_ellipse_cse.py b/test/test_LensModel/test_Profiles/test_hernquist_ellipse_cse.py index 29bc06162..e99205c30 100644 --- a/test/test_LensModel/test_Profiles/test_hernquist_ellipse_cse.py +++ b/test/test_LensModel/test_Profiles/test_hernquist_ellipse_cse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.hernquist_ellipse_cse import HernquistEllipseCSE @@ -10,9 +10,8 @@ class TestHernquistEllipseCSE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.hernquist = Hernquist() self.hernquist_cse = HernquistEllipseCSE() @@ -20,7 +19,7 @@ def setup_method(self): def test_function(self): x = np.linspace(0.01, 2, 10) y = np.zeros_like(x) - kwargs = {'sigma0': 2, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"sigma0": 2, "Rs": 2, "center_x": 0, "center_y": 0} f_nfw = self.hernquist.function(x, y, **kwargs) f_cse = self.hernquist_cse.function(x, y, e1=0, e2=0, **kwargs) @@ -29,7 +28,7 @@ def test_function(self): def test_derivatives(self): x = np.linspace(0.01, 2, 10) y = np.zeros_like(x) - kwargs = {'sigma0': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"sigma0": 0.5, "Rs": 2, "center_x": 0, "center_y": 0} f_x_nfw, f_y_nfw = self.hernquist.derivatives(x, y, **kwargs) f_x_cse, f_y_cse = self.hernquist_cse.derivatives(x, y, e1=0, e2=0, **kwargs) @@ -39,10 +38,12 @@ def test_derivatives(self): def test_hessian(self): x = np.linspace(0.01, 5, 30) y = np.zeros_like(x) - kwargs = {'sigma0': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"sigma0": 0.5, "Rs": 2, "center_x": 0, "center_y": 0} f_xx_nfw, f_xy_nfw, f_yx_nfw, f_yy_nfw = self.hernquist.hessian(x, y, **kwargs) - f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.hernquist_cse.hessian(x, y, e1=0, e2=0, **kwargs) + f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.hernquist_cse.hessian( + x, y, e1=0, e2=0, **kwargs + ) npt.assert_almost_equal(f_xx_cse / f_xx_nfw, 1, decimal=2) npt.assert_almost_equal(f_xy_cse, f_xy_nfw, decimal=5) npt.assert_almost_equal(f_yx_cse, f_yx_nfw, decimal=5) @@ -57,5 +58,5 @@ def test_mass_3d_lens(self): npt.assert_almost_equal(m_3d_nfw, m_3d_cse, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_hessian.py b/test/test_LensModel/test_Profiles/test_hessian.py index efacd7db2..d6467d59f 100644 --- a/test/test_LensModel/test_Profiles/test_hessian.py +++ b/test/test_LensModel/test_Profiles/test_hessian.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import unittest from lenstronomy.LensModel.Profiles.hessian import 
Hessian @@ -10,20 +10,32 @@ class TestHessian(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.hessian = Hessian() self.f_xx, self.f_yy, self.f_xy, self.f_yx = 0.1, 0.1, -0.1, -0.1 - self.kwargs_lens = {'f_xx': self.f_xx, 'f_yy': self.f_yy, 'f_xy': self.f_xy, 'f_yx': self.f_yx} + self.kwargs_lens = { + "f_xx": self.f_xx, + "f_yy": self.f_yy, + "f_xy": self.f_xy, + "f_yx": self.f_yx, + } def test_function(self): x = 1 y = 2 values = self.hessian.function(x, y, **self.kwargs_lens) - f_true = 1/2. * (self.f_xx * x**2 + (self.f_xy + self.f_yx) * x * y + self.f_yy * y**2) + f_true = ( + 1 + / 2.0 + * ( + self.f_xx * x**2 + + (self.f_xy + self.f_yx) * x * y + + self.f_yy * y**2 + ) + ) npt.assert_almost_equal(values, f_true, decimal=5) x = np.array([0]) y = np.array([0]) @@ -49,9 +61,14 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, self.f_xy, decimal=5) npt.assert_almost_equal(f_yx, self.f_yx, decimal=5) - lensModel = LensModel(['HESSIAN']) + lensModel = LensModel(["HESSIAN"]) f_xy_true, f_yx_true = 0.3, 0.2 - kwargs_lens = {'f_xx': self.f_xx, 'f_yy': self.f_yy, 'f_xy': f_xy_true, 'f_yx': f_yx_true} + kwargs_lens = { + "f_xx": self.f_xx, + "f_yy": self.f_yy, + "f_xy": f_xy_true, + "f_yx": f_yx_true, + } f_xx, f_xy, f_yx, f_yy = lensModel.hessian(x, y, [kwargs_lens], diff=0.001) npt.assert_almost_equal(f_xx, self.f_xx, decimal=9) npt.assert_almost_equal(f_yy, self.f_yy, decimal=9) @@ -59,5 +76,5 @@ def test_hessian(self): npt.assert_almost_equal(f_yx, f_yx_true, decimal=9) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_interpol.py b/test/test_LensModel/test_Profiles/test_interpol.py index bff2b0cd7..41dbdc893 100644 --- a/test/test_LensModel/test_Profiles/test_interpol.py +++ b/test/test_LensModel/test_Profiles/test_interpol.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np import numpy.testing as npt @@ -9,42 +9,76 @@ class TestInterpol(object): - def setup_method(self): pass def test_do_interpol(self): numPix = 101 deltaPix = 0.1 - x_grid_interp, y_grid_interp = util.make_grid(numPix,deltaPix) + x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E': 1., 'center_x': 0.5, 'center_y':-0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian( + x_grid_interp, y_grid_interp, **kwargs_SIS + ) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) interp_func = Interpol(grid=True) interp_func_loop = Interpol(grid=False) - interp_func.do_interp(x_axes, y_axes, util.array2image(f_sis), util.array2image(f_x_sis), util.array2image(f_y_sis), util.array2image(f_xx_sis), util.array2image(f_yy_sis), util.array2image(f_xy_sis)) - interp_func_loop.do_interp(x_axes, y_axes, util.array2image(f_sis), util.array2image(f_x_sis), util.array2image(f_y_sis), util.array2image(f_xx_sis), util.array2image(f_yy_sis), util.array2image(f_xy_sis)) + interp_func.do_interp( + x_axes, + y_axes, + util.array2image(f_sis), + util.array2image(f_x_sis), + util.array2image(f_y_sis), + util.array2image(f_xx_sis), + util.array2image(f_yy_sis), + util.array2image(f_xy_sis), 
+ ) + interp_func_loop.do_interp( + x_axes, + y_axes, + util.array2image(f_sis), + util.array2image(f_x_sis), + util.array2image(f_y_sis), + util.array2image(f_xx_sis), + util.array2image(f_yy_sis), + util.array2image(f_xy_sis), + ) # test derivatives print(interp_func.derivatives(0, 1)) print(sis.derivatives(1, 0, **kwargs_SIS)) - #assert interp_func.derivatives(1, 0) == sis.derivatives(1, 0, **kwargs_SIS) + # assert interp_func.derivatives(1, 0) == sis.derivatives(1, 0, **kwargs_SIS) assert interp_func.derivatives(1, 0) == interp_func_loop.derivatives(1, 0) - alpha1_interp, alpha2_interp = interp_func.derivatives(np.array([0,1,0,1]), np.array([1,1,2,2])) - alpha1_interp_loop, alpha2_interp_loop = interp_func_loop.derivatives(np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2])) - alpha1_true, alpha2_true = sis.derivatives(np.array([0,1,0,1]),np.array([1,1,2,2]), **kwargs_SIS) + alpha1_interp, alpha2_interp = interp_func.derivatives( + np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2]) + ) + alpha1_interp_loop, alpha2_interp_loop = interp_func_loop.derivatives( + np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2]) + ) + alpha1_true, alpha2_true = sis.derivatives( + np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2]), **kwargs_SIS + ) assert alpha1_interp[0] == alpha1_true[0] assert alpha1_interp[1] == alpha1_true[1] assert alpha1_interp[0] == alpha1_interp_loop[0] assert alpha1_interp[1] == alpha1_interp_loop[1] # test hessian - assert interp_func.hessian(1,0) == sis.hessian(1,0, **kwargs_SIS) - f_xx_interp, f_xy_interp, f_yx_interp, f_yy_interp = interp_func.hessian(np.array([0,1,0,1]), np.array([1,1,2,2])) - f_xx_interp_loop, f_xy_interp_loop, f_yx_interp_loop, f_yy_interp_loop = interp_func_loop.hessian(np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2])) - f_xx_true, f_xy_true, f_yx_true, f_yy_true = sis.hessian(np.array([0,1,0,1]),np.array([1,1,2,2]), **kwargs_SIS) + assert interp_func.hessian(1, 0) == sis.hessian(1, 0, **kwargs_SIS) + f_xx_interp, f_xy_interp, f_yx_interp, f_yy_interp = interp_func.hessian( + np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2]) + ) + ( + f_xx_interp_loop, + f_xy_interp_loop, + f_yx_interp_loop, + f_yy_interp_loop, + ) = interp_func_loop.hessian(np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2])) + f_xx_true, f_xy_true, f_yx_true, f_yy_true = sis.hessian( + np.array([0, 1, 0, 1]), np.array([1, 1, 2, 2]), **kwargs_SIS + ) assert f_xx_interp[0] == f_xx_true[0] assert f_xx_interp[1] == f_xx_true[1] assert f_xy_interp[0] == f_xy_true[0] @@ -58,36 +92,58 @@ def test_do_interpol(self): def test_call(self): numPix = 101 deltaPix = 0.1 - x_grid_interp, y_grid_interp = util.make_grid(numPix,deltaPix) + x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E':1., 'center_x': 0.5, 'center_y': -0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian( + x_grid_interp, y_grid_interp, **kwargs_SIS + ) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) interp_func = Interpol(grid=True) - interp_func.do_interp(x_axes, y_axes, util.array2image(f_sis), util.array2image(f_x_sis), util.array2image(f_y_sis), util.array2image(f_xx_sis), util.array2image(f_yy_sis), util.array2image(f_xy_sis)) - x, y = 1., 1. 
+ interp_func.do_interp( + x_axes, + y_axes, + util.array2image(f_sis), + util.array2image(f_x_sis), + util.array2image(f_y_sis), + util.array2image(f_xx_sis), + util.array2image(f_yy_sis), + util.array2image(f_xy_sis), + ) + x, y = 1.0, 1.0 alpha_x, alpha_y = interp_func.derivatives(x, y, **{}) assert alpha_x == 0.31622776601683794 def test_kwargs_interpolation(self): numPix = 101 deltaPix = 0.1 - x_grid_interp, y_grid_interp = util.make_grid(numPix,deltaPix) + x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E':1., 'center_x': 0.5, 'center_y': -0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian( + x_grid_interp, y_grid_interp, **kwargs_SIS + ) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) interp_func = Interpol() - kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_sis), 'f_x': util.array2image(f_x_sis), 'f_y': util.array2image(f_y_sis), 'f_xx': util.array2image(f_xx_sis), 'f_yy': util.array2image(f_yy_sis), 'f_xy': util.array2image(f_xy_sis)} - x, y = 1., 1. + kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_sis), + "f_x": util.array2image(f_x_sis), + "f_y": util.array2image(f_y_sis), + "f_xx": util.array2image(f_xx_sis), + "f_yy": util.array2image(f_yy_sis), + "f_xy": util.array2image(f_xy_sis), + } + x, y = 1.0, 1.0 alpha_x, alpha_y = interp_func.derivatives(x, y, **kwargs_interp) assert alpha_x == 0.31622776601683794 - x, y = 1., 0. + x, y = 1.0, 0.0 alpha_x, alpha_y = interp_func.derivatives(x, y, **kwargs_interp) alpha_x_true, alpha_y_true = sis.derivatives(x, y, **kwargs_SIS) npt.assert_almost_equal(alpha_x, alpha_x_true, decimal=10) @@ -102,14 +158,19 @@ def test_hessian_finite_differential(self): deltaPix = 0.1 x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E': 1., 'center_x': 0.5, 'center_y': -0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) interp_func = Interpol() - kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_sis), - 'f_x': util.array2image(f_x_sis), 'f_y': util.array2image(f_y_sis)} - x, y = 1., 0. 
+ kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_sis), + "f_x": util.array2image(f_x_sis), + "f_y": util.array2image(f_y_sis), + } + x, y = 1.0, 0.0 f_xx, f_xy, f_yx, f_yy = interp_func.hessian(x, y, **kwargs_interp) f_xx_true, f_xy_true, f_yx_true, f_yy_true = sis.hessian(x, y, **kwargs_SIS) npt.assert_almost_equal(f_xx, f_xx_true, decimal=1) @@ -118,33 +179,49 @@ def test_hessian_finite_differential(self): npt.assert_almost_equal(f_yy, f_yy_true, decimal=1) def test_interp_func_scaled(self): - numPix = 101 deltaPix = 0.1 - x_grid_interp, y_grid_interp = util.make_grid(numPix,deltaPix) + x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E':1., 'center_x': 0.5, 'center_y': -0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian( + x_grid_interp, y_grid_interp, **kwargs_SIS + ) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) - kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_sis), 'f_x': util.array2image(f_x_sis), 'f_y': util.array2image(f_y_sis), 'f_xx': util.array2image(f_xx_sis), 'f_yy': util.array2image(f_yy_sis), 'f_xy': util.array2image(f_xy_sis)} + kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_sis), + "f_x": util.array2image(f_x_sis), + "f_y": util.array2image(f_y_sis), + "f_xx": util.array2image(f_xx_sis), + "f_yy": util.array2image(f_yy_sis), + "f_xy": util.array2image(f_xy_sis), + } interp_func = InterpolScaled(grid=False) - x, y = 1., 1. 
- alpha_x, alpha_y = interp_func.derivatives(x, y, scale_factor=1, **kwargs_interp) + x, y = 1.0, 1.0 + alpha_x, alpha_y = interp_func.derivatives( + x, y, scale_factor=1, **kwargs_interp + ) assert alpha_x == 0.31622776601683794 - f_ = interp_func.function(x, y, scale_factor=1., **kwargs_interp) + f_ = interp_func.function(x, y, scale_factor=1.0, **kwargs_interp) npt.assert_almost_equal(f_, 1.5811388300841898) - f_xx, f_xy, f_yx, f_yy = interp_func.hessian(x, y, scale_factor=1., **kwargs_interp) + f_xx, f_xy, f_yx, f_yy = interp_func.hessian( + x, y, scale_factor=1.0, **kwargs_interp + ) npt.assert_almost_equal(f_xx, 0.56920997883030822, decimal=8) npt.assert_almost_equal(f_yy, 0.063245553203367583, decimal=8) npt.assert_almost_equal(f_xy, -0.18973665961010275, decimal=8) npt.assert_almost_equal(f_xy, f_yx, decimal=8) x_grid, y_grid = util.make_grid(10, deltaPix) - f_xx, f_xy, f_yx, f_yy = interp_func.hessian(x_grid, y_grid, scale_factor=1., **kwargs_interp) + f_xx, f_xy, f_yx, f_yy = interp_func.hessian( + x_grid, y_grid, scale_factor=1.0, **kwargs_interp + ) npt.assert_almost_equal(f_xx[0], 0, decimal=2) def test_shift(self): @@ -153,29 +230,45 @@ def test_shift(self): x_grid_interp, y_grid_interp = util.make_grid(numPix, deltaPix) sis = SIS() - kwargs_SIS = {'theta_E': 1., 'center_x': 0.5, 'center_y': -0.5} + kwargs_SIS = {"theta_E": 1.0, "center_x": 0.5, "center_y": -0.5} f_sis = sis.function(x_grid_interp, y_grid_interp, **kwargs_SIS) f_x_sis, f_y_sis = sis.derivatives(x_grid_interp, y_grid_interp, **kwargs_SIS) - f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian(x_grid_interp, y_grid_interp, **kwargs_SIS) + f_xx_sis, f_xy_sis, f_yx_sis, f_yy_sis = sis.hessian( + x_grid_interp, y_grid_interp, **kwargs_SIS + ) x_axes, y_axes = util.get_axes(x_grid_interp, y_grid_interp) - kwargs_interp = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_sis), - 'f_x': util.array2image(f_x_sis), 'f_y': util.array2image(f_y_sis), - 'f_xx': util.array2image(f_xx_sis), 'f_yy': util.array2image(f_yy_sis), - 'f_xy': util.array2image(f_xy_sis)} + kwargs_interp = { + "grid_interp_x": x_axes, + "grid_interp_y": y_axes, + "f_": util.array2image(f_sis), + "f_x": util.array2image(f_x_sis), + "f_y": util.array2image(f_y_sis), + "f_xx": util.array2image(f_xx_sis), + "f_yy": util.array2image(f_yy_sis), + "f_xy": util.array2image(f_xy_sis), + } interp_func = Interpol(grid=False) - x, y = 1., 1. + x, y = 1.0, 1.0 alpha_x, alpha_y = interp_func.derivatives(x, y, **kwargs_interp) assert alpha_x == 0.31622776601683794 interp_func = Interpol(grid=False) - x_shift = 1. 
- kwargs_shift = {'grid_interp_x': x_axes + x_shift, 'grid_interp_y': y_axes, 'f_': util.array2image(f_sis), - 'f_x': util.array2image(f_x_sis), 'f_y': util.array2image(f_y_sis), - 'f_xx': util.array2image(f_xx_sis), 'f_yy': util.array2image(f_yy_sis), - 'f_xy': util.array2image(f_xy_sis)} - alpha_x_shift, alpha_y_shift = interp_func.derivatives(x + x_shift, y, **kwargs_shift) + x_shift = 1.0 + kwargs_shift = { + "grid_interp_x": x_axes + x_shift, + "grid_interp_y": y_axes, + "f_": util.array2image(f_sis), + "f_x": util.array2image(f_x_sis), + "f_y": util.array2image(f_y_sis), + "f_xx": util.array2image(f_xx_sis), + "f_yy": util.array2image(f_yy_sis), + "f_xy": util.array2image(f_xy_sis), + } + alpha_x_shift, alpha_y_shift = interp_func.derivatives( + x + x_shift, y, **kwargs_shift + ) npt.assert_almost_equal(alpha_x_shift, alpha_x, decimal=10) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_multi_gaussian_kappa.py b/test/test_LensModel/test_Profiles/test_multi_gaussian_kappa.py index 595ff0fba..81d8d0940 100644 --- a/test/test_LensModel/test_Profiles/test_multi_gaussian_kappa.py +++ b/test/test_LensModel/test_Profiles/test_multi_gaussian_kappa.py @@ -1,10 +1,15 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LightModel.Profiles.gaussian import Gaussian from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa -from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import GaussianEllipsePotential -from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import MultiGaussianKappa, MultiGaussianKappaEllipse +from lenstronomy.LensModel.Profiles.gaussian_ellipse_potential import ( + GaussianEllipsePotential, +) +from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import ( + MultiGaussianKappa, + MultiGaussianKappaEllipse, +) import numpy as np import numpy.testing as npt @@ -12,9 +17,8 @@ class TestGaussianKappa(object): - """ - test the Gaussian with Gaussian kappa - """ + """Test the Gaussian with Gaussian kappa.""" + def setup_method(self): self.gaussian_kappa = MultiGaussianKappa() self.gaussian = Gaussian() @@ -23,10 +27,10 @@ def setup_method(self): def test_derivatives(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = [1.*2*np.pi] - center_x = 0. - center_y = 0. - sigma = [1.] + amp = [1.0 * 2 * np.pi] + center_x = 0.0 + center_y = 0.0 + sigma = [1.0] f_x, f_y = self.gaussian_kappa.derivatives(x, y, amp, sigma, center_x, center_y) npt.assert_almost_equal(f_x[2], 0.63813558702212059, decimal=8) npt.assert_almost_equal(f_y[2], 0.63813558702212059, decimal=8) @@ -34,12 +38,14 @@ def test_derivatives(self): def test_hessian(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = [1.*2*np.pi] - center_x = 0. - center_y = 0. - sigma = [1.] - f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian(x, y, amp, sigma, center_x, center_y) - kappa = 1./2 * (f_xx + f_yy) + amp = [1.0 * 2 * np.pi] + center_x = 0.0 + center_y = 0.0 + sigma = [1.0] + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian( + x, y, amp, sigma, center_x, center_y + ) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_true = self.gaussian.function(x, y, amp[0], sigma[0], center_x, center_y) print(kappa_true) print(kappa) @@ -50,29 +56,32 @@ def test_hessian(self): def test_density_2d(self): x = np.linspace(0, 5, 10) y = np.linspace(0, 5, 10) - amp = [1.*2*np.pi] - center_x = 0. - center_y = 0. - sigma = [1.] 
- f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian(x, y, amp, sigma, center_x, center_y) - kappa = 1./2 * (f_xx + f_yy) + amp = [1.0 * 2 * np.pi] + center_x = 0.0 + center_y = 0.0 + sigma = [1.0] + f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa.hessian( + x, y, amp, sigma, center_x, center_y + ) + kappa = 1.0 / 2 * (f_xx + f_yy) amp_3d = self.g_kappa._amp2d_to_3d(amp, sigma[0], sigma[0]) - density_2d = self.gaussian_kappa.density_2d(x, y, amp_3d, sigma, center_x, center_y) + density_2d = self.gaussian_kappa.density_2d( + x, y, amp_3d, sigma, center_x, center_y + ) npt.assert_almost_equal(kappa[1], density_2d[1], decimal=5) npt.assert_almost_equal(kappa[2], density_2d[2], decimal=5) def test_density(self): - amp = [1.*2*np.pi] + amp = [1.0 * 2 * np.pi] - sigma = [1.] - density = self.gaussian_kappa.density(1., amp, sigma) + sigma = [1.0] + density = self.gaussian_kappa.density(1.0, amp, sigma) npt.assert_almost_equal(density, 0.6065306597126334, decimal=8) class TestGaussianKappaEllipse(object): - """ - test the Gaussian with Gaussian kappa - """ + """Test the Gaussian with Gaussian kappa.""" + def setup_method(self): self.multi = MultiGaussianKappaEllipse() self.single = GaussianEllipsePotential() @@ -83,8 +92,26 @@ def test_function(self): sigma = 1 e1, e2 = 0.1, -0.1 center_x, center_y = 1, 0 - f_ = self.multi.function(x, y, amp=[amp], sigma=[sigma], e1=e1, e2=e2, center_x=center_x, center_y=center_y) - f_single = self.single.function(x, y, amp=amp, sigma=sigma, e1=e1, e2=e2, center_x=center_x, center_y=center_y) + f_ = self.multi.function( + x, + y, + amp=[amp], + sigma=[sigma], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) + f_single = self.single.function( + x, + y, + amp=amp, + sigma=sigma, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) npt.assert_almost_equal(f_, f_single, decimal=8) def test_derivatives(self): @@ -93,8 +120,26 @@ def test_derivatives(self): sigma = 1 e1, e2 = 0.1, -0.1 center_x, center_y = 1, 0 - f_x, f_y = self.multi.derivatives(x, y, amp=[amp], sigma=[sigma], e1=e1, e2=e2, center_x=center_x, center_y=center_y) - f_x_s, f_y_s = self.single.derivatives(x, y, amp=amp, sigma=sigma, e1=e1, e2=e2, center_x=center_x, center_y=center_y) + f_x, f_y = self.multi.derivatives( + x, + y, + amp=[amp], + sigma=[sigma], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) + f_x_s, f_y_s = self.single.derivatives( + x, + y, + amp=amp, + sigma=sigma, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) npt.assert_almost_equal(f_x, f_x_s, decimal=8) npt.assert_almost_equal(f_y, f_y_s, decimal=8) @@ -104,8 +149,26 @@ def test_hessian(self): sigma = 1 e1, e2 = 0.1, -0.1 center_x, center_y = 1, 0 - f_xx, f_xy, f_yx, f_yy = self.multi.hessian(x, y, amp=[amp], sigma=[sigma], e1=e1, e2=e2, center_x=center_x, center_y=center_y) - f_xx_s, f_xy_s, f_yx_s, f_yy_s = self.single.hessian(x, y, amp=amp, sigma=sigma, e1=e1, e2=e2, center_x=center_x, center_y=center_y) + f_xx, f_xy, f_yx, f_yy = self.multi.hessian( + x, + y, + amp=[amp], + sigma=[sigma], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) + f_xx_s, f_xy_s, f_yx_s, f_yy_s = self.single.hessian( + x, + y, + amp=amp, + sigma=sigma, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) npt.assert_almost_equal(f_xx, f_xx_s, decimal=8) npt.assert_almost_equal(f_yy, f_yy_s, decimal=8) npt.assert_almost_equal(f_xy, f_xy_s, decimal=8) @@ -117,8 +180,26 @@ def test_density_2d(self): sigma = 1 e1, e2 = 0.1, -0.1 center_x, center_y = 1, 0 - f_ = 
self.multi.density_2d(x, y, amp=[amp], sigma=[sigma], e1=e1, e2=e2, center_x=center_x, center_y=center_y) - f_single = self.single.density_2d(x, y, amp=amp, sigma=sigma, e1=e1, e2=e2, center_x=center_x, center_y=center_y) + f_ = self.multi.density_2d( + x, + y, + amp=[amp], + sigma=[sigma], + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) + f_single = self.single.density_2d( + x, + y, + amp=amp, + sigma=sigma, + e1=e1, + e2=e2, + center_x=center_x, + center_y=center_y, + ) npt.assert_almost_equal(f_, f_single, decimal=8) def test_density(self): @@ -131,5 +212,5 @@ def test_density(self): npt.assert_almost_equal(f_, f_single, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_multipole.py b/test/test_LensModel/test_Profiles/test_multipole.py index 348baf95d..16d102e6a 100644 --- a/test/test_LensModel/test_Profiles/test_multipole.py +++ b/test/test_LensModel/test_Profiles/test_multipole.py @@ -1,4 +1,4 @@ -__author__ = 'lynevdv' +__author__ = "lynevdv" from lenstronomy.LensModel.Profiles.multipole import Multipole @@ -9,9 +9,8 @@ class TestMultipole(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.Multipole = Multipole() @@ -20,7 +19,7 @@ def test_function(self): y = 2 m = 4 a_m = 0.05 - phi_m = 25*np.pi/180. + phi_m = 25 * np.pi / 180.0 values = self.Multipole.function(x, y, m, a_m, phi_m) npt.assert_almost_equal(values, 0.006684307, decimal=6) x = np.array([0]) @@ -40,7 +39,7 @@ def test_derivatives(self): y = 2 m = 4 a_m = 0.05 - phi_m = 25 * np.pi / 180. + phi_m = 25 * np.pi / 180.0 f_x, f_y = self.Multipole.derivatives(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_x, -0.003939644, decimal=6) npt.assert_almost_equal(f_y, 0.005311976, decimal=6) @@ -66,7 +65,7 @@ def test_hessian(self): y = 2 m = 4 a_m = 0.05 - phi_m = 25 * np.pi / 180. + phi_m = 25 * np.pi / 180.0 f_xx, f_xy, f_yx, f_yy = self.Multipole.hessian(x, y, m, a_m, phi_m) npt.assert_almost_equal(f_xx, -0.016042338, decimal=6) npt.assert_almost_equal(f_yy, -0.004010584, decimal=6) @@ -78,8 +77,8 @@ def test_hessian(self): npt.assert_almost_equal(f_xx[0], -0.016042338, decimal=6) npt.assert_almost_equal(f_yy[0], -0.004010584, decimal=6) npt.assert_almost_equal(f_xy[0], 0.008021169, decimal=6) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.Multipole.hessian(x, y, m, a_m, phi_m) npt.assert_almost_equal(values[0][0], -0.016042338, decimal=6) npt.assert_almost_equal(values[3][0], -0.004010584, decimal=6) @@ -89,5 +88,5 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.004253867, decimal=6) -if __name__ == '__main__': - pytest.main() +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw.py b/test/test_LensModel/test_Profiles/test_nfw.py index e777a3051..5a3bf8720 100644 --- a/test/test_LensModel/test_Profiles/test_nfw.py +++ b/test/test_LensModel/test_Profiles/test_nfw.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.nfw import NFW @@ -10,63 +10,255 @@ class TestNFW(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.nfw = NFW() def test_function(self): x = np.array([1]) y = np.array([2]) - Rs = 1. 
+ Rs = 1.0 rho0 = 1 alpha_Rs = self.nfw.rho02alpha(rho0, Rs) values = self.nfw.function(x, y, Rs, alpha_Rs) npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5) x = np.array([0]) y = np.array([0]) - Rs = 1. + Rs = 1.0 rho0 = 1 alpha_Rs = self.nfw.rho02alpha(rho0, Rs) values = self.nfw.function(x, y, Rs, alpha_Rs) npt.assert_almost_equal(values[0], 0, decimal=4) - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values = self.nfw.function(x, y, Rs, alpha_Rs) npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5) npt.assert_almost_equal(values[1], 3.5400250357511416, decimal=5) npt.assert_almost_equal(values[2], 4.5623722261790647, decimal=5) def test_derivatives(self): - Rs = .1 + Rs = 0.1 alpha_Rs = 0.0122741127776 - x_array = np.array([0.0, 0.00505050505,0.0101010101,0.0151515152,0.0202020202,0.0252525253, - 0.0303030303,0.0353535354,0.0404040404,0.0454545455,0.0505050505,0.0555555556,0.0606060606,0.0656565657,0.0707070707,0.0757575758,0.0808080808,0.0858585859,0.0909090909,0.095959596,0.101010101,0.106060606, - 0.111111111,0.116161616,0.121212121,0.126262626,0.131313131,0.136363636,0.141414141,0.146464646,0.151515152,0.156565657, - 0.161616162,0.166666667,0.171717172,0.176767677,0.181818182,0.186868687,0.191919192,0.196969697,0.202020202,0.207070707,0.212121212,0.217171717,0.222222222,0.227272727,0.232323232,0.237373737,0.242424242,0.247474747,0.252525253,0.257575758,0.262626263,0.267676768,0.272727273,0.277777778,0.282828283, - 0.287878788,0.292929293,0.297979798,0.303030303,0.308080808,0.313131313,0.318181818,0.323232323,0.328282828,0.333333333,0.338383838,0.343434343,0.348484848, - 0.353535354,0.358585859,0.363636364,0.368686869,0.373737374,0.378787879,0.383838384,0.388888889,0.393939394,0.398989899,0.404040404,0.409090909, - 0.414141414,0.419191919,0.424242424,0.429292929,0.434343434,0.439393939,0.444444444,0.449494949,0.454545455,0.45959596,0.464646465,0.46969697,0.474747475,0.47979798,0.484848485,0.48989899,0.494949495,0.5]) - truth_alpha = np.array([0.0, 0.00321693283, 0.00505903212, - 0.00640987376,0.00746125453,0.00830491158, 0.00899473755, 0.00956596353,0.0100431963,0.0104444157,0.0107831983,0.0110700554,0.0113132882,0.0115195584,0.0116942837,0.0118419208, - 0.011966171,0.0120701346,0.012156428,0.0122272735,0.0122845699,0.0123299487,0.0123648177,0.0123903978,0.0124077515,0.0124178072,0.0124213787,0.0124191816,0.0124118471,0.0123999334,0.0123839353,0.0123642924,0.0123413964, - 0.0123155966,0.0122872054,0.0122565027,0.0122237393,0.0121891409,0.0121529102,0.0121152302,0.0120762657,0.0120361656,0.0119950646,0.0119530846,0.0119103359,0.0118669186,0.0118229235,0.0117784329,0.0117335217, - 0.011688258,0.0116427037,0.0115969149,0.0115509429,0.0115048343,0.0114586314,0.0114123729,0.011366094,0.0113198264,0.0112735995,0.0112274395,0.0111813706,0.0111354147, - 0.0110895915,0.011043919,0.0109984136,0.01095309,0.0109079617,0.0108630406,0.0108183376,0.0107738625,0.010729624,0.01068563,0.0106418875,0.0105984026,0.0105551809,0.0105122271,0.0104695455,0.0104271398,0.010385013,0.0103431679,0.0103016067,0.0102603311, - 0.0102193428,0.0101786427,0.0101382318,0.0100981105,0.0100582792,0.0100187377,0.00997948602,0.00994052364,0.00990184999, - 0.00986346433, 0.00982536573,0.00978755314, 0.00975002537, 0.0097127811, 0.00967581893, 0.00963913734, 0.00960273473, 0.00956660941]) + x_array = np.array( + [ + 0.0, + 0.00505050505, + 0.0101010101, + 0.0151515152, + 0.0202020202, + 0.0252525253, + 0.0303030303, + 0.0353535354, + 
0.0404040404, + 0.0454545455, + 0.0505050505, + 0.0555555556, + 0.0606060606, + 0.0656565657, + 0.0707070707, + 0.0757575758, + 0.0808080808, + 0.0858585859, + 0.0909090909, + 0.095959596, + 0.101010101, + 0.106060606, + 0.111111111, + 0.116161616, + 0.121212121, + 0.126262626, + 0.131313131, + 0.136363636, + 0.141414141, + 0.146464646, + 0.151515152, + 0.156565657, + 0.161616162, + 0.166666667, + 0.171717172, + 0.176767677, + 0.181818182, + 0.186868687, + 0.191919192, + 0.196969697, + 0.202020202, + 0.207070707, + 0.212121212, + 0.217171717, + 0.222222222, + 0.227272727, + 0.232323232, + 0.237373737, + 0.242424242, + 0.247474747, + 0.252525253, + 0.257575758, + 0.262626263, + 0.267676768, + 0.272727273, + 0.277777778, + 0.282828283, + 0.287878788, + 0.292929293, + 0.297979798, + 0.303030303, + 0.308080808, + 0.313131313, + 0.318181818, + 0.323232323, + 0.328282828, + 0.333333333, + 0.338383838, + 0.343434343, + 0.348484848, + 0.353535354, + 0.358585859, + 0.363636364, + 0.368686869, + 0.373737374, + 0.378787879, + 0.383838384, + 0.388888889, + 0.393939394, + 0.398989899, + 0.404040404, + 0.409090909, + 0.414141414, + 0.419191919, + 0.424242424, + 0.429292929, + 0.434343434, + 0.439393939, + 0.444444444, + 0.449494949, + 0.454545455, + 0.45959596, + 0.464646465, + 0.46969697, + 0.474747475, + 0.47979798, + 0.484848485, + 0.48989899, + 0.494949495, + 0.5, + ] + ) + truth_alpha = np.array( + [ + 0.0, + 0.00321693283, + 0.00505903212, + 0.00640987376, + 0.00746125453, + 0.00830491158, + 0.00899473755, + 0.00956596353, + 0.0100431963, + 0.0104444157, + 0.0107831983, + 0.0110700554, + 0.0113132882, + 0.0115195584, + 0.0116942837, + 0.0118419208, + 0.011966171, + 0.0120701346, + 0.012156428, + 0.0122272735, + 0.0122845699, + 0.0123299487, + 0.0123648177, + 0.0123903978, + 0.0124077515, + 0.0124178072, + 0.0124213787, + 0.0124191816, + 0.0124118471, + 0.0123999334, + 0.0123839353, + 0.0123642924, + 0.0123413964, + 0.0123155966, + 0.0122872054, + 0.0122565027, + 0.0122237393, + 0.0121891409, + 0.0121529102, + 0.0121152302, + 0.0120762657, + 0.0120361656, + 0.0119950646, + 0.0119530846, + 0.0119103359, + 0.0118669186, + 0.0118229235, + 0.0117784329, + 0.0117335217, + 0.011688258, + 0.0116427037, + 0.0115969149, + 0.0115509429, + 0.0115048343, + 0.0114586314, + 0.0114123729, + 0.011366094, + 0.0113198264, + 0.0112735995, + 0.0112274395, + 0.0111813706, + 0.0111354147, + 0.0110895915, + 0.011043919, + 0.0109984136, + 0.01095309, + 0.0109079617, + 0.0108630406, + 0.0108183376, + 0.0107738625, + 0.010729624, + 0.01068563, + 0.0106418875, + 0.0105984026, + 0.0105551809, + 0.0105122271, + 0.0104695455, + 0.0104271398, + 0.010385013, + 0.0103431679, + 0.0103016067, + 0.0102603311, + 0.0102193428, + 0.0101786427, + 0.0101382318, + 0.0100981105, + 0.0100582792, + 0.0100187377, + 0.00997948602, + 0.00994052364, + 0.00990184999, + 0.00986346433, + 0.00982536573, + 0.00978755314, + 0.00975002537, + 0.0097127811, + 0.00967581893, + 0.00963913734, + 0.00960273473, + 0.00956660941, + ] + ) y_array = np.zeros_like(x_array) f_x, f_y = self.nfw.derivatives(x_array, y_array, Rs, alpha_Rs) - #print(f_x/truth_alpha) + # print(f_x/truth_alpha) for i in range(len(x_array)): npt.assert_almost_equal(f_x[i], truth_alpha[i], decimal=8) def test_hessian(self): x = np.array([1]) y = np.array([2]) - Rs = 1. 
+ Rs = 1.0 rho0 = 1 alpha_Rs = self.nfw.rho02alpha(rho0, Rs) f_xx, f_xy, f_yx, f_yy = self.nfw.hessian(x, y, Rs, alpha_Rs) @@ -75,8 +267,8 @@ def test_hessian(self): npt.assert_almost_equal(f_xy[0], -0.2471232696734742, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.nfw.hessian(x, y, Rs, alpha_Rs) npt.assert_almost_equal(values[0][0], 0.40855527280658294, decimal=5) npt.assert_almost_equal(values[3][0], 0.037870368296371637, decimal=5) @@ -115,20 +307,19 @@ def test_interpol(self): class TestMassAngleConversion(object): - """ - test angular to mass unit conversions - """ + """Test angular to mass unit conversions.""" + def setup_method(self): self.nfw = NFW() self.nfw_ellipse = NFW_ELLIPSE() def test_angle(self): x, y = 1, 0 - alpha1, alpha2 = self.nfw.derivatives(x, y, alpha_Rs=1., Rs=1.) - assert alpha1 == 1. + alpha1, alpha2 = self.nfw.derivatives(x, y, alpha_Rs=1.0, Rs=1.0) + assert alpha1 == 1.0 def test_convertAngle2rho(self): - rho0 = self.nfw.alpha2rho0(alpha_Rs=1., Rs=1.) + rho0 = self.nfw.alpha2rho0(alpha_Rs=1.0, Rs=1.0) assert rho0 == 0.81472283831773229 def test_convertrho02angle(self): @@ -139,5 +330,5 @@ def test_convertrho02angle(self): assert alpha_Rs_in == alpha_Rs_out -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw_core_truncated.py b/test/test_LensModel/test_Profiles/test_nfw_core_truncated.py index 804c6040c..8eaea71dd 100644 --- a/test/test_LensModel/test_Profiles/test_nfw_core_truncated.py +++ b/test/test_LensModel/test_Profiles/test_nfw_core_truncated.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" import unittest from lenstronomy.LensModel.Profiles.nfw_core_truncated import TNFWC @@ -9,29 +9,50 @@ class TestTNFWC(object): - def setup_method(self): self.tnfwc = TNFWC() self.gnfw = GNFW() def test_alphaRs(self): - - kwargs_lens = {'alpha_Rs': 2.1, 'Rs': 1.5, 'r_core': 1.2, 'r_trunc': 3.0, 'center_x': 0.04, 'center_y': -1.0} - alpha_rs = self.tnfwc.derivatives(kwargs_lens['Rs'], 0.0, kwargs_lens['Rs'], kwargs_lens['alpha_Rs'], - kwargs_lens['r_core'], kwargs_lens['r_trunc'])[0] - npt.assert_almost_equal(alpha_rs, kwargs_lens['alpha_Rs'], 8) + kwargs_lens = { + "alpha_Rs": 2.1, + "Rs": 1.5, + "r_core": 1.2, + "r_trunc": 3.0, + "center_x": 0.04, + "center_y": -1.0, + } + alpha_rs = self.tnfwc.derivatives( + kwargs_lens["Rs"], + 0.0, + kwargs_lens["Rs"], + kwargs_lens["alpha_Rs"], + kwargs_lens["r_core"], + kwargs_lens["r_trunc"], + )[0] + npt.assert_almost_equal(alpha_rs, kwargs_lens["alpha_Rs"], 8) def test_alphaRs_rho0_conversion(self): - - kwargs_lens = {'alpha_Rs': 2.1, 'Rs': 1.5, 'r_core': 1.2, 'r_trunc': 3.0, 'center_x': 0.04, 'center_y': -1.0} - rho0 = self.tnfwc.alpha2rho0(kwargs_lens['alpha_Rs'], kwargs_lens['Rs'], - kwargs_lens['r_core'], kwargs_lens['r_trunc']) - alpha_Rs = self.tnfwc.rho02alpha(rho0, kwargs_lens['Rs'], kwargs_lens['r_core'], - kwargs_lens['r_trunc']) - npt.assert_almost_equal(alpha_Rs, kwargs_lens['alpha_Rs'], 5) + kwargs_lens = { + "alpha_Rs": 2.1, + "Rs": 1.5, + "r_core": 1.2, + "r_trunc": 3.0, + "center_x": 0.04, + "center_y": -1.0, + } + rho0 = self.tnfwc.alpha2rho0( + kwargs_lens["alpha_Rs"], + kwargs_lens["Rs"], + kwargs_lens["r_core"], + kwargs_lens["r_trunc"], + ) + alpha_Rs = self.tnfwc.rho02alpha( + rho0, kwargs_lens["Rs"], kwargs_lens["r_core"], kwargs_lens["r_trunc"] + ) + npt.assert_almost_equal(alpha_Rs, 
kwargs_lens["alpha_Rs"], 5) def test_gnfw_match(self): - # profile reduces to GNFW with gamma_inner = 1.0, gamma_outer = 3.0 when core -> 0 and truncation -> inf alpha_Rs = 2.5 Rs = 1.5 @@ -47,13 +68,17 @@ def test_gnfw_match(self): density_2d_tnfwc = self.tnfwc.density_2d(R, 0.0, Rs, rho0, r_core, r_trunc) rho0 = self.gnfw.alpha2rho0(alpha_Rs, Rs, 1.0, 3.0) density_2d_gnfw = self.gnfw.density_2d(R, 0.0, Rs, rho0, 1.0, 3.0) - npt.assert_almost_equal(density_2d_tnfwc, density_2d_gnfw, 3.) + npt.assert_almost_equal(density_2d_tnfwc, density_2d_gnfw, 3.0) def test_mass3d(self): - - kwargs_lens = {'alpha_Rs': 2.1, 'Rs': 1.5, 'r_core': 1.2, 'r_trunc': 3.0} - rho0 = self.tnfwc.alpha2rho0(kwargs_lens['alpha_Rs'], kwargs_lens['Rs'], kwargs_lens['r_core'], kwargs_lens['r_trunc']) - kwargs_lens_rho0 = {'rho0': rho0, 'Rs': 1.5, 'r_core': 1.2, 'r_trunc': 3.0} + kwargs_lens = {"alpha_Rs": 2.1, "Rs": 1.5, "r_core": 1.2, "r_trunc": 3.0} + rho0 = self.tnfwc.alpha2rho0( + kwargs_lens["alpha_Rs"], + kwargs_lens["Rs"], + kwargs_lens["r_core"], + kwargs_lens["r_trunc"], + ) + kwargs_lens_rho0 = {"rho0": rho0, "Rs": 1.5, "r_core": 1.2, "r_trunc": 3.0} m1 = self.tnfwc.mass_3d(2.0, **kwargs_lens_rho0) m2 = self.tnfwc.mass_3d_lens(2.0, **kwargs_lens) npt.assert_almost_equal(m1, m2) @@ -65,9 +90,10 @@ def test_g(self): out1 = self.tnfwc._g(x, b, t) b = 1.5001 out2 = self.tnfwc._g(x, b, t) - out3 = self.tnfwc._g(x, b, t+0.001) + out3 = self.tnfwc._g(x, b, t + 0.001) npt.assert_almost_equal(out1, out2, 4) npt.assert_almost_equal(out1, out3, 4) -if __name__ == '__main__': - pytest.main() \ No newline at end of file + +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw_ellipse.py b/test/test_LensModel/test_Profiles/test_nfw_ellipse.py index 2b900eac5..6fa914c45 100644 --- a/test/test_LensModel/test_Profiles/test_nfw_ellipse.py +++ b/test/test_LensModel/test_Profiles/test_nfw_ellipse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.nfw import NFW @@ -11,9 +11,8 @@ class TestNFWELLIPSE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.nfw = NFW() self.nfw_e = NFW_ELLIPSE() @@ -21,9 +20,9 @@ def setup_method(self): def test_function(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. + Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw.function(x, y, Rs, alpha_Rs) @@ -32,14 +31,14 @@ def test_function(self): x = np.array([0]) y = np.array([0]) - q = .8 + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - values = self.nfw_e.function(x, y, Rs, alpha_Rs,e1, e2) + values = self.nfw_e.function(x, y, Rs, alpha_Rs, e1, e2) npt.assert_almost_equal(values[0], 0, decimal=4) - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values = self.nfw_e.function(x, y, Rs, alpha_Rs, e1, e2) npt.assert_almost_equal(values[0], 1.8690403434928538, decimal=5) npt.assert_almost_equal(values[1], 2.6186971904371217, decimal=5) @@ -48,9 +47,9 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. 
+ Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.nfw.derivatives(x, y, Rs, alpha_Rs) @@ -64,10 +63,10 @@ def test_derivatives(self): npt.assert_almost_equal(f_x[0], 0, decimal=5) npt.assert_almost_equal(f_y[0], 0, decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - alpha_Rs = 1. - q = .8 + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + alpha_Rs = 1.0 + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw_e.derivatives(x, y, Rs, alpha_Rs, e1, e2) @@ -79,9 +78,9 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - Rs = 1. - alpha_Rs = 1. - q = 1. + Rs = 1.0 + alpha_Rs = 1.0 + q = 1.0 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.nfw.hessian(x, y, Rs, alpha_Rs) @@ -91,9 +90,9 @@ def test_hessian(self): npt.assert_almost_equal(f_xy[0], f_xy_e[0], decimal=5) npt.assert_almost_equal(f_yx[0], f_yx_e[0], decimal=5) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - q = .8 + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + q = 0.8 phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nfw_e.hessian(x, y, Rs, alpha_Rs, e1, e2) @@ -112,5 +111,5 @@ def test_mass_3d_lens(self): npt.assert_almost_equal(m_3d, 1.1573795105019022, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw_ellipse_cse.py b/test/test_LensModel/test_Profiles/test_nfw_ellipse_cse.py index 424a47a74..c16aec1a8 100644 --- a/test/test_LensModel/test_Profiles/test_nfw_ellipse_cse.py +++ b/test/test_LensModel/test_Profiles/test_nfw_ellipse_cse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.nfw import NFW from lenstronomy.LensModel.Profiles.nfw_ellipse_cse import NFW_ELLIPSE_CSE @@ -11,9 +11,8 @@ class TestNFWELLIPSE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.nfw = NFW() self.nfw_cse = NFW_ELLIPSE_CSE(high_accuracy=True) @@ -22,7 +21,7 @@ def setup_method(self): def test_function(self): x = np.linspace(0.01, 2, 10) y = np.zeros_like(x) - kwargs = {'alpha_Rs': 2, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"alpha_Rs": 2, "Rs": 2, "center_x": 0, "center_y": 0} f_nfw = self.nfw.function(x, y, **kwargs) f_cse = self.nfw_cse.function(x, y, e1=0, e2=0, **kwargs) @@ -33,29 +32,35 @@ def test_function(self): def test_derivatives(self): x = np.linspace(0.01, 2, 10) y = np.zeros_like(x) - kwargs = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"alpha_Rs": 0.5, "Rs": 2, "center_x": 0, "center_y": 0} f_x_nfw, f_y_nfw = self.nfw.derivatives(x, y, **kwargs) f_x_cse, f_y_cse = self.nfw_cse.derivatives(x, y, e1=0, e2=0, **kwargs) npt.assert_almost_equal(f_x_cse, f_x_nfw, decimal=5) npt.assert_almost_equal(f_y_cse, f_y_nfw, decimal=5) - f_x_cse_low, f_y_cse_low = self.nfw_cse_low_accuracy.derivatives(x, y, e1=0, e2=0, **kwargs) + f_x_cse_low, f_y_cse_low = self.nfw_cse_low_accuracy.derivatives( + x, y, e1=0, e2=0, **kwargs + ) npt.assert_almost_equal(f_x_cse_low / f_x_nfw, 1, decimal=2) - npt.assert_almost_equal(f_y_cse_low, f_y_nfw, decimal=2) + npt.assert_almost_equal(f_y_cse_low, f_y_nfw, decimal=2) def test_hessian(self): x = np.linspace(0.01, 2, 10) y = np.zeros_like(x) - kwargs = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0} + kwargs = {"alpha_Rs": 0.5, "Rs": 2, "center_x": 0, "center_y": 
0} f_xx_nfw, f_xy_nfw, f_yx_nfw, f_yy_nfw = self.nfw.hessian(x, y, **kwargs) - f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.nfw_cse.hessian(x, y, e1=0, e2=0, **kwargs) + f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.nfw_cse.hessian( + x, y, e1=0, e2=0, **kwargs + ) npt.assert_almost_equal(f_xx_cse, f_xx_nfw, decimal=5) npt.assert_almost_equal(f_xy_cse, f_xy_nfw, decimal=5) npt.assert_almost_equal(f_yx_cse, f_yx_nfw, decimal=5) npt.assert_almost_equal(f_yy_cse, f_yy_nfw, decimal=5) - f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.nfw_cse_low_accuracy.hessian(x, y, e1=0, e2=0, **kwargs) + f_xx_cse, f_xy_cse, f_yx_cse, f_yy_cse = self.nfw_cse_low_accuracy.hessian( + x, y, e1=0, e2=0, **kwargs + ) npt.assert_almost_equal(f_xx_cse / f_xx_nfw, 1, decimal=1) npt.assert_almost_equal(f_xy_cse, f_xy_nfw, decimal=5) npt.assert_almost_equal(f_yx_cse, f_yx_nfw, decimal=5) @@ -70,23 +75,38 @@ def test_mass_3d_lens(self): npt.assert_almost_equal(m_3d_nfw, m_3d_cse, decimal=8) def test_ellipticity(self): - """ - test the definition of the ellipticity normalization (along major axis or product averaged axes) - """ + """Test the definition of the ellipticity normalization (along major axis or + product averaged axes)""" x, y = np.linspace(start=0.001, stop=10, num=100), np.zeros(100) - kwargs_round = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0} - kwargs = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0, 'e1': 0.3, 'e2': 0} + kwargs_round = { + "alpha_Rs": 0.5, + "Rs": 2, + "center_x": 0, + "center_y": 0, + "e1": 0, + "e2": 0, + } + kwargs = { + "alpha_Rs": 0.5, + "Rs": 2, + "center_x": 0, + "center_y": 0, + "e1": 0.3, + "e2": 0, + } f_xx, f_xy, f_yx, f_yy = self.nfw_cse.hessian(x, y, **kwargs_round) - kappa_round = 1. / 2 * (f_xx + f_yy) + kappa_round = 1.0 / 2 * (f_xx + f_yy) f_xx, f_xy, f_yx, f_yy = self.nfw_cse.hessian(x, y, **kwargs) - kappa_major = 1. / 2 * (f_xx + f_yy) + kappa_major = 1.0 / 2 * (f_xx + f_yy) f_xx, f_xy, f_yx, f_yy = self.nfw_cse.hessian(y, x, **kwargs) - kappa_minor = 1. 
/ 2 * (f_xx + f_yy) + kappa_minor = 1.0 / 2 * (f_xx + f_yy) - npt.assert_almost_equal(np.sqrt(kappa_minor * kappa_major),kappa_round, decimal=2) + npt.assert_almost_equal( + np.sqrt(kappa_minor * kappa_major), kappa_round, decimal=2 + ) # import matplotlib.pyplot as plt # plt.plot(x, kappa_round/kappa_round, ':', label='round', alpha=0.5) @@ -96,18 +116,32 @@ def test_ellipticity(self): # plt.plot(x, np.sqrt(kappa_minor**2 + kappa_major**2) / kappa_round / 2, '--', label='square', alpha=0.5) # plt.legend() # plt.show() + def test_einstein_rad(self): - """ - test that the Einstein radius doesn't change significantly with ellipticity - """ - kwargs_round = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0} - kwargs = {'alpha_Rs': 0.5, 'Rs': 2, 'center_x': 0, 'center_y': 0, 'e1': 0.3, 'e2': 0} - LensMod = LensModel(['NFW_ELLIPSE_CSE']) + """Test that the Einstein radius doesn't change significantly with + ellipticity.""" + kwargs_round = { + "alpha_Rs": 0.5, + "Rs": 2, + "center_x": 0, + "center_y": 0, + "e1": 0, + "e2": 0, + } + kwargs = { + "alpha_Rs": 0.5, + "Rs": 2, + "center_x": 0, + "center_y": 0, + "e1": 0.3, + "e2": 0, + } + LensMod = LensModel(["NFW_ELLIPSE_CSE"]) LensAn = LensProfileAnalysis(LensMod) r_Ein_round = LensAn.effective_einstein_radius([kwargs_round]) r_Ein_ell = LensAn.effective_einstein_radius([kwargs]) npt.assert_almost_equal(r_Ein_round, r_Ein_ell, decimal=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw_mass_concentration.py b/test/test_LensModel/test_Profiles/test_nfw_mass_concentration.py index 068c79c76..6988dacc1 100644 --- a/test/test_LensModel/test_Profiles/test_nfw_mass_concentration.py +++ b/test/test_LensModel/test_Profiles/test_nfw_mass_concentration.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.nfw import NFW @@ -10,19 +10,21 @@ class TestNFWMC(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.z_lens, self.z_source = 0.5, 2 from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self.nfw = NFW() self.nfwmc = NFWMC(z_source=self.z_source, z_lens=self.z_lens, cosmo=cosmo) - self.lensCosmo = LensCosmo(z_lens=self.z_lens, z_source=self.z_source, cosmo=cosmo) + self.lensCosmo = LensCosmo( + z_lens=self.z_lens, z_source=self.z_source, cosmo=cosmo + ) def test_function(self): - x, y = 1., 1. + x, y = 1.0, 1.0 logM = 12 concentration = 10 f_mc = self.nfwmc.function(x, y, logM, concentration, center_x=0, center_y=0) @@ -31,29 +33,35 @@ def test_function(self): npt.assert_almost_equal(f_mc, f_, decimal=8) def test_derivatives(self): - x, y = 1., 1. + x, y = 1.0, 1.0 logM = 12 concentration = 10 - f_x_mc, f_y_mc = self.nfwmc.derivatives(x, y, logM, concentration, center_x=0, center_y=0) - Rs, alpha_Rs = self.lensCosmo.nfw_physical2angle(10 ** logM, concentration) + f_x_mc, f_y_mc = self.nfwmc.derivatives( + x, y, logM, concentration, center_x=0, center_y=0 + ) + Rs, alpha_Rs = self.lensCosmo.nfw_physical2angle(10**logM, concentration) f_x, f_y = self.nfw.derivatives(x, y, Rs, alpha_Rs, center_x=0, center_y=0) npt.assert_almost_equal(f_x_mc, f_x, decimal=8) npt.assert_almost_equal(f_y_mc, f_y, decimal=8) def test_hessian(self): - x, y = 1., 1. 
+ x, y = 1.0, 1.0 logM = 12 concentration = 10 - f_xx_mc, f_xy_mc, f_yx_mc, f_yy_mc = self.nfwmc.hessian(x, y, logM, concentration, center_x=0, center_y=0) - Rs, alpha_Rs = self.lensCosmo.nfw_physical2angle(10 ** logM, concentration) - f_xx, f_xy, f_yx, f_yy = self.nfw.hessian(x, y, Rs, alpha_Rs, center_x=0, center_y=0) + f_xx_mc, f_xy_mc, f_yx_mc, f_yy_mc = self.nfwmc.hessian( + x, y, logM, concentration, center_x=0, center_y=0 + ) + Rs, alpha_Rs = self.lensCosmo.nfw_physical2angle(10**logM, concentration) + f_xx, f_xy, f_yx, f_yy = self.nfw.hessian( + x, y, Rs, alpha_Rs, center_x=0, center_y=0 + ) npt.assert_almost_equal(f_xx_mc, f_xx, decimal=8) npt.assert_almost_equal(f_yy_mc, f_yy, decimal=8) npt.assert_almost_equal(f_xy_mc, f_xy, decimal=8) npt.assert_almost_equal(f_yx_mc, f_yx, decimal=8) def test_static(self): - x, y = 1., 1. + x, y = 1.0, 1.0 logM = 12 concentration = 10 f_ = self.nfwmc.function(x, y, logM, concentration, center_x=0, center_y=0) @@ -65,5 +73,5 @@ def test_static(self): assert f_dyn != f_static -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nfw_vir_trunc.py b/test/test_LensModel/test_Profiles/test_nfw_vir_trunc.py index 653aa132c..41f91cd77 100644 --- a/test/test_LensModel/test_Profiles/test_nfw_vir_trunc.py +++ b/test/test_LensModel/test_Profiles/test_nfw_vir_trunc.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.nfw_vir_trunc import NFWVirTrunc @@ -11,50 +11,49 @@ class TestNFW(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): z_lens = 0.55 z_source = 2.5 from astropy.cosmology import FlatLambdaCDM + cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05) self.nfw = NFWVirTrunc(z_lens=z_lens, z_source=z_source, cosmo=cosmo) self.lensCosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo) NFWVirTrunc(z_lens=z_lens, z_source=z_source, cosmo=None) def test_G(self): - - c = 10. + c = 10.0 num = 1000 l = 2 * c r = np.linspace(0, l, 1000) out = self.nfw._G(c, c=c) assert out == 0 out = self.nfw._G(x=1, c=c) - npt.assert_almost_equal(out, 0.32892146681210577 , decimal=6) + npt.assert_almost_equal(out, 0.32892146681210577, decimal=6) out = self.nfw._G(x=2, c=c) npt.assert_almost_equal(out, 0.12735521436564, decimal=6) - out = self.nfw._G(x=c+1, c=c) + out = self.nfw._G(x=c + 1, c=c) npt.assert_almost_equal(out, 0, decimal=6) kappa = self.nfw._G(r, c=c) * r * np.pi * 2 - kappa_int = np.sum(kappa) / num * l/c - f = self.nfw._f(c)# / self.nfw._f(c=1) + kappa_int = np.sum(kappa) / num * l / c + f = self.nfw._f(c) # / self.nfw._f(c=1) - #import matplotlib.pyplot as plt - #plt.plot(r, kappa) - #plt.show() - #npt.assert_almost_equal(kappa_int, 1, decimal=1) - #assert 1 == 0 + # import matplotlib.pyplot as plt + # plt.plot(r, kappa) + # plt.show() + # npt.assert_almost_equal(kappa_int, 1, decimal=1) + # assert 1 == 0 def test_kappa(self): - c = 1. - logM = 13. 
- M = 10 ** logM + c = 1.0 + logM = 13.0 + M = 10**logM theta_vir = self.nfw._lens_cosmo.nfw_M_theta_r200(M) - print(theta_vir, 'test theta_vir') - print(theta_vir/c, 'theta_Rs') + print(theta_vir, "test theta_vir") + print(theta_vir / c, "theta_Rs") num = 1000 theta = np.linspace(0, theta_vir, num) @@ -62,21 +61,22 @@ def test_kappa(self): kappa = self.nfw.kappa(theta, logM=logM, c=c) * theta * np.pi * 2 * d_theta f = self.nfw._f(c) - print(f, 'f') + print(f, "f") kappa_int = np.sum(kappa) mass = kappa_int * self.lensCosmo.sigma_crit_angle - npt.assert_almost_equal(mass/M, 1, decimal=2) + npt.assert_almost_equal(mass / M, 1, decimal=2) def test_radial_profile(self): r = np.logspace(start=-2, stop=2, num=100) c = 10 - logM = 13. - #kappa = self.nfw.kappa(r, logM=logM, c=c) + logM = 13.0 + # kappa = self.nfw.kappa(r, logM=logM, c=c) import matplotlib.pyplot as plt - #plt.loglog(r, kappa) - #plt.show() - #assert 1 == 0 + + # plt.loglog(r, kappa) + # plt.show() + # assert 1 == 0 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_nie.py b/test/test_LensModel/test_Profiles/test_nie.py index 2885710ce..a347564fc 100644 --- a/test/test_LensModel/test_Profiles/test_nie.py +++ b/test/test_LensModel/test_Profiles/test_nie.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -12,26 +12,27 @@ try: import fastell4py + bool_test = True except: bool_test = False - print("Warning: fastell4py not available, tests will not crosscheck with fastell4py on your machine") + print( + "Warning: fastell4py not available, tests will not crosscheck with fastell4py on your machine" + ) class TestNIE(object): - """ - tests the Gaussian methods - """ - def setup_method(self): + """Tests the Gaussian methods.""" + def setup_method(self): self.nie = NIE() self.spemd = SPEMD(suppress_fastell=True) self.sis = SIS() def test_function(self): - y = np.array([1., 2]) - x = np.array([0., 0.]) - theta_E = 1. + y = np.array([1.0, 2]) + x = np.array([0.0, 0.0]) + theta_E = 1.0 q = 0.9999 s = 0.00001 phi_G = 0 @@ -49,7 +50,7 @@ def test_function(self): e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.nie.function(x, y, theta_E, e1, e2, s_scale=s) delta_pot = values[1] - values[0] - gamma = 2. + gamma = 2.0 values_spemd = self.spemd.function(x, y, theta_E, gamma, e1, e2, s_scale=s) delta_pot_spemd = values_spemd[1] - values_spemd[0] npt.assert_almost_equal(delta_pot, delta_pot_spemd, decimal=2) @@ -57,7 +58,7 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - theta_E = 1. + theta_E = 1.0 q = 0.99999 phi_G = 0 s = 0.0000001 @@ -72,17 +73,19 @@ def test_derivatives(self): phi_G = 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.nie.derivatives(x, y, theta_E, e1, e2, s_scale=s) - gamma = 2. - f_x_spemd, f_y_spemd = self.spemd.derivatives(x, y, theta_E, gamma, e1, e2, s_scale=s) - print(f_x/f_x_spemd, 'ratio deflections') - print(1+(1-q)/2) + gamma = 2.0 + f_x_spemd, f_y_spemd = self.spemd.derivatives( + x, y, theta_E, gamma, e1, e2, s_scale=s + ) + print(f_x / f_x_spemd, "ratio deflections") + print(1 + (1 - q) / 2) npt.assert_almost_equal(f_x, f_x_spemd, decimal=2) npt.assert_almost_equal(f_y, f_y_spemd, decimal=2) def test_hessian(self): x = np.array([1]) y = np.array([2]) - theta_E = 1. 
+ theta_E = 1.0 q = 0.999999 phi_G = 0 s = 0.0000001 @@ -96,44 +99,43 @@ def test_hessian(self): def test_convergence2surface_brightness(self): from lenstronomy.LightModel.Profiles.nie import NIE as NIE_Light + nie_light = NIE_Light() - kwargs = {'e1': 0.3, 'e2': -0.05, 's_scale': 0.5} + kwargs = {"e1": 0.3, "e2": -0.05, "s_scale": 0.5} x, y = util.make_grid(numPix=10, deltapix=0.1) f_xx, f_xy, f_yx, f_yy = self.nie.hessian(x, y, theta_E=1, **kwargs) - kappa = 1/2. * (f_xx + f_yy) + kappa = 1 / 2.0 * (f_xx + f_yy) flux = nie_light.function(x, y, amp=1, **kwargs) - npt.assert_almost_equal(kappa/np.sum(kappa), flux/np.sum(flux), decimal=5) + npt.assert_almost_equal(kappa / np.sum(kappa), flux / np.sum(flux), decimal=5) def test_static(self): - x, y = 1., 1. + x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'theta_E': 1., 's_scale': .1, 'e1': e1, 'e2': e2} + kwargs_lens = {"theta_E": 1.0, "s_scale": 0.1, "e1": e1, "e2": e2} f_ = self.nie.function(x, y, **kwargs_lens) self.nie.set_static(**kwargs_lens) f_static = self.nie.function(x, y, **kwargs_lens) npt.assert_almost_equal(f_, f_static, decimal=8) self.nie.set_dynamic() - kwargs_lens = {'theta_E': 2., 's_scale': .1, 'e1': e1, 'e2': e2} + kwargs_lens = {"theta_E": 2.0, "s_scale": 0.1, "e1": e1, "e2": e2} f_dyn = self.nie.function(x, y, **kwargs_lens) assert f_dyn != f_static class TestNIEMajorAxis(object): - def setup_method(self): pass def test_kappa(self): nie = NIEMajorAxis() x, y = util.make_grid(numPix=10, deltapix=0.1) - kwargs = {'b': 1, 's': 0.2, 'q': 0.3} + kwargs = {"b": 1, "s": 0.2, "q": 0.3} f_xx, f_xy, f_yx, f_yy = nie.hessian(x, y, **kwargs) - kappa_num = 1./2 * (f_xx + f_yy) + kappa_num = 1.0 / 2 * (f_xx + f_yy) kappa = nie.kappa(x, y, **kwargs) npt.assert_almost_equal(kappa_num, kappa, decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() - diff --git a/test/test_LensModel/test_Profiles/test_nie_potential.py b/test/test_LensModel/test_Profiles/test_nie_potential.py index 243cff18a..c68aad7df 100644 --- a/test/test_LensModel/test_Profiles/test_nie_potential.py +++ b/test/test_LensModel/test_Profiles/test_nie_potential.py @@ -1,4 +1,4 @@ -__author__ = 'gipagano' +__author__ = "gipagano" import numpy as np @@ -11,249 +11,264 @@ class TestNIE_POTENTIAL(object): - """ - tests the NIE_POTENTIAL profile for different rotations - """ - def setup_method(self): + """Tests the NIE_POTENTIAL profile for different rotations.""" + def setup_method(self): self.nie_potential = NIE_POTENTIAL() self.spep = SPEP() def test_function(self): - y = np.array([1., 2]) - x = np.array([0., 0.]) - - theta_E = 1. - theta_c = 0. - + y = np.array([1.0, 2]) + x = np.array([0.0, 0.0]) + + theta_E = 1.0 + theta_c = 0.0 + ############# - # no rotation + # no rotation ############# - - e1, e2 = 0.05, 0.0 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.0 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. 
- q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 # compare the non-rotated output - values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) - delta_pot = values[1] - values[0] - values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) + delta_pot = values[1] - values[0] + values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) delta_pot_spep = values[1] - values[0] npt.assert_almost_equal(delta_pot, delta_pot_spep, decimal=4) - + ############ # rotation 1 ############ - - e1, e2 = 0.05, 0.1 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.1 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 # compare the rotated output - values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) - delta_pot = values[1] - values[0] - values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) + delta_pot = values[1] - values[0] + values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) delta_pot_spep = values[1] - values[0] npt.assert_almost_equal(delta_pot, delta_pot_spep, decimal=4) - + ############ # rotation 2 ############ - - e1, e2 = 0.15, 0.13 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.15, 0.13 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 # compare the rotated output - values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) - delta_pot = values[1] - values[0] - values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + values = self.nie_potential.function(x, y, theta_E, theta_c, e1, e2) + delta_pot = values[1] - values[0] + values = self.spep.function(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) delta_pot_spep = values[1] - values[0] npt.assert_almost_equal(delta_pot, delta_pot_spep, decimal=4) def test_derivatives(self): x = np.array([1]) y = np.array([2]) - - theta_E = 1. - theta_c = 0. 
- + + theta_E = 1.0 + theta_c = 0.0 + ############# - # no rotation + # no rotation ############# - - e1, e2 = 0.05, 0.0 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.0 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the non-rotated output - f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) - f_x_nie, f_y_nie = self.spep.derivatives(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) + f_x_nie, f_y_nie = self.spep.derivatives( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_x, f_x_nie, decimal=4) npt.assert_almost_equal(f_y, f_y_nie, decimal=4) - + ############ # rotation 1 ############ - - e1, e2 = 0.05, 0.1 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.1 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the rotated output - f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) - f_x_nie, f_y_nie = self.spep.derivatives(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) + f_x_nie, f_y_nie = self.spep.derivatives( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_x, f_x_nie, decimal=4) npt.assert_almost_equal(f_y, f_y_nie, decimal=4) - + ############ # rotation 2 ############ - - e1, e2 = 0.15, 0.13 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.15, 0.13 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. 
- q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the rotated output - f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) - f_x_nie, f_y_nie = self.spep.derivatives(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_x, f_y = self.nie_potential.derivatives(x, y, theta_E, theta_c, e1, e2) + f_x_nie, f_y_nie = self.spep.derivatives( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_x, f_x_nie, decimal=4) npt.assert_almost_equal(f_y, f_y_nie, decimal=4) def test_hessian(self): x = np.array([1]) y = np.array([2]) - - theta_E = 1. - theta_c = 0. - + + theta_E = 1.0 + theta_c = 0.0 + ############# - # no rotation + # no rotation ############# - - e1, e2 = 0.05, 0.0 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.0 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the non-rotated output - f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian(x, y, theta_E, theta_c, e1, e2) - f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian( + x, y, theta_E, theta_c, e1, e2 + ) + f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4) npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4) npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4) npt.assert_almost_equal(f_yx, f_yx_nie, decimal=4) - + ############ # rotation 1 ############ - - e1, e2 = 0.05, 0.1 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.05, 0.1 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. 
- q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the rotated output - f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian(x, y, theta_E, theta_c, e1, e2) - f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian( + x, y, theta_E, theta_c, e1, e2 + ) + f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4) npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4) npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4) npt.assert_almost_equal(f_yx, f_yx_nie, decimal=4) - + ############ # rotation 2 ############ - - e1, e2 = 0.15, 0.13 - eps = np.sqrt(e1**2+e2**2) + + e1, e2 = 0.15, 0.13 + eps = np.sqrt(e1**2 + e2**2) phi_G, q = param_util.ellipticity2phi_q(e1, e2) - + # map the nie_potential input to the spep input - gamma_spep = 2. - q_spep = np.sqrt(q) + gamma_spep = 2.0 + q_spep = np.sqrt(q) e1_spep, e2_spep = param_util.phi_q2_ellipticity(phi_G, q_spep) - theta_E_conv = self.nie_potential._theta_q_convert(theta_E ,q) - theta_E_spep = theta_E_conv*np.sqrt(1-eps)/((1-eps)/(1+eps))**0.25 - + theta_E_conv = self.nie_potential._theta_q_convert(theta_E, q) + theta_E_spep = theta_E_conv * np.sqrt(1 - eps) / ((1 - eps) / (1 + eps)) ** 0.25 + # compare the rotated output - f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian(x, y, theta_E, theta_c, e1, e2) - f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian(x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep) + f_xx, f_xy, f_yx, f_yy = self.nie_potential.hessian( + x, y, theta_E, theta_c, e1, e2 + ) + f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.spep.hessian( + x, y, theta_E_spep, gamma_spep, e1_spep, e2_spep + ) npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4) npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4) npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4) npt.assert_almost_equal(f_yx, f_yx_nie, decimal=4) def test_static(self): - x, y = 1., 1. 
+ x, y = 1.0, 1.0 phi_G, q = 0.3, 0.8 - - e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = {'theta_E': 1., 'theta_c': .1, 'e1': e1, 'e2': e2} - f_ = self.nie_potential.function(x, y, **kwargs_lens) + + e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) + kwargs_lens = {"theta_E": 1.0, "theta_c": 0.1, "e1": e1, "e2": e2} + f_ = self.nie_potential.function(x, y, **kwargs_lens) self.nie_potential.set_static(**kwargs_lens) - f_static = self.nie_potential.function(x, y, **kwargs_lens) + f_static = self.nie_potential.function(x, y, **kwargs_lens) npt.assert_almost_equal(f_, f_static, decimal=8) self.nie_potential.set_dynamic() - kwargs_lens = {'theta_E': 2., 'theta_c': .1, 'e1': e1, 'e2': e2} - f_dyn = self.nie_potential.function(x, y, **kwargs_lens) + kwargs_lens = {"theta_E": 2.0, "theta_c": 0.1, "e1": e1, "e2": e2} + f_dyn = self.nie_potential.function(x, y, **kwargs_lens) assert f_dyn != f_static -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() - diff --git a/test/test_LensModel/test_Profiles/test_numerical_deflections.py b/test/test_LensModel/test_Profiles/test_numerical_deflections.py index 515115b82..c3b57942c 100644 --- a/test/test_LensModel/test_Profiles/test_numerical_deflections.py +++ b/test/test_LensModel/test_Profiles/test_numerical_deflections.py @@ -1,4 +1,4 @@ -__author__ = 'dgilman' +__author__ = "dgilman" from lenstronomy.LensModel.Profiles.numerical_deflections import TabulatedDeflections @@ -10,13 +10,10 @@ import pytest -class TestClass(): +class TestClass: + def __call__(self, x, y, Rs, norm, center_x=0, center_y=0): + """NFW profile :param x: - def __call__(self, x, y, Rs, norm, center_x = 0, center_y = 0): - - """ - NFW profile - :param x: :param y: :param kwargs: :return: @@ -27,19 +24,18 @@ def __call__(self, x, y, Rs, norm, center_x = 0, center_y = 0): _x = x - center_x _y = y - center_y - R = np.sqrt(_x ** 2 + _y ** 2) - x = R * Rs ** -1 + R = np.sqrt(_x**2 + _y**2) + x = R * Rs**-1 - a = 4 * rho0 * Rs * R * self._g(x) / x ** 2 + a = 4 * rho0 * Rs * R * self._g(x) / x**2 - cos = _x/R - sin = _y/R + cos = _x / R + sin = _y / R return a * cos, a * sin def _g(self, X): - """ - - analytic solution of integral for NFW profile to compute deflection angel and gamma + """Analytic solution of integral for NFW profile to compute deflection angel and + gamma. :param x: R/Rs :type x: float >0 @@ -48,49 +44,42 @@ def _g(self, X): if isinstance(X, int) or isinstance(X, float): if X < 1: x = max(c, X) - a = np.log(x/2.) + 1/np.sqrt(1-x**2)*np.arccosh(1./x) + a = np.log(x / 2.0) + 1 / np.sqrt(1 - x**2) * np.arccosh(1.0 / x) elif X == 1: - a = 1 + np.log(1./2.) + a = 1 + np.log(1.0 / 2.0) else: # X > 1: - a = np.log(X/2) + 1/np.sqrt(X**2-1)*np.arccos(1./X) + a = np.log(X / 2) + 1 / np.sqrt(X**2 - 1) * np.arccos(1.0 / X) else: a = np.empty_like(X) X[X <= c] = c x = X[X < 1] - a[X < 1] = np.log(x/2.) + 1/np.sqrt(1-x**2)*np.arccosh(1./x) - a[X == 1] = 1 + np.log(1./2.) + a[X < 1] = np.log(x / 2.0) + 1 / np.sqrt(1 - x**2) * np.arccosh(1.0 / x) + a[X == 1] = 1 + np.log(1.0 / 2.0) x = X[X > 1] - a[X > 1] = np.log(x/2) + 1/np.sqrt(x**2-1)*np.arccos(1./x) + a[X > 1] = np.log(x / 2) + 1 / np.sqrt(x**2 - 1) * np.arccos(1.0 / x) return a def _alpha2rho0(self, alpha_Rs, Rs): + """Convert angle at Rs into rho0.""" - """ - convert angle at Rs into rho0 - """ - - rho0 = alpha_Rs / (4. * Rs ** 2 * (1. + np.log(1. 
/ 2.))) + rho0 = alpha_Rs / (4.0 * Rs**2 * (1.0 + np.log(1.0 / 2.0))) return rho0 class TestNumericalAlpha(object): - """ - tests the Gaussian methods - """ - def setup_method(self): + """Tests the Gaussian methods.""" + def setup_method(self): self.numerical_alpha = TabulatedDeflections(custom_class=TestClass()) self.nfw = NFW() def test_no_potential(self): - - npt.assert_raises(Exception, self.numerical_alpha.function, 0., 0., 0., 0.) + npt.assert_raises(Exception, self.numerical_alpha.function, 0.0, 0.0, 0.0, 0.0) def test_derivatives(self): - - Rs = 10. + Rs = 10.0 alpha_Rs = 10 x = np.linspace(Rs, Rs, 1000) y = np.linspace(0.2 * Rs, 2 * Rs, 1000) @@ -102,16 +91,52 @@ def test_derivatives(self): numerical_alpha_class = TestClass() for i, flag in enumerate([False, True]): - lensmodel = LensModel(lens_model_list=['TABULATED_DEFLECTIONS', 'NFW'], z_source=1.5, z_lens=0.5, lens_redshift_list=zlist[i], - multi_plane=flag, numerical_alpha_class=numerical_alpha_class) - - lensmodel_nfw = LensModel(lens_model_list=['NFW', 'NFW'], z_source=1.5, z_lens=0.5, lens_redshift_list=zlist[i], - multi_plane=flag, numerical_alpha_class=numerical_alpha_class) - - keywords_num = [{'norm': alpha_Rs, 'Rs': Rs, 'center_x': center_x, 'center_y': center_y}, - {'alpha_Rs': 0.7*alpha_Rs, 'Rs': 2*Rs, 'center_x': center_x, 'center_y': center_y}] - keywords_nfw = [{'alpha_Rs': alpha_Rs, 'Rs': Rs, 'center_x': center_x, 'center_y': center_y}, - {'alpha_Rs': 0.7 * alpha_Rs, 'Rs': 2 * Rs, 'center_x': center_x, 'center_y': center_y}] + lensmodel = LensModel( + lens_model_list=["TABULATED_DEFLECTIONS", "NFW"], + z_source=1.5, + z_lens=0.5, + lens_redshift_list=zlist[i], + multi_plane=flag, + numerical_alpha_class=numerical_alpha_class, + ) + + lensmodel_nfw = LensModel( + lens_model_list=["NFW", "NFW"], + z_source=1.5, + z_lens=0.5, + lens_redshift_list=zlist[i], + multi_plane=flag, + numerical_alpha_class=numerical_alpha_class, + ) + + keywords_num = [ + { + "norm": alpha_Rs, + "Rs": Rs, + "center_x": center_x, + "center_y": center_y, + }, + { + "alpha_Rs": 0.7 * alpha_Rs, + "Rs": 2 * Rs, + "center_x": center_x, + "center_y": center_y, + }, + ] + keywords_nfw = [ + { + "alpha_Rs": alpha_Rs, + "Rs": Rs, + "center_x": center_x, + "center_y": center_y, + }, + { + "alpha_Rs": 0.7 * alpha_Rs, + "Rs": 2 * Rs, + "center_x": center_x, + "center_y": center_y, + }, + ] dx, dy = lensmodel.alpha(x, y, keywords_num) dxnfw, dynfw = lensmodel_nfw.alpha(x, y, keywords_nfw) @@ -119,8 +144,7 @@ def test_derivatives(self): npt.assert_almost_equal(dy, dynfw) def test_hessian(self): - - Rs = 10. 
+ Rs = 10.0 alpha_Rs = 2 x = np.linspace(Rs, Rs, 1000) y = np.linspace(0.2 * Rs, 2 * Rs, 1000) @@ -132,26 +156,60 @@ def test_hessian(self): numerical_alpha_class = TestClass() for i, flag in enumerate([False, True]): - lensmodel = LensModel(lens_model_list=['TABULATED_DEFLECTIONS', 'NFW'], z_source=1.5, z_lens=0.5, - lens_redshift_list=zlist[i], - multi_plane=flag, numerical_alpha_class=numerical_alpha_class) - - lensmodel_nfw = LensModel(lens_model_list=['NFW', 'NFW'], z_source=1.5, z_lens=0.5, - lens_redshift_list=zlist[i], - multi_plane=flag, numerical_alpha_class=numerical_alpha_class) - - keywords_num = [{'norm': alpha_Rs, 'Rs': Rs, 'center_x': center_x, 'center_y': center_y}, - {'alpha_Rs': 0.7 * alpha_Rs, 'Rs': 2 * Rs, 'center_x': center_x, 'center_y': center_y}] - keywords_nfw = [{'alpha_Rs': alpha_Rs, 'Rs': Rs, 'center_x': center_x, 'center_y': center_y}, - {'alpha_Rs': 0.7 * alpha_Rs, 'Rs': 2 * Rs, 'center_x': center_x, 'center_y': center_y}] + lensmodel = LensModel( + lens_model_list=["TABULATED_DEFLECTIONS", "NFW"], + z_source=1.5, + z_lens=0.5, + lens_redshift_list=zlist[i], + multi_plane=flag, + numerical_alpha_class=numerical_alpha_class, + ) + + lensmodel_nfw = LensModel( + lens_model_list=["NFW", "NFW"], + z_source=1.5, + z_lens=0.5, + lens_redshift_list=zlist[i], + multi_plane=flag, + numerical_alpha_class=numerical_alpha_class, + ) + + keywords_num = [ + { + "norm": alpha_Rs, + "Rs": Rs, + "center_x": center_x, + "center_y": center_y, + }, + { + "alpha_Rs": 0.7 * alpha_Rs, + "Rs": 2 * Rs, + "center_x": center_x, + "center_y": center_y, + }, + ] + keywords_nfw = [ + { + "alpha_Rs": alpha_Rs, + "Rs": Rs, + "center_x": center_x, + "center_y": center_y, + }, + { + "alpha_Rs": 0.7 * alpha_Rs, + "Rs": 2 * Rs, + "center_x": center_x, + "center_y": center_y, + }, + ] hess_num = lensmodel.hessian(x, y, keywords_num) hess_nfw = lensmodel_nfw.hessian(x, y, keywords_nfw) - for (hn, hnfw) in zip(hess_num, hess_nfw): - diff = hn * hnfw ** -1 + for hn, hnfw in zip(hess_num, hess_nfw): + diff = hn * hnfw**-1 L = len(diff) npt.assert_almost_equal(np.sum(diff) * L**-1, 1, 6) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_p_jaffe.py b/test/test_LensModel/test_Profiles/test_p_jaffe.py index 82e6a3ef8..bedeaea95 100644 --- a/test/test_LensModel/test_Profiles/test_p_jaffe.py +++ b/test/test_LensModel/test_Profiles/test_p_jaffe.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe @@ -8,29 +8,28 @@ class TestP_JAFFW(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.profile = PJaffe() def test_function(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. + sigma0 = 1.0 Ra, Rs = 0.5, 0.8 values = self.profile.function(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(values[0], 0.87301557036070054, decimal=8) x = np.array([0]) y = np.array([0]) - sigma0 = 1. 
+ sigma0 = 1.0 Ra, Rs = 0.5, 0.8 values = self.profile.function(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(values[0], 0.20267440905756931, decimal=8) x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) - values = self.profile.function( x, y, sigma0, Ra, Rs) + values = self.profile.function(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(values[0], 0.87301557036070054, decimal=8) npt.assert_almost_equal(values[1], 1.0842781309377669, decimal=8) npt.assert_almost_equal(values[2], 1.2588604178849985, decimal=8) @@ -38,19 +37,19 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. + sigma0 = 1.0 Ra, Rs = 0.5, 0.8 - f_x, f_y = self.profile.derivatives( x, y, sigma0, Ra, Rs) + f_x, f_y = self.profile.derivatives(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(f_x[0], 0.11542369603751264, decimal=8) npt.assert_almost_equal(f_y[0], 0.23084739207502528, decimal=8) x = np.array([0]) y = np.array([0]) - f_x, f_y = self.profile.derivatives( x, y, sigma0, Ra, Rs) + f_x, f_y = self.profile.derivatives(x, y, sigma0, Ra, Rs) assert f_x[0] == 0 assert f_y[0] == 0 - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.profile.derivatives(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(values[0][0], 0.11542369603751264, decimal=8) npt.assert_almost_equal(values[1][0], 0.23084739207502528, decimal=8) @@ -60,21 +59,21 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. + sigma0 = 1.0 Ra, Rs = 0.5, 0.8 f_xx, f_xy, f_yx, f_yy = self.profile.hessian(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(f_xx[0], 0.077446121589827679, decimal=8) npt.assert_almost_equal(f_yy[0], -0.036486601753227141, decimal=8) npt.assert_almost_equal(f_xy[0], -0.075955148895369876, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.profile.hessian(x, y, sigma0, Ra, Rs) npt.assert_almost_equal(values[0][0], 0.077446121589827679, decimal=8) npt.assert_almost_equal(values[3][0], -0.036486601753227141, decimal=8) npt.assert_almost_equal(values[1][0], values[2][0], decimal=8) def test_mass_tot(self): - rho0 = 1. + rho0 = 1.0 Ra, Rs = 0.5, 0.8 values = self.profile.mass_tot(rho0, Ra, Rs) npt.assert_almost_equal(values, 2.429441083345073, decimal=10) @@ -86,12 +85,12 @@ def test_mass_3d_lens(self): def test_grav_pot(self): x = 1 y = 2 - rho0 = 1. + rho0 = 1.0 r = np.sqrt(x**2 + y**2) Ra, Rs = 0.5, 0.8 grav_pot = self.profile.grav_pot(r, rho0, Ra, Rs) npt.assert_almost_equal(grav_pot, 0.89106542283974155, decimal=10) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_p_jaffe_ellipse.py b/test/test_LensModel/test_Profiles/test_p_jaffe_ellipse.py index 6b1dc192b..d7e597139 100644 --- a/test/test_LensModel/test_Profiles/test_p_jaffe_ellipse.py +++ b/test/test_LensModel/test_Profiles/test_p_jaffe_ellipse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.p_jaffe_ellipse import PJaffe_Ellipse @@ -8,31 +8,37 @@ import numpy.testing as npt import pytest + class TestP_JAFFW(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.profile = PJaffe_Ellipse() def test_function(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. 
+ sigma0 = 1.0 Ra, Rs = 0.5, 0.8 q, phi_G = 0.8, 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - values = self.profile.function(x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + values = self.profile.function( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(values[0], 0.9091040398607811, decimal=8) x = np.array([0]) y = np.array([0]) - values = self.profile.function(x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + values = self.profile.function( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(values[0], 0.20267440905756931, decimal=8) x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) - values = self.profile.function( x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + values = self.profile.function( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(values[0], 0.8327830942970774, decimal=8) npt.assert_almost_equal(values[1], 1.0233085474140422, decimal=8) npt.assert_almost_equal(values[2], 1.1868752663038047, decimal=8) @@ -40,22 +46,28 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. + sigma0 = 1.0 Ra, Rs = 0.5, 0.8 q, phi_G = 0.8, 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - f_x, f_y = self.profile.derivatives( x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + f_x, f_y = self.profile.derivatives( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(f_x[0], 0.08130928181117723, decimal=8) npt.assert_almost_equal(f_y[0], 0.25409150565992883, decimal=8) x = np.array([0]) y = np.array([0]) - f_x, f_y = self.profile.derivatives( x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + f_x, f_y = self.profile.derivatives( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) assert f_x[0] == 0 assert f_y[0] == 0 - x = np.array([1,3,4]) - y = np.array([2,1,1]) - values = self.profile.derivatives(x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + values = self.profile.derivatives( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(values[0][0], 0.08130928181117723, decimal=8) npt.assert_almost_equal(values[1][0], 0.25409150565992883, decimal=8) npt.assert_almost_equal(values[0][1], 0.17711143165920576, decimal=8) @@ -64,18 +76,22 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - sigma0 = 1. 
+ sigma0 = 1.0 Ra, Rs = 0.5, 0.8 q, phi_G = 0.8, 0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - f_xx, f_xy, f_yx, f_yy = self.profile.hessian(x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + f_xx, f_xy, f_yx, f_yy = self.profile.hessian( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(f_xx[0], 0.06259391932550429, decimal=8) npt.assert_almost_equal(f_yy[0], -0.05572123112917993, decimal=8) npt.assert_almost_equal(f_xy[0], -0.058485405643460275, decimal=8) npt.assert_almost_equal(f_xy, f_yx, decimal=6) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - values = self.profile.hessian(x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + values = self.profile.hessian( + x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0 + ) npt.assert_almost_equal(values[0][0], 0.06259391932550429, decimal=8) npt.assert_almost_equal(values[3][0], -0.05572123112917993, decimal=8) @@ -84,5 +100,5 @@ def test_mass_3d_lens(self): npt.assert_almost_equal(mass, 0.87077306005349242, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_pemd.py b/test/test_LensModel/test_Profiles/test_pemd.py index 3bf62fce0..4a0ca2e3d 100644 --- a/test/test_LensModel/test_Profiles/test_pemd.py +++ b/test/test_LensModel/test_Profiles/test_pemd.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -8,29 +8,32 @@ try: import fastell4py + fastell4py_bool = True except: - print("Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!") + print( + "Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!" + ) fastell4py_bool = False class TestSPEMD(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): from lenstronomy.LensModel.Profiles.pemd import PEMD from lenstronomy.LensModel.Profiles.spep import SPEP + self.PEMD = PEMD(suppress_fastell=True) self.SPEP = SPEP() def test_function(self): - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1.]) + x = np.array([1.0]) y = np.array([2]) a = np.zeros_like(x) values = self.PEMD.function(x, y, phi_E, gamma, e1, e2) @@ -39,8 +42,8 @@ def test_function(self): else: assert values == 0 a += values - x = np.array(1.) - y = np.array(2.) + x = np.array(1.0) + y = np.array(2.0) a = np.zeros_like(x) values = self.PEMD.function(x, y, phi_E, gamma, e1, e2) print(x, values) @@ -66,10 +69,10 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.PEMD.derivatives(x, y, phi_E, gamma, e1, e2) if fastell4py_bool: @@ -79,8 +82,8 @@ def test_derivatives(self): npt.assert_almost_equal(f_x[0], 0, decimal=7) npt.assert_almost_equal(f_y[0], 0, decimal=7) - x = np.array([1., 3, 4]) - y = np.array([2., 1, 1]) + x = np.array([1.0, 3, 4]) + y = np.array([2.0, 1, 1]) a = np.zeros_like(x) values = self.PEMD.derivatives(x, y, phi_E, gamma, e1, e2) if fastell4py_bool: @@ -94,12 +97,12 @@ def test_derivatives(self): npt.assert_almost_equal(values[0][1], 0, decimal=7) npt.assert_almost_equal(values[1][1], 0, decimal=7) a += values[0] - x = 1. - y = 2. - phi_E = 1. 
+ x = 1.0 + y = 2.0 + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.PEMD.derivatives(x, y, phi_E, gamma, e1, e2) if fastell4py_bool: @@ -108,19 +111,19 @@ def test_derivatives(self): else: npt.assert_almost_equal(f_x, 0, decimal=7) npt.assert_almost_equal(f_y, 0, decimal=7) - x = 0. - y = 0. + x = 0.0 + y = 0.0 f_x, f_y = self.PEMD.derivatives(x, y, phi_E, gamma, e1, e2) - assert f_x == 0. - assert f_y == 0. + assert f_x == 0.0 + assert f_y == 0.0 def test_hessian(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.PEMD.hessian(x, y, phi_E, gamma, e1, e2) if fastell4py_bool: @@ -133,12 +136,12 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, 0, decimal=7) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = 1. - y = 2. - phi_E = 1. + x = 1.0 + y = 2.0 + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) a = np.zeros_like(x) f_xx, f_xy, f_yx, f_yy = self.PEMD.hessian(x, y, phi_E, gamma, e1, e2) @@ -151,10 +154,10 @@ def test_hessian(self): npt.assert_almost_equal(f_yy, 0, decimal=7) npt.assert_almost_equal(f_xy, 0, decimal=7) a += f_xx - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.PEMD.hessian(x, y, phi_E, gamma, e1, e2) - print(values, 'values') + print(values, "values") if fastell4py_bool: npt.assert_almost_equal(values[0][0], 0.41789957732890953, decimal=5) npt.assert_almost_equal(values[3][0], 0.14047593655054141, decimal=5) @@ -168,10 +171,10 @@ def test_hessian(self): def test_spep_spemd(self): x = np.array([1]) y = np.array([0]) - phi_E = 1. - gamma = 2. - q = 1. - phi_G = 1. + phi_E = 1.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.PEMD.derivatives(x, y, phi_E, gamma, e1, e2) f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, phi_E, gamma, e1, e2) @@ -180,10 +183,10 @@ def test_spep_spemd(self): else: pass - theta_E = 2. - gamma = 2. - q = 1. - phi_G = 1. + theta_E = 2.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.PEMD.derivatives(x, y, theta_E, gamma, e1, e2) f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, theta_E, gamma, e1, e2) @@ -192,10 +195,10 @@ def test_spep_spemd(self): else: pass - theta_E = 2. + theta_E = 2.0 gamma = 1.7 - q = 1. - phi_G = 1. 
+ q = 1.0 + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.PEMD.derivatives(x, y, theta_E, gamma, e1, e2) f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, theta_E, gamma, e1, e2) @@ -204,6 +207,7 @@ def test_spep_spemd(self): def test_bounds(self): from lenstronomy.LensModel.Profiles.spemd import SPEMD + profile = SPEMD(suppress_fastell=True) compute_bool = profile._parameter_constraints(q_fastell=-1, gam=-1, s2=-1, q=-1) assert compute_bool is False @@ -220,11 +224,11 @@ def test_is_not_empty(self): def test_density_lens(self): r = 1 - kwargs = {'theta_E': 1, 'gamma': 2, 'e1': 0, 'e2': 0} + kwargs = {"theta_E": 1, "gamma": 2, "e1": 0, "e2": 0} rho = self.PEMD.density_lens(r, **kwargs) rho_spep = self.SPEP.density_lens(r, **kwargs) npt.assert_almost_equal(rho, rho_spep, decimal=7) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_point_mass.py b/test/test_LensModel/test_Profiles/test_point_mass.py index e757a8b11..17b9a3631 100644 --- a/test/test_LensModel/test_Profiles/test_point_mass.py +++ b/test/test_LensModel/test_Profiles/test_point_mass.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.point_mass import PointMass @@ -9,16 +9,15 @@ class TestSIS(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.pointmass = PointMass() def test_function(self): x = np.array([0]) y = np.array([1]) - theta_E = 1. + theta_E = 1.0 values = self.pointmass.function(x, y, theta_E) assert values[0] == 0 x = np.array([0]) @@ -26,9 +25,9 @@ def test_function(self): values = self.pointmass.function(x, y, theta_E) assert values[0] < 0 - x = np.array([1,3,4]) - y = np.array([0,1,1]) - values = self.pointmass.function( x, y, theta_E) + x = np.array([1, 3, 4]) + y = np.array([0, 1, 1]) + values = self.pointmass.function(x, y, theta_E) assert values[0] == 0 assert values[1] == 1.151292546497023 assert values[2] == 1.4166066720281081 @@ -36,7 +35,7 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([0]) - theta_E = 1. + theta_E = 1.0 f_x, f_y = self.pointmass.derivatives(x, y, theta_E) assert f_x[0] == 1 assert f_y[0] == 0 @@ -46,8 +45,8 @@ def test_derivatives(self): assert f_x[0] == 0 assert f_y[0] == 0 - x = np.array([1,3,4]) - y = np.array([0,1,1]) + x = np.array([1, 3, 4]) + y = np.array([0, 1, 1]) values = self.pointmass.derivatives(x, y, theta_E) assert values[0][0] == 1 assert values[1][0] == 0 @@ -57,14 +56,14 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([0]) - theta_E = 1. 
+ theta_E = 1.0 f_xx, f_xy, f_yx, f_yy = self.pointmass.hessian(x, y, theta_E) npt.assert_almost_equal(f_xy, f_yx, decimal=8) assert f_xx[0] == -1 assert f_yy[0] == 1 assert f_xy[0] == -0 - x = np.array([1,3,4]) - y = np.array([0,1,1]) + x = np.array([1, 3, 4]) + y = np.array([0, 1, 1]) values = self.pointmass.hessian(x, y, theta_E) assert values[0][0] == -1 assert values[3][0] == 1 @@ -74,5 +73,5 @@ def test_hessian(self): assert values[1][1] == -0.059999999999999998 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_sersic_lens.py b/test/test_LensModel/test_Profiles/test_sersic_lens.py index 7f3075874..b0badc14c 100644 --- a/test/test_LensModel/test_Profiles/test_sersic_lens.py +++ b/test/test_LensModel/test_Profiles/test_sersic_lens.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.derivative_util as calc_util from lenstronomy.LensModel.Profiles.sersic import Sersic @@ -12,21 +12,18 @@ class TestSersic(object): - """ - tests the Gaussian methods - """ - def setup_method(self): + """Tests the Gaussian methods.""" + def setup_method(self): self.sersic_2 = SersicEllipseKappa() self.sersic = Sersic() self.sersic_light = Sersic_light() def test_function(self): - x = 1 y = 2 - n_sersic = 2. - R_sersic = 1. + n_sersic = 2.0 + R_sersic = 1.0 k_eff = 0.2 values = self.sersic.function(x, y, n_sersic, R_sersic, k_eff) npt.assert_almost_equal(values, 1.0272982586319199, decimal=10) @@ -34,7 +31,7 @@ def test_function(self): x = np.array([0]) y = np.array([0]) values = self.sersic.function(x, y, n_sersic, R_sersic, k_eff) - npt.assert_almost_equal(values[0], 0., decimal=9) + npt.assert_almost_equal(values[0], 0.0, decimal=9) x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) @@ -47,11 +44,13 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - n_sersic = 2. - R_sersic = 1. 
+ n_sersic = 2.0 + R_sersic = 1.0 k_eff = 0.2 f_x, f_y = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff) - f_x2, f_y2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001) + f_x2, f_y2 = self.sersic_2.derivatives( + x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001 + ) npt.assert_almost_equal(f_x[0], 0.16556078301997193, decimal=9) npt.assert_almost_equal(f_y[0], 0.33112156603994386, decimal=9) @@ -61,7 +60,9 @@ def test_derivatives(self): x = np.array([0]) y = np.array([0]) f_x, f_y = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff) - f_x2, f_y2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001) + f_x2, f_y2 = self.sersic_2.derivatives( + x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001 + ) assert f_x[0] == 0 assert f_y[0] == 0 npt.assert_almost_equal(f_x2[0], f_x[0]) @@ -70,7 +71,9 @@ def test_derivatives(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) values = self.sersic.derivatives(x, y, n_sersic, R_sersic, k_eff) - values2 = self.sersic_2.derivatives(x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001) + values2 = self.sersic_2.derivatives( + x, y, n_sersic, R_sersic, k_eff, 0, 0.00000001 + ) npt.assert_almost_equal(values[0][0], 0.16556078301997193, decimal=9) npt.assert_almost_equal(values[1][0], 0.33112156603994386, decimal=9) npt.assert_almost_equal(values[0][1], 0.2772992378623737, decimal=9) @@ -80,28 +83,38 @@ def test_derivatives(self): npt.assert_almost_equal(values2[0][1], values[0][1]) npt.assert_almost_equal(values2[1][1], values[1][1]) - values2 = self.sersic_2.derivatives(0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001) - values = self.sersic.derivatives(0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001) + values2 = self.sersic_2.derivatives( + 0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001 + ) + values = self.sersic.derivatives( + 0.3, -0.2, n_sersic, R_sersic, k_eff, 0, 0.00000001 + ) npt.assert_almost_equal(values2[0], values[0]) npt.assert_almost_equal(values2[1], values[1]) def test_differentails(self): - x_, y_ = 1., 1 - n_sersic = 2. - R_sersic = 1. + x_, y_ = 1.0, 1 + n_sersic = 2.0 + R_sersic = 1.0 k_eff = 0.2 r = np.sqrt(x_**2 + y_**2) d_alpha_dr = self.sersic.d_alpha_dr(x_, y_, n_sersic, R_sersic, k_eff) alpha = self.sersic.alpha_abs(x_, y_, n_sersic, R_sersic, k_eff) - f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_) - f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_) - f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_) - - f_xx = (d_alpha_dr/r - alpha/r**2) * y_**2/r + alpha/r - f_yy = (d_alpha_dr/r - alpha/r**2) * x_**2/r + alpha/r - f_xy = (d_alpha_dr/r - alpha/r**2) * x_*y_/r + f_xx_ = d_alpha_dr * calc_util.d_r_dx( + x_, y_ + ) * x_ / r + alpha * calc_util.d_x_diffr_dx(x_, y_) + f_yy_ = d_alpha_dr * calc_util.d_r_dy( + x_, y_ + ) * y_ / r + alpha * calc_util.d_y_diffr_dy(x_, y_) + f_xy_ = d_alpha_dr * calc_util.d_r_dy( + x_, y_ + ) * x_ / r + alpha * calc_util.d_x_diffr_dy(x_, y_) + + f_xx = (d_alpha_dr / r - alpha / r**2) * y_**2 / r + alpha / r + f_yy = (d_alpha_dr / r - alpha / r**2) * x_**2 / r + alpha / r + f_xy = (d_alpha_dr / r - alpha / r**2) * x_ * y_ / r npt.assert_almost_equal(f_xx, f_xx_, decimal=10) npt.assert_almost_equal(f_yy, f_yy_, decimal=10) npt.assert_almost_equal(f_xy, f_xy_, decimal=10) @@ -109,8 +122,8 @@ def test_differentails(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - n_sersic = 2. - R_sersic = 1. 
+ n_sersic = 2.0 + R_sersic = 1.0 k_eff = 0.2 f_xx, f_xy, f_yx, f_yy = self.sersic.hessian(x, y, n_sersic, R_sersic, k_eff) npt.assert_almost_equal(f_xx[0], 0.1123170666045793, decimal=10) @@ -127,34 +140,36 @@ def test_hessian(self): npt.assert_almost_equal(values[3][1], 0.076243427402007985, decimal=10) npt.assert_almost_equal(values[1][1], -0.048568955656349749, decimal=10) - f_xx2, f_xy2, f_yx2, f_yy2 = self.sersic_2.hessian(x, y, n_sersic, R_sersic, k_eff, 0.0000001, 0) + f_xx2, f_xy2, f_yx2, f_yy2 = self.sersic_2.hessian( + x, y, n_sersic, R_sersic, k_eff, 0.0000001, 0 + ) npt.assert_almost_equal(f_xx2, values[0]) npt.assert_almost_equal(f_yy2, values[3], decimal=6) npt.assert_almost_equal(f_xy2, values[1], decimal=6) npt.assert_almost_equal(f_yx2, values[2], decimal=6) def test_alpha_abs(self): - x = 1. + x = 1.0 dr = 0.0000001 n_sersic = 2.5 - R_sersic = .5 + R_sersic = 0.5 k_eff = 0.2 alpha_abs = self.sersic.alpha_abs(x, 0, n_sersic, R_sersic, k_eff) f_dr = self.sersic.function(x + dr, 0, n_sersic, R_sersic, k_eff) f_ = self.sersic.function(x, 0, n_sersic, R_sersic, k_eff) - alpha_abs_num = -(f_dr - f_)/dr + alpha_abs_num = -(f_dr - f_) / dr npt.assert_almost_equal(alpha_abs_num, alpha_abs, decimal=3) def test_dalpha_dr(self): - x = 1. + x = 1.0 dr = 0.0000001 - n_sersic = 1. - R_sersic = .5 + n_sersic = 1.0 + R_sersic = 0.5 k_eff = 0.2 d_alpha_dr = self.sersic.d_alpha_dr(x, 0, n_sersic, R_sersic, k_eff) alpha_dr = self.sersic.alpha_abs(x + dr, 0, n_sersic, R_sersic, k_eff) alpha = self.sersic.alpha_abs(x, 0, n_sersic, R_sersic, k_eff) - d_alpha_dr_num = (alpha_dr - alpha)/dr + d_alpha_dr_num = (alpha_dr - alpha) / dr npt.assert_almost_equal(d_alpha_dr, d_alpha_dr_num, decimal=3) def test_mag_sym(self): @@ -162,8 +177,8 @@ def test_mag_sym(self): :return: """ - r = 2. - angle1 = 0. + r = 2.0 + angle1 = 0.0 angle2 = 1.5 x1 = r * np.cos(angle1) y1 = r * np.sin(angle1) @@ -173,8 +188,12 @@ def test_mag_sym(self): n_sersic = 4.5 R_sersic = 2.5 k_eff = 0.8 - f_xx1, f_xy1, f_yx1, f_yy1 = self.sersic.hessian(x1, y1, n_sersic, R_sersic, k_eff) - f_xx2, f_xy2, f_yx2, f_yy2 = self.sersic.hessian(x2, y2, n_sersic, R_sersic, k_eff) + f_xx1, f_xy1, f_yx1, f_yy1 = self.sersic.hessian( + x1, y1, n_sersic, R_sersic, k_eff + ) + f_xx2, f_xy2, f_yx2, f_yy2 = self.sersic.hessian( + x2, y2, n_sersic, R_sersic, k_eff + ) kappa_1 = (f_xx1 + f_yy1) / 2 kappa_2 = (f_xx2 + f_yy2) / 2 npt.assert_almost_equal(kappa_1, kappa_2, decimal=10) @@ -183,39 +202,43 @@ def test_mag_sym(self): npt.assert_almost_equal(A_1, A_2, decimal=10) def test_convergernce(self): - """ - test the convergence and compares it with the original Sersic profile - :return: - """ + """Test the convergence and compares it with the original Sersic profile + :return:""" x = np.array([0, 0, 0, 0, 0]) y = np.array([0.5, 1, 1.5, 2, 2.5]) n_sersic = 4.5 R_sersic = 2.5 k_eff = 0.2 f_xx, f_xy, f_yx, f_yy = self.sersic.hessian(x, y, n_sersic, R_sersic, k_eff) - kappa = (f_xx + f_yy) / 2. + kappa = (f_xx + f_yy) / 2.0 assert kappa[0] > 0 - flux = self.sersic_light.function(x, y, amp=1., R_sersic=R_sersic, n_sersic=n_sersic) + flux = self.sersic_light.function( + x, y, amp=1.0, R_sersic=R_sersic, n_sersic=n_sersic + ) flux /= flux[0] kappa /= kappa[0] npt.assert_almost_equal(flux[1], kappa[1], decimal=5) - xvalues = np.linspace(0.5, 3., 100) + xvalues = np.linspace(0.5, 3.0, 100) - e1, e2 = 0.4, 0. 
+ e1, e2 = 0.4, 0.0 q = ellipticity2phi_q(e1, e2)[1] - kappa_ellipse = self.sersic_2.projected_mass(xvalues, 0, q, n_sersic, R_sersic, k_eff) - fxx, _, _, fyy = self.sersic_2.hessian(xvalues, 0, n_sersic, R_sersic, k_eff, e1, e2) + kappa_ellipse = self.sersic_2.projected_mass( + xvalues, 0, q, n_sersic, R_sersic, k_eff + ) + fxx, _, _, fyy = self.sersic_2.hessian( + xvalues, 0, n_sersic, R_sersic, k_eff, e1, e2 + ) - npt.assert_almost_equal(kappa_ellipse, 0.5*(fxx + fyy), decimal=5) + npt.assert_almost_equal(kappa_ellipse, 0.5 * (fxx + fyy), decimal=5) def test_sersic_util(self): - n = 1. - Re = 2. + n = 1.0 + Re = 2.0 k, bn = self.sersic.k_bn(n, Re) Re_new = self.sersic.k_Re(n, k) assert Re == Re_new -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_shapelet_pot_cartesian.py b/test/test_LensModel/test_Profiles/test_shapelet_pot_cartesian.py index 85114c06c..77a91739a 100644 --- a/test/test_LensModel/test_Profiles/test_shapelet_pot_cartesian.py +++ b/test/test_LensModel/test_Profiles/test_shapelet_pot_cartesian.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.shapelet_pot_polar import PolarShapelets from lenstronomy.LensModel.Profiles.shapelet_pot_cartesian import CartShapelets @@ -9,9 +9,8 @@ class TestCartShapelets(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.polarShapelets = PolarShapelets() self.cartShapelets = CartShapelets() @@ -19,34 +18,34 @@ def setup_method(self): def test_function(self): x = np.array([1]) y = np.array([2]) - beta = 1. - coeffs = (1., 1.) + beta = 1.0 + coeffs = (1.0, 1.0) values = self.cartShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0.11180585426466888, decimal=9) - x = 1. - y = 2. - beta = 1. - coeffs = (1., 1.) + x = 1.0 + y = 2.0 + beta = 1.0 + coeffs = (1.0, 1.0) values = self.cartShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values, 0.11180585426466891, decimal=9) x = np.array([0]) y = np.array([0]) - beta = 1. - coeffs = (0, 1.) + beta = 1.0 + coeffs = (0, 1.0) values = self.cartShapelets.function(x, y, coeffs, beta) assert values[0] == 0 - coeffs = (1, 1., 0, 0, 1, 1) + coeffs = (1, 1.0, 0, 0, 1, 1) values = self.cartShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0.16524730314632363, decimal=9) - coeffs = (1, 1., 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0) + coeffs = (1, 1.0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0) values = self.cartShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0.16524730314632363, decimal=9) - coeffs = (0., 0., 0, 0, 0., 0., 0, 0, 0, 0, 0, 0, 0, 0, 0) + coeffs = (0.0, 0.0, 0, 0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0) values = self.cartShapelets.function(x, y, coeffs, beta) assert values[0] == 0 @@ -55,29 +54,29 @@ def test_derivatives(self): :return: """ - beta = 1. - coeffs = [0,0,0,1.,0,0,0,0] - kwargs_lens1 = {'coeffs': coeffs, 'beta': beta} + beta = 1.0 + coeffs = [0, 0, 0, 1.0, 0, 0, 0, 0] + kwargs_lens1 = {"coeffs": coeffs, "beta": beta} - x1 = 1. - y1 = 2. 
+ x1 = 1.0 + y1 = 2.0 f_x1, f_y1 = self.cartShapelets.derivatives(x1, y1, **kwargs_lens1) - x2 = np.array([1.]) - y2 = np.array([2.]) + x2 = np.array([1.0]) + y2 = np.array([2.0]) f_x2, f_y2 = self.cartShapelets.derivatives(x2, y2, **kwargs_lens1) assert f_x1 == f_x2[0] - x3 = np.array([1., 0]) - y3 = np.array([2., 0]) + x3 = np.array([1.0, 0]) + y3 = np.array([2.0, 0]) f_x3, f_y3 = self.cartShapelets.derivatives(x3, y3, **kwargs_lens1) assert f_x1 == f_x3[0] def test_hessian(self): - beta = 1. - coeffs = [1, 1, 0, 1., 0, 0, 0, 0] - kwargs_lens1 = {'coeffs': coeffs, 'beta': beta} + beta = 1.0 + coeffs = [1, 1, 0, 1.0, 0, 0, 0, 0] + kwargs_lens1 = {"coeffs": coeffs, "beta": beta} - x1 = np.array([1., 2]) + x1 = np.array([1.0, 2]) y1 = np.array([1, 1]) f_xx, f_xy, f_yx, f_yy = self.cartShapelets.hessian(x1, y1, **kwargs_lens1) assert f_xx[0] == -1.174101305389919 @@ -86,5 +85,5 @@ def test_hessian(self): assert f_yx[0] == f_xy[0] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_shapelet_pot_polar.py b/test/test_LensModel/test_Profiles/test_shapelet_pot_polar.py index f25021fff..7d9c546ba 100644 --- a/test/test_LensModel/test_Profiles/test_shapelet_pot_polar.py +++ b/test/test_LensModel/test_Profiles/test_shapelet_pot_polar.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.shapelet_pot_polar import PolarShapelets @@ -8,43 +8,42 @@ class TestCartShapelets(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.polarShapelets = PolarShapelets() def test_function(self): x = np.array([1]) y = np.array([2]) - beta = 1. - coeffs = (1., 1.) + beta = 1.0 + coeffs = (1.0, 1.0) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], -0.046311501189135587, decimal=8) - x = 1. - y = 2. - beta = 1. - coeffs = (1., 1.) + x = 1.0 + y = 2.0 + beta = 1.0 + coeffs = (1.0, 1.0) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values, -0.046311501189135587, decimal=8) x = np.array([0]) y = np.array([0]) - beta = 1. - coeffs = (0, 1.) + beta = 1.0 + coeffs = (0, 1.0) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0, decimal=8) - coeffs = (1, 1., 0, 0, 1, 1) + coeffs = (1, 1.0, 0, 0, 1, 1) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0, decimal=8) - coeffs = (1, 1., 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0) + coeffs = (1, 1.0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0, decimal=8) - coeffs = (0., 0., 0, 0, 0., 0., 0, 0, 0, 0, 0, 0, 0, 0, 0) + coeffs = (0.0, 0.0, 0, 0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0) values = self.polarShapelets.function(x, y, coeffs, beta) npt.assert_almost_equal(values[0], 0, decimal=8) @@ -53,31 +52,31 @@ def test_derivatives(self): :return: """ - beta = 1. - coeffs = [1,0,0,1.,0,0,0,0] - kwargs_lens1 = {'coeffs': coeffs, 'beta': beta} + beta = 1.0 + coeffs = [1, 0, 0, 1.0, 0, 0, 0, 0] + kwargs_lens1 = {"coeffs": coeffs, "beta": beta} - x1 = 1. - y1 = 2. 
+ x1 = 1.0 + y1 = 2.0 f_x1, f_y1 = self.polarShapelets.derivatives(x1, y1, **kwargs_lens1) - x2 = np.array([1.]) - y2 = np.array([2.]) + x2 = np.array([1.0]) + y2 = np.array([2.0]) f_x2, f_y2 = self.polarShapelets.derivatives(x2, y2, **kwargs_lens1) assert f_x1 == f_x2[0] npt.assert_almost_equal(f_x1, -0.046311501189135601, decimal=8) npt.assert_almost_equal(f_y1, -0.092623002378271174, decimal=8) - x3 = np.array([1., 0]) - y3 = np.array([2., 0]) + x3 = np.array([1.0, 0]) + y3 = np.array([2.0, 0]) f_x3, f_y3 = self.polarShapelets.derivatives(x3, y3, **kwargs_lens1) assert f_x1 == f_x3[0] def test_hessian(self): - beta = 1. - coeffs = [1, 1, 0, 1., 0, 0, 0, 0] - kwargs_lens1 = {'coeffs': coeffs, 'beta': beta} + beta = 1.0 + coeffs = [1, 1, 0, 1.0, 0, 0, 0, 0] + kwargs_lens1 = {"coeffs": coeffs, "beta": beta} - x1 = np.array([1., 2]) + x1 = np.array([1.0, 2]) y1 = np.array([1, 1]) f_xx, f_xy, f_yx, f_yy = self.polarShapelets.hessian(x1, y1, **kwargs_lens1) npt.assert_almost_equal(f_xx[0], 0.20755374871029733, decimal=8) @@ -86,5 +85,5 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, f_yx, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_shear.py b/test/test_LensModel/test_Profiles/test_shear.py index c2641015c..1a24b5885 100644 --- a/test/test_LensModel/test_Profiles/test_shear.py +++ b/test/test_LensModel/test_Profiles/test_shear.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.shear import Shear, ShearGammaPsi, ShearReduced @@ -13,14 +13,13 @@ class TestShear(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.extShear = Shear() gamma1, gamma2 = 0.1, 0.1 - self.kwargs_lens = {'gamma1': gamma1, 'gamma2': gamma2} + self.kwargs_lens = {"gamma1": gamma1, "gamma2": gamma2} def test_function(self): x = np.array([1]) @@ -35,7 +34,7 @@ def test_function(self): x = np.array([2, 3, 4]) y = np.array([1, 1, 1]) values = self.extShear.function(x, y, **self.kwargs_lens) - npt.assert_almost_equal(values[0], 0.35, decimal=5) + npt.assert_almost_equal(values[0], 0.35, decimal=5) npt.assert_almost_equal(values[1], 0.7, decimal=5) def test_derivatives(self): @@ -61,23 +60,22 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, 0.1, decimal=5) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.extShear.hessian(x, y, **self.kwargs_lens) npt.assert_almost_equal(values[0], 0.1, decimal=5) npt.assert_almost_equal(values[3], -0.1, decimal=5) npt.assert_almost_equal(values[1], 0.1, decimal=5) gamma1, gamma2 = 0.1, -0.1 - kwargs = {'gamma1': gamma1, 'gamma2': gamma2} - lensModel = LensModel(['SHEAR']) + kwargs = {"gamma1": gamma1, "gamma2": gamma2} + lensModel = LensModel(["SHEAR"]) gamma1, gamma2 = lensModel.gamma(x, y, [kwargs]) npt.assert_almost_equal(gamma1, gamma1, decimal=9) npt.assert_almost_equal(gamma2, gamma2, decimal=9) class TestShearGammaPsi(object): - def setup_method(self): self.shear_e1e2 = Shear() self.shear = ShearGammaPsi() @@ -111,7 +109,6 @@ def test_hessian(self): class TestShearReduced(object): - def setup_method(self): self.shear = ShearReduced() @@ -132,21 +129,45 @@ def test_distortions(self): flux_round = gauss.function(x, y, amp=1, sigma=0.1, e1=0, e2=0) - e1, e2, = 0, 0.1 + ( + e1, + e2, + ) = ( + 0, + 0.1, + ) f_x, f_y = shear.derivatives(x, y, gamma1=-e1, 
gamma2=-e2) - flux_distorted = gauss.function(x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2) + flux_distorted = gauss.function( + x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2 + ) npt.assert_almost_equal(flux_round, flux_distorted, decimal=9) - e1, e2, = 0.2, 0 + ( + e1, + e2, + ) = ( + 0.2, + 0, + ) f_x, f_y = shear.derivatives(x, y, gamma1=-e1, gamma2=-e2) - flux_distorted = gauss.function(x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2) + flux_distorted = gauss.function( + x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2 + ) npt.assert_almost_equal(flux_round, flux_distorted, decimal=9) - e1, e2, = -0.2, 0.1 + ( + e1, + e2, + ) = ( + -0.2, + 0.1, + ) f_x, f_y = shear.derivatives(x, y, gamma1=-e1, gamma2=-e2) - flux_distorted = gauss.function(x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2) + flux_distorted = gauss.function( + x - f_x, y - f_y, amp=1, sigma=0.1, e1=e1, e2=e2 + ) npt.assert_almost_equal(flux_round, flux_distorted, decimal=9) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_sie.py b/test/test_LensModel/test_Profiles/test_sie.py index f86d27556..479d8513c 100644 --- a/test/test_LensModel/test_Profiles/test_sie.py +++ b/test/test_LensModel/test_Profiles/test_sie.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -7,69 +7,69 @@ class TestSIE(object): - """ - tests the Gaussian methods - """ - def setup_method(self): - from lenstronomy.LensModel.Profiles.sie import SIE - from lenstronomy.LensModel.Profiles.epl import EPL - from lenstronomy.LensModel.Profiles.nie import NIE - self.sie = SIE(NIE=False) - self.sie_nie = SIE(NIE=True) - self.epl = EPL() - self.nie = NIE() + """Tests the Gaussian methods.""" - def test_function(self): - x = np.array([1]) - y = np.array([2]) - theta_E = 1. - q = 0.9 - phi_G = 1. - e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - values = self.sie.function(x, y, theta_E, e1, e2) - gamma = 2 - values_spemd = self.epl.function(x, y, theta_E, gamma, e1, e2) - assert values == values_spemd + def setup_method(self): + from lenstronomy.LensModel.Profiles.sie import SIE + from lenstronomy.LensModel.Profiles.epl import EPL + from lenstronomy.LensModel.Profiles.nie import NIE - values_nie = self.sie_nie.function(x, y, theta_E, e1, e2) - s_scale = 0.0000001 - values_spemd = self.nie.function(x, y, theta_E, e1, e2, s_scale) - npt.assert_almost_equal(values_nie, values_spemd, decimal=6) + self.sie = SIE(NIE=False) + self.sie_nie = SIE(NIE=True) + self.epl = EPL() + self.nie = NIE() - def test_derivatives(self): - x = np.array([1]) - y = np.array([2]) - theta_E = 1. - q = 0.7 - phi_G = 1. 
- e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - values = self.sie.derivatives(x, y, theta_E, e1, e2) - gamma = 2 - values_spemd = self.epl.derivatives(x, y, theta_E, gamma, e1, e2) - assert values == values_spemd + def test_function(self): + x = np.array([1]) + y = np.array([2]) + theta_E = 1.0 + q = 0.9 + phi_G = 1.0 + e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) + values = self.sie.function(x, y, theta_E, e1, e2) + gamma = 2 + values_spemd = self.epl.function(x, y, theta_E, gamma, e1, e2) + assert values == values_spemd - values = self.sie_nie.derivatives(x, y, theta_E, e1, e2) - s_scale = 0.0000001 - values_spemd = self.nie.derivatives(x, y, theta_E, e1, e2, s_scale) - npt.assert_almost_equal(values, values_spemd, decimal=6) + values_nie = self.sie_nie.function(x, y, theta_E, e1, e2) + s_scale = 0.0000001 + values_spemd = self.nie.function(x, y, theta_E, e1, e2, s_scale) + npt.assert_almost_equal(values_nie, values_spemd, decimal=6) - def test_hessian(self): - x = np.array([1]) - y = np.array([2]) - theta_E = 1. - q = 0.7 - phi_G = 1. - e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - values = self.sie.hessian(x, y, theta_E, e1, e2) - gamma = 2 - values_spemd = self.epl.hessian(x, y, theta_E, gamma, e1, e2) - assert values[0] == values_spemd[0] + def test_derivatives(self): + x = np.array([1]) + y = np.array([2]) + theta_E = 1.0 + q = 0.7 + phi_G = 1.0 + e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) + values = self.sie.derivatives(x, y, theta_E, e1, e2) + gamma = 2 + values_spemd = self.epl.derivatives(x, y, theta_E, gamma, e1, e2) + assert values == values_spemd - values = self.sie_nie.hessian(x, y, theta_E, e1, e2) - s_scale = 0.0000001 - values_spemd = self.nie.hessian(x, y, theta_E, e1, e2, s_scale) - npt.assert_almost_equal(values, values_spemd, decimal=5) + values = self.sie_nie.derivatives(x, y, theta_E, e1, e2) + s_scale = 0.0000001 + values_spemd = self.nie.derivatives(x, y, theta_E, e1, e2, s_scale) + npt.assert_almost_equal(values, values_spemd, decimal=6) + def test_hessian(self): + x = np.array([1]) + y = np.array([2]) + theta_E = 1.0 + q = 0.7 + phi_G = 1.0 + e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) + values = self.sie.hessian(x, y, theta_E, e1, e2) + gamma = 2 + values_spemd = self.epl.hessian(x, y, theta_E, gamma, e1, e2) + assert values[0] == values_spemd[0] -if __name__ == '__main__': + values = self.sie_nie.hessian(x, y, theta_E, e1, e2) + s_scale = 0.0000001 + values_spemd = self.nie.hessian(x, y, theta_E, e1, e2, s_scale) + npt.assert_almost_equal(values, values_spemd, decimal=5) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_sis.py b/test/test_LensModel/test_Profiles/test_sis.py index 4cf0359a5..7baa28331 100644 --- a/test/test_LensModel/test_Profiles/test_sis.py +++ b/test/test_LensModel/test_Profiles/test_sis.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.sis import SIS @@ -9,27 +9,26 @@ class TestSIS(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.SIS = SIS() def test_function(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 values = self.SIS.function(x, y, phi_E) npt.assert_almost_equal(values[0], 2.2360679774997898, decimal=9) x = np.array([0]) y = np.array([0]) - phi_E = 1. 
- values = self.SIS.function( x, y, phi_E) + phi_E = 1.0 + values = self.SIS.function(x, y, phi_E) assert values[0] == 0 - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.SIS.function( x, y, phi_E) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.SIS.function(x, y, phi_E) npt.assert_almost_equal(values[0], 2.2360679774997898, decimal=9) npt.assert_almost_equal(values[1], 3.1622776601683795, decimal=9) npt.assert_almost_equal(values[2], 4.1231056256176606, decimal=9) @@ -37,18 +36,18 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. - f_x, f_y = self.SIS.derivatives( x, y, phi_E) + phi_E = 1.0 + f_x, f_y = self.SIS.derivatives(x, y, phi_E) npt.assert_almost_equal(f_x[0], 0.44721359549995793, decimal=9) npt.assert_almost_equal(f_y[0], 0.89442719099991586, decimal=9) x = np.array([0]) y = np.array([0]) - f_x, f_y = self.SIS.derivatives( x, y, phi_E) + f_x, f_y = self.SIS.derivatives(x, y, phi_E) assert f_x[0] == 0 assert f_y[0] == 0 - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.SIS.derivatives(x, y, phi_E) assert values[0][0] == 0.44721359549995793 assert values[1][0] == 0.89442719099991586 @@ -58,15 +57,15 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. - f_xx, f_xy, f_yx, f_yy = self.SIS.hessian( x, y, phi_E) + phi_E = 1.0 + f_xx, f_xy, f_yx, f_yy = self.SIS.hessian(x, y, phi_E) npt.assert_almost_equal(f_xx[0], 0.35777087639996635, decimal=9) npt.assert_almost_equal(f_yy[0], 0.089442719099991588, decimal=9) npt.assert_almost_equal(f_xy[0], -0.17888543819998318, decimal=9) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) - values = self.SIS.hessian( x, y, phi_E) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + values = self.SIS.hessian(x, y, phi_E) npt.assert_almost_equal(values[0][0], 0.35777087639996635, decimal=9) npt.assert_almost_equal(values[3][0], 0.089442719099991588, decimal=9) npt.assert_almost_equal(values[1][0], -0.17888543819998318, decimal=9) @@ -75,11 +74,11 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.094868329805051374, decimal=9) def test_theta2rho(self): - theta_E = 2. + theta_E = 2.0 rho0 = self.SIS.theta2rho(theta_E) theta_E_new = self.SIS.rho2theta(rho0) npt.assert_almost_equal(theta_E_new, theta_E, decimal=7) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_sis_truncate.py b/test/test_LensModel/test_Profiles/test_sis_truncate.py index fd20de992..f6d4a717b 100644 --- a/test/test_LensModel/test_Profiles/test_sis_truncate.py +++ b/test/test_LensModel/test_Profiles/test_sis_truncate.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.sis_truncate import SIS_truncate @@ -8,27 +8,26 @@ class TestSIS_truncate(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.SIS = SIS_truncate() def test_function(self): x = 1 y = 0 - phi_E = 1. + phi_E = 1.0 r_trunc = 2 values = self.SIS.function(x, y, phi_E, r_trunc) assert values == 1 x = np.array([0]) y = np.array([0]) - phi_E = 1. 
+ phi_E = 1.0 values = self.SIS.function(x, y, phi_E, r_trunc) assert values[0] == 0 - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values = self.SIS.function(x, y, phi_E, r_trunc) npt.assert_almost_equal(values[0], 2.2221359549995796, decimal=9) npt.assert_almost_equal(values[1], 2.8245553203367586, decimal=9) @@ -37,7 +36,7 @@ def test_function(self): def test_derivatives(self): x = 1 y = 2 - phi_E = 1. + phi_E = 1.0 r_trunc = 2 f_x, f_y = self.SIS.derivatives(x, y, phi_E, r_trunc) npt.assert_almost_equal(f_x, 0.39442719099991586, decimal=9) @@ -48,8 +47,8 @@ def test_derivatives(self): assert f_x[0] == 0 assert f_y[0] == 0 - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.SIS.derivatives(x, y, phi_E, r_trunc) npt.assert_almost_equal(values[0][0], 0.39442719099991586, decimal=9) npt.assert_almost_equal(values[1][0], 0.78885438199983171, decimal=9) @@ -59,15 +58,15 @@ def test_derivatives(self): def test_hessian(self): x = 1 y = 0 - phi_E = 1. + phi_E = 1.0 r_trunc = 2 f_xx, f_xy, f_yx, f_yy = self.SIS.hessian(x, y, phi_E, r_trunc) assert f_xx == 0 assert f_yy == 1 assert f_xy == 0 npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.SIS.hessian(x, y, phi_E, r_trunc) npt.assert_almost_equal(values[0][0], 0.21554175279993265, decimal=9) npt.assert_almost_equal(values[3][0], -0.3211145618000168, decimal=9) @@ -77,5 +76,5 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.18973665961010272, decimal=9) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_spemd.py b/test/test_LensModel/test_Profiles/test_spemd.py index c011e5b81..28304bfbe 100644 --- a/test/test_LensModel/test_Profiles/test_spemd.py +++ b/test/test_LensModel/test_Profiles/test_spemd.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np @@ -8,30 +8,34 @@ try: import fastell4py + fastell4py_bool = True except: - print("Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!") + print( + "Warning: fastell4py not available, tests will be trivially fulfilled without giving the right answer!" + ) fastell4py_bool = False class TestSPEMD(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): from lenstronomy.LensModel.Profiles.spemd import SPEMD + self.SPEMD = SPEMD(suppress_fastell=True) from lenstronomy.LensModel.Profiles.nie import NIE + self.NIE = NIE() def test_function(self): - phi_E = 1. - gamma = 2. + phi_E = 1.0 + gamma = 2.0 q = 0.999 - phi_G = 1. + phi_G = 1.0 s_scale = 0.1 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - x = np.array([1., 2]) + x = np.array([1.0, 2]) y = np.array([2, 0]) values = self.SPEMD.function(x, y, phi_E, gamma, e1, e2, s_scale) if fastell4py_bool: @@ -45,10 +49,10 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. - gamma = 2. - q = 1. - phi_G = 1. + phi_E = 1.0 + gamma = 2.0 + q = 1.0 + phi_G = 1.0 s_scale = 0.1 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.SPEMD.derivatives(x, y, phi_E, gamma, e1, e2, s_scale) @@ -61,7 +65,7 @@ def test_derivatives(self): npt.assert_almost_equal(f_y, 0, decimal=7) q = 0.7 - phi_G = 1. 
+ phi_G = 1.0 s_scale = 0.001 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.SPEMD.derivatives(x, y, phi_E, gamma, e1, e2, s_scale) @@ -74,17 +78,19 @@ def test_derivatives(self): npt.assert_almost_equal(f_y, 0, decimal=7) def test_hessian(self): - x = np.array([1.]) - y = np.array([2.]) - phi_E = 1. - gamma = 2. + x = np.array([1.0]) + y = np.array([2.0]) + phi_E = 1.0 + gamma = 2.0 q = 0.9 - phi_G = 1. + phi_G = 1.0 s_scale = 0.001 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.SPEMD.hessian(x, y, phi_E, gamma, e1, e2, s_scale) if fastell4py_bool: - f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.NIE.hessian(x, y, phi_E, e1, e2, s_scale) + f_xx_nie, f_xy_nie, f_yx_nie, f_yy_nie = self.NIE.hessian( + x, y, phi_E, e1, e2, s_scale + ) npt.assert_almost_equal(f_xx, f_xx_nie, decimal=4) npt.assert_almost_equal(f_yy, f_yy_nie, decimal=4) npt.assert_almost_equal(f_xy, f_xy_nie, decimal=4) @@ -96,7 +102,9 @@ def test_hessian(self): npt.assert_almost_equal(f_xy, f_yx, decimal=8) def test_bounds(self): - compute_bool = self.SPEMD._parameter_constraints(q_fastell=-1, gam=-1, s2=-1, q=-1) + compute_bool = self.SPEMD._parameter_constraints( + q_fastell=-1, gam=-1, s2=-1, q=-1 + ) assert compute_bool is False def test_is_not_empty(self): @@ -110,5 +118,5 @@ def test_is_not_empty(self): assert not func(np.array([]), np.array([])) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_spep.py b/test/test_LensModel/test_Profiles/test_spep.py index 6a49cf03b..0fd76870e 100644 --- a/test/test_LensModel/test_Profiles/test_spep.py +++ b/test/test_LensModel/test_Profiles/test_spep.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.spep import SPEP @@ -9,10 +9,10 @@ import pytest import numpy.testing as npt + class TestSPEP(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.SPEP = SPEP() self.SIE = SIE() @@ -20,10 +20,10 @@ def setup_method(self): def test_function(self): x = 1 y = 2 - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) values = self.SPEP.function(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(values, 2.104213947346917, decimal=7) @@ -42,10 +42,10 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.SPEP.derivatives(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(f_x[0], 0.43989645846696634, decimal=7) @@ -67,10 +67,10 @@ def test_derivatives(self): x = 1 y = 2 - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_x, f_y = self.SPEP.derivatives(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(f_x, 0.43989645846696634, decimal=7) @@ -84,18 +84,18 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 0.9 - phi_G = 1. 
+ phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.SPEP.hessian(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(f_xx[0], 0.46312881977317422, decimal=7) npt.assert_almost_equal(f_yy[0], 0.15165326557198552, decimal=7) npt.assert_almost_equal(f_xy[0], -0.20956958696323871, decimal=7) npt.assert_almost_equal(f_xy, f_yx, decimal=8) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) values = self.SPEP.hessian(x, y, phi_E, gamma, e1, e2) npt.assert_almost_equal(values[0][0], 0.46312881977317422, decimal=7) npt.assert_almost_equal(values[3][0], 0.15165326557198552, decimal=7) @@ -105,12 +105,12 @@ def test_hessian(self): npt.assert_almost_equal(values[1][1], -0.10270375656049677, decimal=7) def test_spep_sie_conventions(self): - x = np.array([1., 2., 0.]) - y = np.array([2, 1., 1.]) - phi_E = 1. + x = np.array([1.0, 2.0, 0.0]) + y = np.array([2, 1.0, 1.0]) + phi_E = 1.0 gamma = 2 q = 0.9999 - phi_G = 1. + phi_G = 1.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) f_xx, f_xy, f_yx, f_yy = self.SPEP.hessian(x, y, phi_E, gamma, e1, e2) f_xx_sie, f_xy_sie, f_yx_sie, f_yy_sie = self.SIE.hessian(x, y, phi_E, e1, e2) @@ -120,6 +120,5 @@ def test_spep_sie_conventions(self): npt.assert_almost_equal(f_yx, f_yx_sie, decimal=4) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() - diff --git a/test/test_LensModel/test_Profiles/test_splcore.py b/test/test_LensModel/test_Profiles/test_splcore.py index 4351253f3..83c5bff5d 100644 --- a/test/test_LensModel/test_Profiles/test_splcore.py +++ b/test/test_LensModel/test_Profiles/test_splcore.py @@ -1,4 +1,4 @@ -__author__ = 'dangilman' +__author__ = "dangilman" from lenstronomy.LensModel.Profiles.splcore import SPLCORE @@ -9,67 +9,62 @@ class TestSPLCORE(object): - def setup_method(self): - self.profile = SPLCORE() def test_no_potential(self): - - npt.assert_raises(Exception, self.profile.function, 0., 0., 0., 0., 0.) + npt.assert_raises(Exception, self.profile.function, 0.0, 0.0, 0.0, 0.0, 0.0) def test_origin(self): - - x = 0. - y = 0. - sigma0 = 1. + x = 0.0 + y = 0.0 + sigma0 = 1.0 r_core = 0.1 gamma = 2.4 alpha_x, alpha_y = self.profile.derivatives(x, y, sigma0, r_core, gamma) - npt.assert_almost_equal(alpha_x, 0.) - npt.assert_almost_equal(alpha_y, 0.) + npt.assert_almost_equal(alpha_x, 0.0) + npt.assert_almost_equal(alpha_y, 0.0) fxx, fxy, fyx, fyy = self.profile.hessian(x, y, sigma0, r_core, gamma) kappa = self.profile.density_2d(x, y, sigma0 / r_core, r_core, gamma) npt.assert_almost_equal(fxx, kappa) npt.assert_almost_equal(fyy, kappa) - npt.assert_almost_equal(fxy, 0.) - npt.assert_almost_equal(fyx, 0.) + npt.assert_almost_equal(fxy, 0.0) + npt.assert_almost_equal(fyx, 0.0) r = 0.01 xmin = 0.001 - rmin = self.profile._safe_r_division(r, 1., xmin) + rmin = self.profile._safe_r_division(r, 1.0, xmin) npt.assert_equal(rmin, r) r = 1e-9 - rmin = self.profile._safe_r_division(r, 1., xmin) + rmin = self.profile._safe_r_division(r, 1.0, xmin) npt.assert_equal(rmin, xmin) xmin = 1e-2 r = np.logspace(-3, 0, 100) inds = np.where(r < xmin) - rmin = self.profile._safe_r_division(r, 1., xmin) + rmin = self.profile._safe_r_division(r, 1.0, xmin) npt.assert_almost_equal(rmin[inds], xmin) def test_g_function(self): - gamma = 2.5 rc = 0.01 - rho0 = 1. - R = 5. 
+ rho0 = 1.0 + R = 5.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand3d, 0, R, args=args)[0] mass_analytic = self.profile.mass_3d(R, rho0, rc, gamma) npt.assert_almost_equal(mass_analytic, mass_numerical) - gamma = 2. + gamma = 2.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand3d, 0, R, args=args)[0] mass_analytic = self.profile.mass_3d(R, rho0, rc, gamma) npt.assert_almost_equal(mass_analytic, mass_numerical) - gamma = 3. + gamma = 3.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand3d, 0, R, args=args)[0] mass_analytic = self.profile.mass_3d(R, rho0, rc, gamma) @@ -85,11 +80,10 @@ def test_g_function(self): npt.assert_almost_equal(mass_analytic_from_sigm0, mass_numerical) def test_f_function(self): - gamma = 2.5 rc = 0.01 - rho0 = 1. - R = 5. + rho0 = 1.0 + R = 5.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand2d, 0, R, args=args)[0] @@ -99,7 +93,7 @@ def test_f_function(self): mass_analytic_from_sigm0 = self.profile.mass_2d_lens(R, sigma0, rc, gamma) npt.assert_almost_equal(mass_analytic_from_sigm0, mass_numerical) - gamma = 2. + gamma = 2.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand2d, 0, R, args=args)[0] mass_analytic = self.profile.mass_2d(R, rho0, rc, gamma) @@ -108,7 +102,7 @@ def test_f_function(self): mass_analytic_from_sigm0 = self.profile.mass_2d_lens(R, sigma0, rc, gamma) npt.assert_almost_equal(mass_analytic_from_sigm0, mass_numerical) - gamma = 3. + gamma = 3.0 args = (rho0, rc, gamma) mass_numerical = quad(self._mass_integrand2d, 0, R, args=args)[0] mass_analytic = self.profile.mass_2d(R, rho0, rc, gamma) @@ -127,11 +121,13 @@ def test_f_function(self): npt.assert_almost_equal(mass_analytic_from_sigm0, mass_numerical) def _mass_integrand3d(self, r, rho0, rc, gamma): - return 4 * np.pi * r ** 2 * rho0 * rc ** gamma / (rc ** 2 + r ** 2) ** (gamma / 2) + return ( + 4 * np.pi * r**2 * rho0 * rc**gamma / (rc**2 + r**2) ** (gamma / 2) + ) def _mass_integrand2d(self, r, rho0, rc, gamma): return 2 * np.pi * r * self.profile.density_2d(r, 0, rho0, rc, gamma) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_spp.py b/test/test_LensModel/test_Profiles/test_spp.py index 4a55cd6fd..501acc962 100644 --- a/test/test_LensModel/test_Profiles/test_spp.py +++ b/test/test_LensModel/test_Profiles/test_spp.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.spep import SPEP from lenstronomy.LensModel.Profiles.spp import SPP @@ -9,10 +9,10 @@ import numpy.testing as npt import pytest + class TestSPEP(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.SPEP = SPEP() self.SPP = SPP() @@ -21,12 +21,12 @@ def setup_method(self): def test_function(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 1 - phi_G = 0. 
+ phi_G = 0.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q)) + E = phi_E / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) values_spep = self.SPEP.function(x, y, E, gamma, e1, e2) values_spp = self.SPP.function(x, y, E, gamma) assert values_spep[0] == values_spp[0] @@ -36,8 +36,8 @@ def test_function(self): values_spp = self.SPP.function(x, y, E, gamma) assert values_spep[0] == values_spp[0] - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) values_spep = self.SPEP.function(x, y, E, gamma, e1, e2) values_spp = self.SPP.function(x, y, E, gamma) assert values_spep[0] == values_spp[0] @@ -47,12 +47,12 @@ def test_function(self): def test_derivatives(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 q = 1 - phi_G = 0. + phi_G = 0.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q)) + E = phi_E / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, E, gamma, e1, e2) f_x_spp, f_y_spp = self.SPP.derivatives(x, y, E, gamma) npt.assert_almost_equal(f_x_spep, f_x_spp, decimal=9) @@ -64,8 +64,8 @@ def test_derivatives(self): npt.assert_almost_equal(f_x_spep, f_x_spp, decimal=9) npt.assert_almost_equal(f_y_spep, f_y_spp, decimal=9) - x = np.array([1,3,4]) - y = np.array([2,1,1]) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) f_x_spep, f_y_spep = self.SPEP.derivatives(x, y, E, gamma, e1, e2) f_x_spp, f_y_spp = self.SPP.derivatives(x, y, E, gamma) npt.assert_almost_equal(f_x_spep, f_x_spp, decimal=9) @@ -74,21 +74,25 @@ def test_derivatives(self): def test_hessian(self): x = np.array([1]) y = np.array([2]) - phi_E = 1. + phi_E = 1.0 gamma = 1.9 - q = 1. - phi_G = 0. + q = 1.0 + phi_G = 0.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - E = phi_E / (((3-gamma)/2.)**(1./(1-gamma))*np.sqrt(q)) - f_xx_spep, f_xy_spep, f_yx_spep, f_yy_spep = self.SPEP.hessian(x, y, E, gamma, e1, e2) + E = phi_E / (((3 - gamma) / 2.0) ** (1.0 / (1 - gamma)) * np.sqrt(q)) + f_xx_spep, f_xy_spep, f_yx_spep, f_yy_spep = self.SPEP.hessian( + x, y, E, gamma, e1, e2 + ) f_xx_spp, f_xy_spp, f_yx_spp, f_yy_spp = self.SPP.hessian(x, y, E, gamma) assert f_xx_spep[0] == f_xx_spp[0] assert f_yy_spep[0] == f_yy_spp[0] assert f_xy_spep[0] == f_xy_spp[0] assert f_yx_spep[0] == f_yx_spp[0] - x = np.array([1,3,4]) - y = np.array([2,1,1]) - f_xx_spep, f_xy_spep, f_yx_spep, f_yy_spep = self.SPEP.hessian(x, y, E, gamma, e1, e2) + x = np.array([1, 3, 4]) + y = np.array([2, 1, 1]) + f_xx_spep, f_xy_spep, f_yx_spep, f_yy_spep = self.SPEP.hessian( + x, y, E, gamma, e1, e2 + ) f_xx_spp, f_xy_spp, f_yx_spp, f_yy_spp = self.SPP.hessian(x, y, E, gamma) assert f_xx_spep[0] == f_xx_spp[0] assert f_yy_spep[0] == f_yy_spp[0] @@ -103,8 +107,8 @@ def test_hessian(self): def test_compare_sis(self): x = np.array([1]) y = np.array([2]) - theta_E = 1. - gamma = 2. + theta_E = 1.0 + gamma = 2.0 f_sis = self.SIS.function(x, y, theta_E) f_spp = self.SPP.function(x, y, theta_E, gamma) f_x_sis, f_y_sis = self.SIS.derivatives(x, y, theta_E) @@ -119,7 +123,7 @@ def test_compare_sis(self): npt.assert_almost_equal(f_xy_sis[0], f_xy_spp[0], decimal=7) def test_unit_conversion(self): - theta_E = 2. 
+ theta_E = 2.0 gamma = 2.2 rho0 = self.SPP.theta2rho(theta_E, gamma) theta_E_out = self.SPP.rho2theta(rho0, gamma) @@ -140,5 +144,5 @@ def test_grav_pot(self): npt.assert_almost_equal(grav_pot, 12.566370614359172, decimal=8) -if __name__ == '__main__': - pytest.main() +if __name__ == "__main__": + pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_synthesis.py b/test/test_LensModel/test_Profiles/test_synthesis.py index 6fe6497b0..b6bcb15d7 100644 --- a/test/test_LensModel/test_Profiles/test_synthesis.py +++ b/test/test_LensModel/test_Profiles/test_synthesis.py @@ -1,4 +1,4 @@ -__author__ = 'mgomer' +__author__ = "mgomer" from lenstronomy.LensModel.Profiles.synthesis import SynthesisProfile from lenstronomy.Analysis.lens_profile import LensProfileAnalysis @@ -8,95 +8,119 @@ import numpy.testing as npt import pytest + class TestSynthesis(object): - """ - tests the synthesis model's ability to approximate several profiles - """ + """Tests the synthesis model's ability to approximate several profiles.""" + def setup_method(self): - self.lin_fit_hyperparams = {'num_r_evals': 100} - self.s_list = np.logspace(-6., 3., 30) - self.x_test=np.linspace(0.01, 2, 10) + self.lin_fit_hyperparams = {"num_r_evals": 100} + self.s_list = np.logspace(-6.0, 3.0, 30) + self.x_test = np.linspace(0.01, 2, 10) self.y_test = np.zeros_like(self.x_test) - def test_CSE_components(self): - #test potential, deflection, and kappa using CSE components for a few profiles + # test potential, deflection, and kappa using CSE components for a few profiles kwargs_list = [] - center_x=0 - center_y=0 + center_x = 0 + center_y = 0 for s in self.s_list: - kwargs_list.append({'a': 1, 's': s, 'e1': 0, 'e2': 0, 'center_x': center_x, 'center_y': center_y}) + kwargs_list.append( + { + "a": 1, + "s": s, + "e1": 0, + "e2": 0, + "center_x": center_x, + "center_y": center_y, + } + ) # test nfw - kwargs_nfw = [{'Rs': 1.5, 'alpha_Rs': 1, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'NFW', - 'component_lens_model': 'CSE', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_nfw = LensModel(['NFW']) - self.compare_synth(lensmodel_synth,lensmodel_nfw,kwargs_nfw) - #test sersic - kwargs_sersic = [{'k_eff':3, 'R_sersic':1.5, 'n_sersic':3.5}] - kwargs_synthesis = {'target_lens_model': 'SERSIC', - 'component_lens_model': 'CSE', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_sersic = LensModel(['SERSIC']) - self.compare_synth(lensmodel_synth,lensmodel_sersic,kwargs_sersic) - #test hernquist - kwargs_hernquist = [{'sigma0':10, 'Rs':1.5, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'HERNQUIST', - 'component_lens_model': 'CSE', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_hernquist = LensModel(['HERNQUIST']) - self.compare_synth(lensmodel_synth,lensmodel_hernquist,kwargs_hernquist) + kwargs_nfw = [ + {"Rs": 1.5, "alpha_Rs": 1, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "NFW", + "component_lens_model": "CSE", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = 
LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_nfw = LensModel(["NFW"]) + self.compare_synth(lensmodel_synth, lensmodel_nfw, kwargs_nfw) + # test sersic + kwargs_sersic = [{"k_eff": 3, "R_sersic": 1.5, "n_sersic": 3.5}] + kwargs_synthesis = { + "target_lens_model": "SERSIC", + "component_lens_model": "CSE", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_sersic = LensModel(["SERSIC"]) + self.compare_synth(lensmodel_synth, lensmodel_sersic, kwargs_sersic) + # test hernquist + kwargs_hernquist = [ + {"sigma0": 10, "Rs": 1.5, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "HERNQUIST", + "component_lens_model": "CSE", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_hernquist = LensModel(["HERNQUIST"]) + self.compare_synth(lensmodel_synth, lensmodel_hernquist, kwargs_hernquist) def test_gaussian_components(self): - #test potential, deflection, and kappa using Gaussian components for a few profiles + # test potential, deflection, and kappa using Gaussian components for a few profiles kwargs_list = [] - center_x=0 - center_y=0 + center_x = 0 + center_y = 0 for s in self.s_list: - kwargs_list.append({'amp': 1, 'sigma': s, 'e1': 0, 'e2': 0, 'center_x': 0, 'center_y': 0}) + kwargs_list.append( + {"amp": 1, "sigma": s, "e1": 0, "e2": 0, "center_x": 0, "center_y": 0} + ) # test nfw - kwargs_nfw = [{'Rs': 1.5, 'alpha_Rs': 1, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'NFW', - 'component_lens_model': 'GAUSSIAN_ELLIPSE_KAPPA', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_nfw = LensModel(['NFW']) - self.compare_synth(lensmodel_synth,lensmodel_nfw,kwargs_nfw) - #test sersic - kwargs_sersic = [{'k_eff':3, 'R_sersic':1.5, 'n_sersic':3.5}] - kwargs_synthesis = {'target_lens_model': 'SERSIC', - 'component_lens_model': 'GAUSSIAN_ELLIPSE_KAPPA', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_sersic = LensModel(['SERSIC']) - self.compare_synth(lensmodel_synth,lensmodel_sersic,kwargs_sersic) - #test hernquist - kwargs_hernquist = [{'sigma0':10, 'Rs':1.5, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'HERNQUIST', - 'component_lens_model': 'GAUSSIAN_ELLIPSE_KAPPA', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_hernquist = LensModel(['HERNQUIST']) - self.compare_synth(lensmodel_synth,lensmodel_hernquist,kwargs_hernquist) + kwargs_nfw = [ + {"Rs": 1.5, "alpha_Rs": 1, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "NFW", + "component_lens_model": "GAUSSIAN_ELLIPSE_KAPPA", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_nfw = LensModel(["NFW"]) + self.compare_synth(lensmodel_synth, lensmodel_nfw, kwargs_nfw) + # test sersic + kwargs_sersic = [{"k_eff": 3, 
"R_sersic": 1.5, "n_sersic": 3.5}] + kwargs_synthesis = { + "target_lens_model": "SERSIC", + "component_lens_model": "GAUSSIAN_ELLIPSE_KAPPA", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_sersic = LensModel(["SERSIC"]) + self.compare_synth(lensmodel_synth, lensmodel_sersic, kwargs_sersic) + # test hernquist + kwargs_hernquist = [ + {"sigma0": 10, "Rs": 1.5, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "HERNQUIST", + "component_lens_model": "GAUSSIAN_ELLIPSE_KAPPA", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_hernquist = LensModel(["HERNQUIST"]) + self.compare_synth(lensmodel_synth, lensmodel_hernquist, kwargs_hernquist) - def compare_synth(self,lensmodel_synth,lensmodel_target,kwargs_target): + def compare_synth(self, lensmodel_synth, lensmodel_target, kwargs_target): # check potentials, deflection, kappa synth_pot = lensmodel_synth.potential(self.x_test, self.y_test, kwargs_target) target_pot = lensmodel_target.potential(self.x_test, self.y_test, kwargs_target) @@ -105,33 +129,45 @@ def compare_synth(self,lensmodel_synth,lensmodel_target,kwargs_target): synth_kappa = lensmodel_synth.kappa(self.x_test, self.y_test, kwargs_target) target_kappa = lensmodel_target.kappa(self.x_test, self.y_test, kwargs_target) - npt.assert_allclose(synth_pot, target_pot, rtol=1e-2) #potentials within 1% + npt.assert_allclose(synth_pot, target_pot, rtol=1e-2) # potentials within 1% npt.assert_allclose(synth_defl, target_defl, rtol=1e-2) # deflections within 1% npt.assert_allclose(synth_kappa, target_kappa, rtol=1e-2) # kappa within 1% def test_ellipticity_and_centers(self): - #check that even with ellipticity and offset center, same radial profile - r_test=self.x_test + # check that even with ellipticity and offset center, same radial profile + r_test = self.x_test kwargs_list = [] center_x = 1 center_y = -1 for s in self.s_list: - kwargs_list.append({'a': 1, 's': s, 'e1': 0.3, 'e2': 0, 'center_x': center_x, 'center_y': center_y}) + kwargs_list.append( + { + "a": 1, + "s": s, + "e1": 0.3, + "e2": 0, + "center_x": center_x, + "center_y": center_y, + } + ) # test nfw from CSEs - kwargs_nfw = [{'Rs': 1.5, 'alpha_Rs': 1, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'NFW', - 'component_lens_model': 'CSE', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } - lensmodel_synth = LensModel(['SYNTHESIS'], kwargs_synthesis=kwargs_synthesis) - lensmodel_nfw = LensModel(['NFW']) + kwargs_nfw = [ + {"Rs": 1.5, "alpha_Rs": 1, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "NFW", + "component_lens_model": "CSE", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } + lensmodel_synth = LensModel(["SYNTHESIS"], kwargs_synthesis=kwargs_synthesis) + lensmodel_nfw = LensModel(["NFW"]) LensAn_synth = LensProfileAnalysis(lensmodel_synth) synth_avg_kappa = LensAn_synth.radial_lens_profile(r_test, kwargs_nfw) LensAn_nfw = LensProfileAnalysis(lensmodel_nfw) nfw_avg_kappa = LensAn_nfw.radial_lens_profile(r_test, kwargs_nfw) - npt.assert_allclose(nfw_avg_kappa,synth_avg_kappa, rtol=5e-2) + npt.assert_allclose(nfw_avg_kappa, synth_avg_kappa, rtol=5e-2) def 
test_set_static_method(self): # test that when set_static is used the result doesn't load the kwargs anymore, instead using the saved weights @@ -139,19 +175,33 @@ def test_set_static_method(self): center_x = 0 center_y = 0 for s in self.s_list: - kwargs_list.append({'a': 1, 's': s, 'e1': 0, 'e2': 0, 'center_x': center_x, 'center_y': center_y}) + kwargs_list.append( + { + "a": 1, + "s": s, + "e1": 0, + "e2": 0, + "center_x": center_x, + "center_y": center_y, + } + ) # test nfw - kwargs_nfw = [{'Rs': 1.5, 'alpha_Rs': 1, 'center_x': center_x, 'center_y': center_y}] - kwargs_synthesis = {'target_lens_model': 'NFW', - 'component_lens_model': 'CSE', - 'kwargs_list': kwargs_list, - 'lin_fit_hyperparams': self.lin_fit_hyperparams - } + kwargs_nfw = [ + {"Rs": 1.5, "alpha_Rs": 1, "center_x": center_x, "center_y": center_y} + ] + kwargs_synthesis = { + "target_lens_model": "NFW", + "component_lens_model": "CSE", + "kwargs_list": kwargs_list, + "lin_fit_hyperparams": self.lin_fit_hyperparams, + } synth_prof = SynthesisProfile(**kwargs_synthesis) synth_func = synth_prof.function(self.x_test, self.y_test, **kwargs_nfw[0]) - linear_weights=synth_prof.linear_weight_mle_fit(kwargs_nfw,kwargs_list) + linear_weights = synth_prof.linear_weight_mle_fit(kwargs_nfw, kwargs_list) synth_prof.set_static(linear_weights) - kwargs_nfw[0]['Rs']=10 #change kwargs, doesn't affect static weights - changed_synth_func = synth_prof.function(self.x_test, self.y_test, **kwargs_nfw[0]) - npt.assert_array_equal(synth_func,changed_synth_func) \ No newline at end of file + kwargs_nfw[0]["Rs"] = 10 # change kwargs, doesn't affect static weights + changed_synth_func = synth_prof.function( + self.x_test, self.y_test, **kwargs_nfw[0] + ) + npt.assert_array_equal(synth_func, changed_synth_func) diff --git a/test/test_LensModel/test_Profiles/test_tnfw.py b/test/test_LensModel/test_Profiles/test_tnfw.py index 094439fb7..638538c8f 100644 --- a/test/test_LensModel/test_Profiles/test_tnfw.py +++ b/test/test_LensModel/test_Profiles/test_tnfw.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.tnfw import TNFW @@ -10,7 +10,6 @@ class TestTNFW(object): - def setup_method(self): self.nfw = NFW() self.tnfw = TNFW() @@ -20,7 +19,7 @@ def test_deflection(self): alpha_Rs = 0.1 r_trunc = 1000000000000 * Rs x = np.linspace(0.0 * Rs, 5 * Rs, 1000) - y = np.linspace(0., 1, 1000) + y = np.linspace(0.0, 1, 1000) xdef_t, ydef_t = self.tnfw.derivatives(x, y, Rs, alpha_Rs, r_trunc) xdef, ydef = self.nfw.derivatives(x, y, Rs, alpha_Rs) @@ -29,7 +28,6 @@ def test_deflection(self): np.testing.assert_almost_equal(ydef_t, ydef, 5) def test_potential_limit(self): - Rs = 0.2 alpha_Rs = 0.1 r_trunc = 1000000000000 * Rs @@ -42,21 +40,20 @@ def test_potential_limit(self): np.testing.assert_almost_equal(pot, pot_t, 4) def test_potential_derivative(self): - Rs = 1.2 alpha_Rs = 1 - r_trunc = 3*Rs + r_trunc = 3 * Rs R = np.linspace(0.5 * Rs, 2.2 * Rs, 5000) dx = R[1] - R[0] - + alpha_tnfw = self.tnfw.nfwAlpha(R, Rs, 1, r_trunc, R, 0)[0] - + potential_array = self.tnfw.nfwPot(R, Rs, 1, r_trunc) alpha_tnfw_num_array = np.gradient(potential_array, dx) - + potential_from_float = [self.tnfw.nfwPot(R_i, Rs, 1, r_trunc) for R_i in R] alpha_tnfw_num_from_float = np.gradient(potential_from_float, dx) - + npt.assert_almost_equal(alpha_tnfw_num_array, alpha_tnfw, 4) npt.assert_almost_equal(alpha_tnfw_num_from_float, alpha_tnfw, 4) @@ -67,8 +64,10 @@ def test_gamma(self): x = np.linspace(0.1 * Rs, 5 * Rs, 1000) y = 
np.linspace(0.2, 1, 1000) - g1t, g2t = self.tnfw.nfwGamma((x ** 2 + y ** 2) ** .5, Rs, alpha_Rs, r_trunc, x, y) - g1, g2 = self.nfw.nfwGamma((x ** 2 + y ** 2) ** .5, Rs, alpha_Rs, x, y) + g1t, g2t = self.tnfw.nfwGamma( + (x**2 + y**2) ** 0.5, Rs, alpha_Rs, r_trunc, x, y + ) + g1, g2 = self.nfw.nfwGamma((x**2 + y**2) ** 0.5, Rs, alpha_Rs, x, y) np.testing.assert_almost_equal(g1t, g1, 5) np.testing.assert_almost_equal(g2t, g2, 5) @@ -91,7 +90,9 @@ def test_hessian(self): Rs = 0.2 r_trunc = 5 xxt, xyt, yxt, yyt = self.tnfw.hessian(Rs, 0, Rs, alpha_Rs, r_trunc) - xxt_delta, xyt_delta, yxt_delta, yyt_delta = self.tnfw.hessian(Rs+0.000001, 0, Rs, alpha_Rs, r_trunc) + xxt_delta, xyt_delta, yxt_delta, yyt_delta = self.tnfw.hessian( + Rs + 0.000001, 0, Rs, alpha_Rs, r_trunc + ) npt.assert_almost_equal(xxt, xxt_delta, decimal=6) def test_density_2d(self): @@ -106,7 +107,6 @@ def test_density_2d(self): np.testing.assert_almost_equal(kappa, kappa_t, 5) def test_transform(self): - rho0, Rs = 1, 2 trs = self.tnfw.rho02alpha(rho0, Rs) @@ -115,7 +115,6 @@ def test_transform(self): npt.assert_almost_equal(rho0, rho_out) def test_numerical_derivatives(self): - Rs = 0.2 alpha_Rs = 0.1 r_trunc = 1.5 * Rs @@ -124,22 +123,23 @@ def test_numerical_derivatives(self): x0, y0 = 0.1, 0.1 - x_def_t, y_def_t = self.tnfw.derivatives(x0,y0,Rs,alpha_Rs,r_trunc) - x_def_t_deltax, _ = self.tnfw.derivatives(x0+diff, y0, Rs, alpha_Rs,r_trunc) - x_def_t_deltay, y_def_t_deltay = self.tnfw.derivatives(x0, y0 + diff, Rs, alpha_Rs,r_trunc) - actual = self.tnfw.hessian(x0,y0,Rs,alpha_Rs,r_trunc) + x_def_t, y_def_t = self.tnfw.derivatives(x0, y0, Rs, alpha_Rs, r_trunc) + x_def_t_deltax, _ = self.tnfw.derivatives(x0 + diff, y0, Rs, alpha_Rs, r_trunc) + x_def_t_deltay, y_def_t_deltay = self.tnfw.derivatives( + x0, y0 + diff, Rs, alpha_Rs, r_trunc + ) + actual = self.tnfw.hessian(x0, y0, Rs, alpha_Rs, r_trunc) - f_xx_approx = (x_def_t_deltax - x_def_t) * diff ** -1 - f_yy_approx = (y_def_t_deltay - y_def_t) * diff ** -1 - f_xy_approx = (x_def_t_deltay - y_def_t) * diff ** -1 + f_xx_approx = (x_def_t_deltax - x_def_t) * diff**-1 + f_yy_approx = (y_def_t_deltay - y_def_t) * diff**-1 + f_xy_approx = (x_def_t_deltay - y_def_t) * diff**-1 numerical = [f_xx_approx, f_xy_approx, f_xy_approx, f_yy_approx] - for (approx,true) in zip(numerical,actual): - npt.assert_almost_equal(approx,true) + for approx, true in zip(numerical, actual): + npt.assert_almost_equal(approx, true) def test_F_function_at_one(self): - - f_tnfw = self.tnfw.F(1.) 
+ f_tnfw = self.tnfw.F(1.0) npt.assert_(f_tnfw == 1) f_tnfw = self.tnfw.F(np.ones((2, 2))) f_tnfw = f_tnfw.ravel() @@ -152,9 +152,9 @@ def test_F_function_at_zero(self): def test__cos_function(self): # test private _cos_function function for raise - x = 3 +6j + x = 3 + 6j npt.assert_raises(Exception, self.tnfw._cos_function, x) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_tnfw_ellipse.py b/test/test_LensModel/test_Profiles/test_tnfw_ellipse.py index 63547a1d3..1b436f590 100644 --- a/test/test_LensModel/test_Profiles/test_tnfw_ellipse.py +++ b/test/test_LensModel/test_Profiles/test_tnfw_ellipse.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LensModel.Profiles.tnfw import TNFW @@ -11,9 +11,8 @@ class TestNFWELLIPSE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.tnfw = TNFW() self.nfw_e = NFW_ELLIPSE() @@ -23,15 +22,27 @@ def test_function(self): x = np.linspace(start=0.1, stop=10, num=10) y = np.linspace(start=0.1, stop=10, num=10) # test round case against TNFW - kwargs_tnfw_e_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5, 'e1': 0., 'e2': 0} - kwargs_tnfw_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5} + kwargs_tnfw_e_round = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 5, + "e1": 0.0, + "e2": 0, + } + kwargs_tnfw_round = {"Rs": 1, "alpha_Rs": 0.1, "r_trunc": 5} f_e = self.tnfw_e.function(x, y, **kwargs_tnfw_e_round) f_r = self.tnfw.function(x, y, **kwargs_tnfw_round) npt.assert_almost_equal(f_e, f_r, decimal=5) # test elliptical case with r_trunc -> infinity against NFW_ELLIPSE - kwargs_tnfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 500, 'e1': 0.2, 'e2': -0.01} - kwargs_nfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'e1': 0.2, 'e2': -0.01} + kwargs_tnfw_e = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 500, + "e1": 0.2, + "e2": -0.01, + } + kwargs_nfw_e = {"Rs": 1, "alpha_Rs": 0.1, "e1": 0.2, "e2": -0.01} f_te = self.tnfw_e.function(x, y, **kwargs_tnfw_e) f_e = self.nfw_e.function(x, y, **kwargs_nfw_e) npt.assert_almost_equal(f_te, f_e, decimal=3) @@ -40,16 +51,28 @@ def test_derivatives(self): x = np.linspace(start=0.1, stop=10, num=10) y = np.linspace(start=0.1, stop=10, num=10) # test round case against TNFW - kwargs_tnfw_e_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5, 'e1': 0., 'e2': 0} - kwargs_tnfw_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5} + kwargs_tnfw_e_round = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 5, + "e1": 0.0, + "e2": 0, + } + kwargs_tnfw_round = {"Rs": 1, "alpha_Rs": 0.1, "r_trunc": 5} f_xe, f_ye = self.tnfw_e.derivatives(x, y, **kwargs_tnfw_e_round) f_xr, f_yr = self.tnfw.derivatives(x, y, **kwargs_tnfw_round) npt.assert_almost_equal(f_xe, f_xr, decimal=5) npt.assert_almost_equal(f_ye, f_yr, decimal=5) # test elliptical case with r_trunc -> infinity against NFW_ELLIPSE - kwargs_tnfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 500, 'e1': 0.2, 'e2': -0.01} - kwargs_nfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'e1': 0.2, 'e2': -0.01} + kwargs_tnfw_e = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 500, + "e1": 0.2, + "e2": -0.01, + } + kwargs_nfw_e = {"Rs": 1, "alpha_Rs": 0.1, "e1": 0.2, "e2": -0.01} out_te = self.tnfw_e.derivatives(x, y, **kwargs_tnfw_e) out_e = self.nfw_e.derivatives(x, y, **kwargs_nfw_e) npt.assert_almost_equal(out_te, out_e, decimal=3) @@ -58,29 +81,53 @@ def test_hessian(self): x = np.linspace(start=0.1, stop=10, num=10) y = np.linspace(start=0.1, stop=10, num=10) # test round case 
against TNFW - kwargs_tnfw_e_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5, 'e1': 0., 'e2': 0} - kwargs_tnfw_round = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5} + kwargs_tnfw_e_round = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 5, + "e1": 0.0, + "e2": 0, + } + kwargs_tnfw_round = {"Rs": 1, "alpha_Rs": 0.1, "r_trunc": 5} out_e = self.tnfw_e.hessian(x, y, **kwargs_tnfw_e_round) out_r = self.tnfw.hessian(x, y, **kwargs_tnfw_round) npt.assert_almost_equal(out_e, out_r, decimal=4) # test elliptical case with r_trunc -> infinity against NFW_ELLIPSE - kwargs_tnfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 500, 'e1': 0.2, 'e2': -0.01} - kwargs_nfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'e1': 0.2, 'e2': -0.01} + kwargs_tnfw_e = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 500, + "e1": 0.2, + "e2": -0.01, + } + kwargs_nfw_e = {"Rs": 1, "alpha_Rs": 0.1, "e1": 0.2, "e2": -0.01} out_te = self.tnfw_e.hessian(x, y, **kwargs_tnfw_e) out_e = self.nfw_e.hessian(x, y, **kwargs_nfw_e) npt.assert_almost_equal(out_te, out_e, decimal=3) def test_mass_3d_lens(self): with npt.assert_raises(ValueError): - kwargs_tnfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5, 'e1': 0.1, 'e2': -0.02} + kwargs_tnfw_e = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 5, + "e1": 0.1, + "e2": -0.02, + } self.tnfw_e.mass_3d_lens(1, **kwargs_tnfw_e) def test_density_lens(self): with npt.assert_raises(ValueError): - kwargs_tnfw_e = {'Rs': 1, 'alpha_Rs': 0.1, 'r_trunc': 5, 'e1': 0.1, 'e2': -0.02} + kwargs_tnfw_e = { + "Rs": 1, + "alpha_Rs": 0.1, + "r_trunc": 5, + "e1": 0.1, + "e2": -0.02, + } self.tnfw_e.density_lens(1, **kwargs_tnfw_e) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Profiles/test_uldm.py b/test/test_LensModel/test_Profiles/test_uldm.py index 0c2524496..912d8b5fe 100644 --- a/test/test_LensModel/test_Profiles/test_uldm.py +++ b/test/test_LensModel/test_Profiles/test_uldm.py @@ -1,4 +1,4 @@ -__author__ = 'lucateo' +__author__ = "lucateo" from lenstronomy.LensModel.Profiles.uldm import Uldm @@ -8,9 +8,8 @@ class TestUldm(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.model = Uldm() @@ -31,19 +30,19 @@ def test_function(self): theta_c_large = 10 f_reference = self.model.function(0, 0, kappa_0, theta_c_large, 0, 0, slope) f_large = self.model.function(r, 0, kappa_0, theta_c_large, 0, 0, slope) - f_MSD = 0.5* kappa_0 * r**2 + f_MSD = 0.5 * kappa_0 * r**2 npt.assert_almost_equal(f_large - f_reference, f_MSD, decimal=3) def test_derivatives(self): x = 0.5 y = 0.8 r = np.sqrt(x**2 + y**2) - kappa_0, theta_c = 0.2, 9 # Trying MSD limit + kappa_0, theta_c = 0.2, 9 # Trying MSD limit slope = 6.5 - f_x, f_y = self.model.derivatives( x, y, kappa_0, theta_c, 0, 0, slope) + f_x, f_y = self.model.derivatives(x, y, kappa_0, theta_c, 0, 0, slope) alpha_MSD = kappa_0 * r - npt.assert_almost_equal(f_x, alpha_MSD * x/r, decimal=3) - npt.assert_almost_equal(f_y, alpha_MSD * y/r, decimal=3) + npt.assert_almost_equal(f_x, alpha_MSD * x / r, decimal=3) + npt.assert_almost_equal(f_y, alpha_MSD * y / r, decimal=3) def test_hessian(self): x = np.linspace(start=0.01, stop=100, num=100) @@ -53,14 +52,14 @@ def test_hessian(self): theta_c = 6 slope = 5.5 f_xx, f_xy, f_yx, f_yy = self.model.hessian(x, y, kappa_0, theta_c, 0, 0, slope) - kappa = 1./2 * (f_xx + f_yy) + kappa = 1.0 / 2 * (f_xx + f_yy) kappa_direct = self.model.kappa_r(r, kappa_0, theta_c, slope) npt.assert_almost_equal(kappa, kappa_direct, decimal=5) def 
test_mass_3d(self): x = np.array([1, 3, 4]) y = np.array([2, 1, 1]) - r = np.sqrt(x ** 2 + y ** 2) + r = np.sqrt(x**2 + y**2) kappa_0 = 0.1 theta_c = 7 slope = 4.5 @@ -69,6 +68,5 @@ def test_mass_3d(self): npt.assert_almost_equal(m3d, m3d_lens, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() - diff --git a/test/test_LensModel/test_Solver/test_lens_equation_solver.py b/test/test_LensModel/test_Solver/test_lens_equation_solver.py index 01258fffb..546969557 100644 --- a/test/test_LensModel/test_Solver/test_lens_equation_solver.py +++ b/test/test_LensModel/test_Solver/test_lens_equation_solver.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -9,7 +9,6 @@ class TestLensEquationSolver(object): - def setup_method(self): """ @@ -18,7 +17,7 @@ def setup_method(self): pass def test_spep_sis(self): - lens_model_list = ['SPEP', 'SIS'] + lens_model_list = ["SPEP", "SIS"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.1 @@ -26,50 +25,112 @@ def test_spep_sis(self): min_distance = 0.05 search_window = 10 gamma = 1.9 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.2, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, min_distance=min_distance, search_window=search_window, precision_limit=10**(-10), num_iter_max=10) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.2, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=10, + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) def test_nfw(self): - lens_model_list = ['NFW_ELLIPSE', 'SIS'] + lens_model_list = ["NFW_ELLIPSE", "SIS"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.1 sourcePos_y = -0.1 min_distance = 0.05 search_window = 10 - Rs = 4. 
- kwargs_lens = [{'alpha_Rs': 1., 'Rs': Rs, 'e1': 0.2, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 1, 'center_x': 0, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=min_distance, - search_window=search_window, - precision_limit=10**(-10), num_iter_max=10, - verbose=True, magnification_limit=1) + Rs = 4.0 + kwargs_lens = [ + { + "alpha_Rs": 1.0, + "Rs": Rs, + "e1": 0.2, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 1, "center_x": 0, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=10, + verbose=True, + magnification_limit=1, + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) def test_multiplane(self): - lens_model_list = ['SPEP', 'SIS'] - lensModel = LensModel(lens_model_list, z_source=1., lens_redshift_list=[0.5, 0.3], multi_plane=True) + lens_model_list = ["SPEP", "SIS"] + lensModel = LensModel( + lens_model_list, + z_source=1.0, + lens_redshift_list=[0.5, 0.3], + multi_plane=True, + ) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.1 sourcePos_y = -0.1 min_distance = 0.05 search_window = 10 gamma = 1.9 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.2, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, min_distance=min_distance, search_window=search_window, precision_limit=10**(-10), num_iter_max=10) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.2, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=10, + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) def test_central_image(self): - lens_model_list = ['SPEP', 'SIS', 'SHEAR'] - kwargs_spep = {'theta_E': 1, 'gamma': 2, 'e1': 0.2, 'e2': -0.03, 'center_x': 0, 'center_y': 0} - kwargs_sis = {'theta_E': 1, 'center_x': 1.5, 'center_y': 0} - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0} + lens_model_list = ["SPEP", "SIS", "SHEAR"] + kwargs_spep = { + "theta_E": 1, + "gamma": 2, + "e1": 0.2, + "e2": -0.03, + "center_x": 0, + "center_y": 0, + } + kwargs_sis = {"theta_E": 1, "center_x": 1.5, "center_y": 0} + kwargs_shear = {"gamma1": 0.01, "gamma2": 0} kwargs_lens = [kwargs_spep, kwargs_sis, kwargs_shear] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) @@ -77,17 +138,22 @@ def test_central_image(self): sourcePos_y = -0.1 min_distance = 0.05 search_window = 10 - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=min_distance, - search_window=search_window, - precision_limit=10 ** (-10), num_iter_max=10) + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + 
num_iter_max=10, + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) print(x_pos, y_pos) assert len(x_pos) == 4 def test_example(self): - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) @@ -95,32 +161,60 @@ def test_example(self): sourcePos_y = 0.0 min_distance = 0.05 search_window = 10 - gamma = 2. + gamma = 2.0 gamma1, gamma2 = -0.04, -0.1 - kwargs_shear = {'gamma1': gamma1, 'gamma2': gamma2} # shear values to the source plane - kwargs_spemd = {'theta_E': 1., 'gamma': gamma, 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.01, - 'e2': 0.05} # parameters of the deflector lens model + kwargs_shear = { + "gamma1": gamma1, + "gamma2": gamma2, + } # shear values to the source plane + kwargs_spemd = { + "theta_E": 1.0, + "gamma": gamma, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.01, + "e2": 0.05, + } # parameters of the deflector lens model kwargs_lens = [kwargs_spemd, kwargs_shear] - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=min_distance, - search_window=search_window, - precision_limit=10 ** (-10), num_iter_max=10, - arrival_time_sort=True) - - x_pos_non_linear, y_pos_non_linear = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - min_distance=min_distance, - search_window=search_window, - precision_limit=10 ** (-10), num_iter_max=10, - arrival_time_sort=True, non_linear=True) - - x_pos_stoch, y_pos_stoch = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - solver='stochastic', - search_window=search_window, - precision_limit=10 ** (-10), - arrival_time_sort=True, x_center=0, - y_center=0, num_random=100, - ) + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=10, + arrival_time_sort=True, + ) + + ( + x_pos_non_linear, + y_pos_non_linear, + ) = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + min_distance=min_distance, + search_window=search_window, + precision_limit=10 ** (-10), + num_iter_max=10, + arrival_time_sort=True, + non_linear=True, + ) + + x_pos_stoch, y_pos_stoch = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + solver="stochastic", + search_window=search_window, + precision_limit=10 ** (-10), + arrival_time_sort=True, + x_center=0, + y_center=0, + num_random=100, + ) assert len(x_pos) == 4 assert len(x_pos_stoch) == 4 assert len(x_pos_non_linear) == 4 @@ -128,52 +222,90 @@ def test_example(self): npt.assert_almost_equal(x_pos, x_pos_non_linear, decimal=5) def test_analytical_lens_equation_solver(self): - lensModel = LensModel(['EPL_NUMBA', 'SHEAR']) + lensModel = LensModel(["EPL_NUMBA", "SHEAR"]) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.03 sourcePos_y = 0.0 - kwargs_lens = [{'theta_E': 1., 'gamma': 2.2, 'center_x': 0.01, 'center_y': 0.02, 'e1': 0.01, 'e2': 0.05}, - {'gamma1': -0.04, 'gamma2': -0.1, 'ra_0': 0.01, 'dec_0': 0.02}] - - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, solver='analytical') + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.2, + "center_x": 0.01, + "center_y": 0.02, + "e1": 0.01, + "e2": 
0.05, + }, + {"gamma1": -0.04, "gamma2": -0.1, "ra_0": 0.01, "dec_0": 0.02}, + ] + + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, sourcePos_y, kwargs_lens, solver="analytical" + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) assert len(source_x) == len(source_y) >= 4 npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) npt.assert_almost_equal(sourcePos_y, source_y, decimal=10) - x_pos_ls, y_pos_ls = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, solver='analytical') - for x, y in zip(x_pos_ls, y_pos_ls): # Check if it found all solutions lenstronomy found - assert np.sqrt((x-x_pos)**2+(y-y_pos)**2).min() < 1e-8 + x_pos_ls, y_pos_ls = lensEquationSolver.image_position_from_source( + sourcePos_x, sourcePos_y, kwargs_lens, solver="analytical" + ) + for x, y in zip( + x_pos_ls, y_pos_ls + ): # Check if it found all solutions lenstronomy found + assert np.sqrt((x - x_pos) ** 2 + (y - y_pos) ** 2).min() < 1e-8 # here we test with shear and mass profile centroids not aligned - lensModel = LensModel(['EPL_NUMBA', 'SHEAR']) + lensModel = LensModel(["EPL_NUMBA", "SHEAR"]) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.03 sourcePos_y = 0.0 - kwargs_lens = [{'theta_E': 1., 'gamma': 2.2, 'center_x': 0.01, 'center_y': 0.02, 'e1': 0.01, 'e2': 0.05}, - {'gamma1': -0.04, 'gamma2': -0.1, 'ra_0': 0.0, 'dec_0': 1.0}] - - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - solver='analytical') + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.2, + "center_x": 0.01, + "center_y": 0.02, + "e1": 0.01, + "e2": 0.05, + }, + {"gamma1": -0.04, "gamma2": -0.1, "ra_0": 0.0, "dec_0": 1.0}, + ] + + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, sourcePos_y, kwargs_lens, solver="analytical" + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) assert len(source_x) == len(source_y) >= 2 npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) npt.assert_almost_equal(sourcePos_y, source_y, decimal=10) def test_caustics(self): - lm = LensModel(['EPL_NUMBA', 'SHEAR']) + lm = LensModel(["EPL_NUMBA", "SHEAR"]) leqs = LensEquationSolver(lm) - kwargs = [{'theta_E': 1., 'e1': 0.5, 'e2': 0.1, 'center_x': 0.0, 'center_y': 0.0, 'gamma': 1.9}, - {'gamma1': 0.03, 'gamma2': 0.01, 'ra_0': 0.0, 'dec_0': 0.0}] + kwargs = [ + { + "theta_E": 1.0, + "e1": 0.5, + "e2": 0.1, + "center_x": 0.0, + "center_y": 0.0, + "gamma": 1.9, + }, + {"gamma1": 0.03, "gamma2": 0.01, "ra_0": 0.0, "dec_0": 0.0}, + ] # Calculate the caustics and a few critical curves. 
- caus = caustics_epl_shear(kwargs, return_which='caustic') - lensplane_caus = caustics_epl_shear(kwargs, return_which='caustic', sourceplane=False) - cut = caustics_epl_shear(kwargs, return_which='cut') - lensplane_cut = caustics_epl_shear(kwargs, return_which='cut', sourceplane=False) - twoimg = caustics_epl_shear(kwargs, return_which='double') - fourimg = caustics_epl_shear(kwargs, return_which='quad') + caus = caustics_epl_shear(kwargs, return_which="caustic") + lensplane_caus = caustics_epl_shear( + kwargs, return_which="caustic", sourceplane=False + ) + cut = caustics_epl_shear(kwargs, return_which="cut") + lensplane_cut = caustics_epl_shear( + kwargs, return_which="cut", sourceplane=False + ) + twoimg = caustics_epl_shear(kwargs, return_which="double") + fourimg = caustics_epl_shear(kwargs, return_which="quad") min_mag = np.abs(lm.magnification(*lensplane_caus, kwargs)).min() assert min_mag > 1e12 assert np.abs(lm.magnification(*lensplane_cut, kwargs)).min() > 1e12 @@ -181,10 +313,12 @@ def test_caustics(self): # Test whether the caustics indeed the number of images they say N = 20 xpl, ypl = np.linspace(-1, 1, N), np.linspace(-1, 1, N) - xgr, ygr = np.meshgrid(xpl, ypl, indexing='ij') + xgr, ygr = np.meshgrid(xpl, ypl, indexing="ij") xf, yf = xgr.flatten(), ygr.flatten() - sols = [leqs.image_position_from_source(x, y , kwargs, solver='analytical') - for x, y in zip(xf, yf)] + sols = [ + leqs.image_position_from_source(x, y, kwargs, solver="analytical") + for x, y in zip(xf, yf) + ] numsols = np.array([len(p[0]) for p in sols]) from matplotlib.path import Path @@ -203,32 +337,61 @@ def test_analytical_sie(self): sourcePos_x = 0.03 sourcePos_y = 0.0 - lensModel = LensModel(['SIE']) + lensModel = LensModel(["SIE"]) lensEquationSolver = LensEquationSolver(lensModel) - kwargs_lens = [{'theta_E': 1., 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.5, 'e2': 0.05}, ] - - x_pos, y_pos = lensEquationSolver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, - solver='analytical', magnification_limit=1e-3) + kwargs_lens = [ + {"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0, "e1": 0.5, "e2": 0.05}, + ] + + x_pos, y_pos = lensEquationSolver.image_position_from_source( + sourcePos_x, + sourcePos_y, + kwargs_lens, + solver="analytical", + magnification_limit=1e-3, + ) source_x, source_y = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens) assert len(source_x) == len(source_y) == 4 npt.assert_almost_equal(sourcePos_x, source_x, decimal=10) npt.assert_almost_equal(sourcePos_y, source_y, decimal=10) def test_assertions(self): - lensModel = LensModel(['SPEP']) + lensModel = LensModel(["SPEP"]) lensEquationSolver = LensEquationSolver(lensModel) - kwargs_lens = [{'theta_E': 1, 'gamma': 2, 'e1': 0.2, 'e2': -0.03, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + { + "theta_E": 1, + "gamma": 2, + "e1": 0.2, + "e2": -0.03, + "center_x": 0, + "center_y": 0, + } + ] with pytest.raises(ValueError): - lensEquationSolver.image_position_from_source(0.1, 0., kwargs_lens, solver='analytical') + lensEquationSolver.image_position_from_source( + 0.1, 0.0, kwargs_lens, solver="analytical" + ) - lensModel = LensModel(['EPL_NUMBA', 'SHEAR']) + lensModel = LensModel(["EPL_NUMBA", "SHEAR"]) lensEquationSolver = LensEquationSolver(lensModel) - kwargs_lens = [{'theta_E': 1., 'gamma': 2.2, 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.01, 'e2': 0.05}, - {'gamma1': -0.04, 'gamma2': -0.1, 'ra_0': 0.0, 'dec_0': 0.0}] + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.2, + "center_x": 0.0, + "center_y": 0.0, + "e1": 
0.01, + "e2": 0.05, + }, + {"gamma1": -0.04, "gamma2": -0.1, "ra_0": 0.0, "dec_0": 0.0}, + ] with pytest.raises(ValueError): - lensEquationSolver.image_position_from_source(0.1, 0., kwargs_lens, solver='nonexisting') + lensEquationSolver.image_position_from_source( + 0.1, 0.0, kwargs_lens, solver="nonexisting" + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Solver/test_solver.py b/test/test_LensModel/test_Solver/test_solver.py index 912e9eaf3..49a9c24be 100644 --- a/test/test_LensModel/test_Solver/test_solver.py +++ b/test/test_LensModel/test_Solver/test_solver.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -10,7 +10,6 @@ class TestSolver4Point(object): - def setup_method(self): """ @@ -19,9 +18,9 @@ def setup_method(self): pass def test_constraint_lensmodel(self): - lens_model_list = ['SPEP', 'SIS'] + lens_model_list = ["SPEP", "SIS"] lensModel = LensModel(lens_model_list) - solver = Solver(solver_type='PROFILE', lensModel=lensModel, num_images=4) + solver = Solver(solver_type="PROFILE", lensModel=lensModel, num_images=4) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.1 @@ -31,65 +30,146 @@ def test_constraint_lensmodel(self): gamma = 1.9 phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=deltapix, search_window=numPix*deltapix) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0}, {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) - - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], 1., decimal=3) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.0, + "center_y": 0, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + + npt.assert_almost_equal( + kwargs_lens_new[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + 
kwargs_lens_new[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) + + npt.assert_almost_equal(kwargs_lens_new[0]["theta_E"], 1.0, decimal=3) lensModel = LensModel(lens_model_list=lens_model_list) - x_source_new, y_source_new = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) - dist = np.sqrt((x_source_new - x_source_new[0]) ** 2 + (y_source_new - y_source_new[0]) ** 2) + x_source_new, y_source_new = lensModel.ray_shooting( + x_pos, y_pos, kwargs_lens_new + ) + dist = np.sqrt( + (x_source_new - x_source_new[0]) ** 2 + + (y_source_new - y_source_new[0]) ** 2 + ) assert np.max(dist) < 0.000001 - kwargs_ps4 = [{'ra_image': x_pos, 'dec_image': y_pos}] + kwargs_ps4 = [{"ra_image": x_pos, "dec_image": y_pos}] kwargs_lens_new = solver.update_solver(kwargs_lens_init, x_pos, y_pos) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) + npt.assert_almost_equal( + kwargs_lens_new[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) def test_add_fixed_lens(self): - lens_model_list = ['SPEP', 'SHEAR_GAMMA_PSI'] + lens_model_list = ["SPEP", "SHEAR_GAMMA_PSI"] lensModel = LensModel(lens_model_list) phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens_init = [{'theta_E': 1., 'gamma': 2, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma_ext': 0.1, 'psi_ext': 0.5}] + kwargs_lens_init = [ + { + "theta_E": 1.0, + "gamma": 2, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma_ext": 0.1, "psi_ext": 0.5}, + ] kwargs_fixed_lens_list = [{}, {}] - solver = Solver(solver_type='PROFILE', lensModel=lensModel, num_images=4) - kwargs_fixed_lens = solver.add_fixed_lens(kwargs_fixed_lens_list, kwargs_lens_init) - assert kwargs_fixed_lens[0]['theta_E'] == kwargs_lens_init[0]['theta_E'] - - solver = Solver(solver_type='CENTER', lensModel=lensModel, num_images=2) - kwargs_fixed_lens = solver.add_fixed_lens(kwargs_fixed_lens_list, kwargs_lens_init) - assert kwargs_fixed_lens[0]['center_x'] == kwargs_lens_init[0]['center_x'] - - solver = Solver(solver_type='ELLIPSE', lensModel=lensModel, num_images=2) - kwargs_fixed_lens = solver.add_fixed_lens(kwargs_fixed_lens_list, kwargs_lens_init) - assert kwargs_fixed_lens[0]['e1'] == kwargs_lens_init[0]['e1'] - - solver = Solver(solver_type='PROFILE_SHEAR', lensModel=lensModel, num_images=4) - kwargs_fixed_lens = solver.add_fixed_lens(kwargs_fixed_lens_list, kwargs_lens_init) - assert kwargs_fixed_lens[0]['center_x'] == kwargs_lens_init[0]['center_x'] - - lens_model_list = ['NFW_ELLIPSE', 'SHEAR'] + solver = Solver(solver_type="PROFILE", lensModel=lensModel, num_images=4) + kwargs_fixed_lens = solver.add_fixed_lens( + kwargs_fixed_lens_list, kwargs_lens_init + ) + assert kwargs_fixed_lens[0]["theta_E"] == 
kwargs_lens_init[0]["theta_E"] + + solver = Solver(solver_type="CENTER", lensModel=lensModel, num_images=2) + kwargs_fixed_lens = solver.add_fixed_lens( + kwargs_fixed_lens_list, kwargs_lens_init + ) + assert kwargs_fixed_lens[0]["center_x"] == kwargs_lens_init[0]["center_x"] + + solver = Solver(solver_type="ELLIPSE", lensModel=lensModel, num_images=2) + kwargs_fixed_lens = solver.add_fixed_lens( + kwargs_fixed_lens_list, kwargs_lens_init + ) + assert kwargs_fixed_lens[0]["e1"] == kwargs_lens_init[0]["e1"] + + solver = Solver(solver_type="PROFILE_SHEAR", lensModel=lensModel, num_images=4) + kwargs_fixed_lens = solver.add_fixed_lens( + kwargs_fixed_lens_list, kwargs_lens_init + ) + assert kwargs_fixed_lens[0]["center_x"] == kwargs_lens_init[0]["center_x"] + + lens_model_list = ["NFW_ELLIPSE", "SHEAR"] lensModel = LensModel(lens_model_list) - kwargs_lens_init = [{'alpha_Rs': 1., 'Rs': 4, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'e1': 0.1, 'e2': 0.5}] + kwargs_lens_init = [ + { + "alpha_Rs": 1.0, + "Rs": 4, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"e1": 0.1, "e2": 0.5}, + ] kwargs_fixed_lens_list = [{}, {}] - solver = Solver(solver_type='PROFILE', lensModel=lensModel, num_images=4) - kwargs_fixed_lens = solver.add_fixed_lens(kwargs_fixed_lens_list, kwargs_lens_init) - assert kwargs_fixed_lens[0]['alpha_Rs'] == kwargs_lens_init[0]['alpha_Rs'] + solver = Solver(solver_type="PROFILE", lensModel=lensModel, num_images=4) + kwargs_fixed_lens = solver.add_fixed_lens( + kwargs_fixed_lens_list, kwargs_lens_init + ) + assert kwargs_fixed_lens[0]["alpha_Rs"] == kwargs_lens_init[0]["alpha_Rs"] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Solver/test_solver2.py b/test/test_LensModel/test_Solver/test_solver2.py index 187e8f250..946333502 100644 --- a/test/test_LensModel/test_Solver/test_solver2.py +++ b/test/test_LensModel/test_Solver/test_solver2.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -12,15 +12,14 @@ class TestSolver(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): pass def test_subtract(self): - lensModel = LensModel(['SPEP']) - solver_spep_center = Solver2Point(lensModel, solver_type='CENTER') + lensModel = LensModel(["SPEP"]) + solver_spep_center = Solver2Point(lensModel, solver_type="CENTER") x_cat = np.array([0, 0]) y_cat = np.array([1, 2]) a = solver_spep_center._subtract_constraint(x_cat, y_cat) @@ -28,41 +27,89 @@ def test_subtract(self): assert a[1] == 1 def test_all_spep(self): - lensModel = LensModel(['SPEP']) - solver_spep_center = Solver2Point(lensModel, solver_type='CENTER') - solver_spep_ellipse = Solver2Point(lensModel, solver_type='ELLIPSE') + lensModel = LensModel(["SPEP"]) + solver_spep_center = Solver2Point(lensModel, solver_type="CENTER") + solver_spep_ellipse = Solver2Point(lensModel, solver_type="ELLIPSE") image_position_spep = LensEquationSolver(lensModel) sourcePos_x = 0.1 sourcePos_y = 0.03 gamma = 1.9 phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': 1, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}] - x_pos, y_pos = image_position_spep.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, min_distance=0.01, search_window=5, precision_limit=10**(-10), num_iter_max=10) - print(x_pos, y_pos, 'test') + kwargs_lens = [ + { + "theta_E": 1, + 
"gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + } + ] + x_pos, y_pos = image_position_spep.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=0.01, + search_window=5, + precision_limit=10 ** (-10), + num_iter_max=10, + ) + print(x_pos, y_pos, "test") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'theta_E': 1, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - kwargs_out_center, precision = solver_spep_center.constraint_lensmodel(x_pos, y_pos, kwargs_init) - - kwargs_init = [{'theta_E': 1, 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0.1, 'center_y': -0.1}] - kwargs_out_ellipse, precision = solver_spep_ellipse.constraint_lensmodel(x_pos, y_pos, kwargs_init) - - npt.assert_almost_equal(kwargs_out_center[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], -0.1, decimal=3) - - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], e1, decimal=3) + kwargs_init = [ + { + "theta_E": 1, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + kwargs_out_center, precision = solver_spep_center.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) + + kwargs_init = [ + { + "theta_E": 1, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.1, + "center_y": -0.1, + } + ] + kwargs_out_ellipse, precision = solver_spep_ellipse.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) + + npt.assert_almost_equal( + kwargs_out_center[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out_center[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) + npt.assert_almost_equal(kwargs_out_center[0]["center_y"], -0.1, decimal=3) + + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal(kwargs_out_ellipse[0]["e1"], e1, decimal=3) def test_all_nfw(self): - lensModel = LensModel(['SPEP']) - solver_nfw_ellipse = Solver2Point(lensModel, solver_type='ELLIPSE') - solver_nfw_center = Solver2Point(lensModel, solver_type='CENTER') - spep = LensModel(['SPEP']) + lensModel = LensModel(["SPEP"]) + solver_nfw_ellipse = Solver2Point(lensModel, solver_type="ELLIPSE") + solver_nfw_center = Solver2Point(lensModel, solver_type="CENTER") + spep = LensModel(["SPEP"]) - image_position_nfw = LensEquationSolver(LensModel(['SPEP', 'NFW'])) + image_position_nfw = LensEquationSolver(LensModel(["SPEP", "NFW"])) sourcePos_x = 0.1 sourcePos_y = 0.03 deltapix = 0.05 @@ -70,108 +117,238 @@ def test_all_nfw(self): gamma = 1.9 Rs = 0.1 nfw = NFW() - alpha_Rs = nfw.rho02alpha(1., Rs) + alpha_Rs = nfw.rho02alpha(1.0, Rs) phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'Rs': Rs, 'alpha_Rs': alpha_Rs, 'center_x': -0.5, 'center_y': 0.5}] - x_pos, y_pos = image_position_nfw.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, min_distance=deltapix, search_window=numPix*deltapix) - print(len(x_pos), 'number of images') + kwargs_lens = [ + { + "theta_E": 
1.0, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"Rs": Rs, "alpha_Rs": alpha_Rs, "center_x": -0.5, "center_y": 0.5}, + ] + x_pos, y_pos = image_position_nfw.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + print(len(x_pos), "number of images") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'theta_E': 1, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0., 'center_y': 0}, - {'Rs': Rs, 'alpha_Rs': alpha_Rs, 'center_x': -0.5, 'center_y': 0.5}] - kwargs_out_center, precision = solver_nfw_center.constraint_lensmodel(x_pos, y_pos, kwargs_init) + kwargs_init = [ + { + "theta_E": 1, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.0, + "center_y": 0, + }, + {"Rs": Rs, "alpha_Rs": alpha_Rs, "center_x": -0.5, "center_y": 0.5}, + ] + kwargs_out_center, precision = solver_nfw_center.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) source_x, source_y = spep.ray_shooting(x_pos[0], y_pos[0], kwargs_out_center) - x_pos_new, y_pos_new = image_position_nfw.findBrightImage(source_x, source_y, kwargs_out_center, numImages=2, min_distance=deltapix, search_window=numPix*deltapix) - print(kwargs_out_center, 'kwargs_out_center') + x_pos_new, y_pos_new = image_position_nfw.findBrightImage( + source_x, + source_y, + kwargs_out_center, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + print(kwargs_out_center, "kwargs_out_center") npt.assert_almost_equal(x_pos_new[0], x_pos[0], decimal=2) npt.assert_almost_equal(y_pos_new[0], y_pos[0], decimal=2) - npt.assert_almost_equal(kwargs_out_center[0]['center_x'], kwargs_lens[0]['center_x'], decimal=2) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], kwargs_lens[0]['center_y'], decimal=2) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], -0.1, decimal=2) - - kwargs_init = [{'theta_E': 1., 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0.1, 'center_y': -0.1}, - {'Rs': Rs, 'alpha_Rs': alpha_Rs, 'center_x': -0.5, 'center_y': 0.5}] - kwargs_out_ellipse, precision = solver_nfw_ellipse.constraint_lensmodel(x_pos, y_pos, kwargs_init) - - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], kwargs_lens[0]['e1'], decimal=2) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e2'], kwargs_lens[0]['e2'], decimal=2) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], e1, decimal=2) + npt.assert_almost_equal( + kwargs_out_center[0]["center_x"], kwargs_lens[0]["center_x"], decimal=2 + ) + npt.assert_almost_equal( + kwargs_out_center[0]["center_y"], kwargs_lens[0]["center_y"], decimal=2 + ) + npt.assert_almost_equal(kwargs_out_center[0]["center_y"], -0.1, decimal=2) + + kwargs_init = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.1, + "center_y": -0.1, + }, + {"Rs": Rs, "alpha_Rs": alpha_Rs, "center_x": -0.5, "center_y": 0.5}, + ] + kwargs_out_ellipse, precision = solver_nfw_ellipse.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) + + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e1"], kwargs_lens[0]["e1"], decimal=2 + ) + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e2"], kwargs_lens[0]["e2"], decimal=2 + ) + npt.assert_almost_equal(kwargs_out_ellipse[0]["e1"], e1, decimal=2) def test_all_spep_sis(self): - lensModel = LensModel(['SPEP', 'SIS']) - solver_ellipse = Solver2Point(lensModel, solver_type='ELLIPSE') - solver_center = Solver2Point(lensModel, solver_type='CENTER') - spep = LensModel(['SPEP', 'SIS']) + lensModel = 
LensModel(["SPEP", "SIS"]) + solver_ellipse = Solver2Point(lensModel, solver_type="ELLIPSE") + solver_center = Solver2Point(lensModel, solver_type="CENTER") + spep = LensModel(["SPEP", "SIS"]) image_position = LensEquationSolver(lensModel) sourcePos_x = 0.1 sourcePos_y = 0.03 deltapix = 0.05 numPix = 100 gamma = 1.9 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.2, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 0.6, 'center_x': -0.5, 'center_y': 0.5}] - x_pos, y_pos = image_position.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, min_distance=deltapix, search_window=numPix*deltapix, precision_limit=10**(-10)) - print(len(x_pos), 'number of images') + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.2, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.6, "center_x": -0.5, "center_y": 0.5}, + ] + x_pos, y_pos = image_position.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + precision_limit=10 ** (-10), + ) + print(len(x_pos), "number of images") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'theta_E': 1, 'gamma': gamma, 'e1': 0.2, 'e2': -0.03, 'center_x': 0., 'center_y': 0}, - {'theta_E': 0.6, 'center_x': -0.5, 'center_y': 0.5}] - kwargs_out_center, precision = solver_center.constraint_lensmodel(x_pos, y_pos, kwargs_init) - print(kwargs_out_center, 'output') + kwargs_init = [ + { + "theta_E": 1, + "gamma": gamma, + "e1": 0.2, + "e2": -0.03, + "center_x": 0.0, + "center_y": 0, + }, + {"theta_E": 0.6, "center_x": -0.5, "center_y": 0.5}, + ] + kwargs_out_center, precision = solver_center.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) + print(kwargs_out_center, "output") source_x, source_y = spep.ray_shooting(x_pos[0], y_pos[0], kwargs_out_center) - x_pos_new, y_pos_new = image_position.findBrightImage(source_x, source_y, kwargs_out_center, numImages=2, min_distance=deltapix, search_window=numPix*deltapix) + x_pos_new, y_pos_new = image_position.findBrightImage( + source_x, + source_y, + kwargs_out_center, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) npt.assert_almost_equal(x_pos_new[0], x_pos[0], decimal=3) npt.assert_almost_equal(y_pos_new[0], y_pos[0], decimal=3) - npt.assert_almost_equal(kwargs_out_center[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) - npt.assert_almost_equal(kwargs_out_center[0]['center_y'], -0.1, decimal=3) - - kwargs_init = [{'theta_E': 1., 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 0.6, 'center_x': -0.5, 'center_y': 0.5}] - kwargs_out_ellipse, precision = solver_ellipse.constraint_lensmodel(x_pos, y_pos, kwargs_init) - - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_out_ellipse[0]['e1'], 0.2, decimal=3) + npt.assert_almost_equal( + kwargs_out_center[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out_center[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) + npt.assert_almost_equal(kwargs_out_center[0]["center_y"], -0.1, decimal=3) + + kwargs_init = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.6, "center_x": -0.5, 
"center_y": 0.5}, + ] + kwargs_out_ellipse, precision = solver_ellipse.constraint_lensmodel( + x_pos, y_pos, kwargs_init + ) + + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out_ellipse[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal(kwargs_out_ellipse[0]["e1"], 0.2, decimal=3) def test_shapelet_cart(self): - lens_model_list = ['SHAPELETS_CART', 'SIS'] + lens_model_list = ["SHAPELETS_CART", "SIS"] lens = LensModel(lens_model_list) - solver = Solver2Point(lens, solver_type='SHAPELETS') + solver = Solver2Point(lens, solver_type="SHAPELETS") image_position = LensEquationSolver(lens) sourcePos_x = 0.1 sourcePos_y = 0.03 deltapix = 0.05 numPix = 100 - kwargs_lens = [{'coeffs': [1., 0., 0.1, 1.], 'beta': 1.}, - {'theta_E': 1., 'center_x': -0.1, 'center_y': 0.1}] - x_pos, y_pos = image_position.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, min_distance=deltapix, search_window=numPix*deltapix, precision_limit=10**(-10)) - print(len(x_pos), 'number of images') + kwargs_lens = [ + {"coeffs": [1.0, 0.0, 0.1, 1.0], "beta": 1.0}, + {"theta_E": 1.0, "center_x": -0.1, "center_y": 0.1}, + ] + x_pos, y_pos = image_position.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + precision_limit=10 ** (-10), + ) + print(len(x_pos), "number of images") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'coeffs': [1., 0., 0.1, 1.], 'beta': 1.}, - {'theta_E': 1., 'center_x': -0.1, 'center_y': 0.1}] + kwargs_init = [ + {"coeffs": [1.0, 0.0, 0.1, 1.0], "beta": 1.0}, + {"theta_E": 1.0, "center_x": -0.1, "center_y": 0.1}, + ] kwargs_out, precision = solver.constraint_lensmodel(x_pos, y_pos, kwargs_init) - print(kwargs_out, 'output') + print(kwargs_out, "output") source_x, source_y = lens.ray_shooting(x_pos[0], y_pos[0], kwargs_out) - x_pos_new, y_pos_new = image_position.findBrightImage(source_x, source_y, kwargs_out, numImages=2, min_distance=deltapix, search_window=numPix*deltapix) + x_pos_new, y_pos_new = image_position.findBrightImage( + source_x, + source_y, + kwargs_out, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) npt.assert_almost_equal(x_pos_new[0], x_pos[0], decimal=3) npt.assert_almost_equal(y_pos_new[0], y_pos[0], decimal=3) - npt.assert_almost_equal(kwargs_out[0]['coeffs'][1], kwargs_lens[0]['coeffs'][1], decimal=3) - npt.assert_almost_equal(kwargs_out[0]['coeffs'][2], kwargs_lens[0]['coeffs'][2], decimal=3) + npt.assert_almost_equal( + kwargs_out[0]["coeffs"][1], kwargs_lens[0]["coeffs"][1], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out[0]["coeffs"][2], kwargs_lens[0]["coeffs"][2], decimal=3 + ) def test_theta_E_phi(self): - lensModel = LensModel(['SPEP', 'SHEAR']) - solver = Solver2Point(lensModel, solver_type='THETA_E_PHI') + lensModel = LensModel(["SPEP", "SHEAR"]) + solver = Solver2Point(lensModel, solver_type="THETA_E_PHI") image_position = LensEquationSolver(lensModel) sourcePos_x = 0.1 @@ -179,29 +356,68 @@ def test_theta_E_phi(self): deltapix = 0.05 numPix = 100 gamma = 1.9 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.1, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma1': 0.03, 'gamma2': 0.0}] - x_pos, y_pos = image_position.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, min_distance=deltapix, search_window=numPix*deltapix, precision_limit=10**(-15)) - print(len(x_pos), 'number of images') + 
kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.1, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma1": 0.03, "gamma2": 0.0}, + ] + x_pos, y_pos = image_position.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + precision_limit=10 ** (-15), + ) + print(len(x_pos), "number of images") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'theta_E': 1.9, 'gamma': gamma, 'e1': 0.1, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma1': 0., 'gamma2': 0.03}] + kwargs_init = [ + { + "theta_E": 1.9, + "gamma": gamma, + "e1": 0.1, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma1": 0.0, "gamma2": 0.03}, + ] kwargs_out, precision = solver.constraint_lensmodel(x_pos, y_pos, kwargs_init) - print(kwargs_out, 'output') + print(kwargs_out, "output") source_x, source_y = lensModel.ray_shooting(x_pos[0], y_pos[0], kwargs_out) - x_pos_new, y_pos_new = image_position.findBrightImage(source_x, source_y, kwargs_out, numImages=2, min_distance=deltapix, search_window=numPix*deltapix) + x_pos_new, y_pos_new = image_position.findBrightImage( + source_x, + source_y, + kwargs_out, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) npt.assert_almost_equal(x_pos_new[0], x_pos[0], decimal=3) npt.assert_almost_equal(y_pos_new[0], y_pos[0], decimal=3) - npt.assert_almost_equal(kwargs_out[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_out[1]['gamma1'], kwargs_lens[1]['gamma1'], decimal=2) - npt.assert_almost_equal(kwargs_out[1]['gamma2'], kwargs_lens[1]['gamma2'], decimal=2) + npt.assert_almost_equal( + kwargs_out[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_out[1]["gamma1"], kwargs_lens[1]["gamma1"], decimal=2 + ) + npt.assert_almost_equal( + kwargs_out[1]["gamma2"], kwargs_lens[1]["gamma2"], decimal=2 + ) def test_theta_E_ellipse(self): - lensModel = LensModel(['SPEP', 'SHEAR']) - solver = Solver2Point(lensModel, solver_type='THETA_E_ELLIPSE') + lensModel = LensModel(["SPEP", "SHEAR"]) + solver = Solver2Point(lensModel, solver_type="THETA_E_ELLIPSE") image_position = LensEquationSolver(lensModel) sourcePos_x = 0.1 @@ -209,75 +425,128 @@ def test_theta_E_ellipse(self): deltapix = 0.05 numPix = 100 gamma = 1.9 - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.1, 'e2': -0.03, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma1': 0.03, 'gamma2': 0.0}] - x_pos, y_pos = image_position.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=2, - min_distance=deltapix, search_window=numPix * deltapix, - precision_limit=10 ** (-15)) - print(len(x_pos), 'number of images') + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.1, + "e2": -0.03, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma1": 0.03, "gamma2": 0.0}, + ] + x_pos, y_pos = image_position.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + precision_limit=10 ** (-15), + ) + print(len(x_pos), "number of images") x_pos = x_pos[:2] y_pos = y_pos[:2] - kwargs_init = [{'theta_E': 1.9, 'gamma': gamma, 'e1': -0.03, 'e2': 0.1, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma1': 0.03, 'gamma2': 0.0}] + kwargs_init = [ + { + "theta_E": 1.9, + "gamma": gamma, + "e1": -0.03, + "e2": 0.1, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma1": 0.03, "gamma2": 0.0}, + ] kwargs_out, precision = 
solver.constraint_lensmodel(x_pos, y_pos, kwargs_init) - print(kwargs_out, 'output') + print(kwargs_out, "output") source_x, source_y = lensModel.ray_shooting(x_pos[0], y_pos[0], kwargs_out) - x_pos_new, y_pos_new = image_position.findBrightImage(source_x, source_y, kwargs_out, numImages=2, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = image_position.findBrightImage( + source_x, + source_y, + kwargs_out, + numImages=2, + min_distance=deltapix, + search_window=numPix * deltapix, + ) npt.assert_almost_equal(x_pos_new[0], x_pos[0], decimal=3) npt.assert_almost_equal(y_pos_new[0], y_pos[0], decimal=3) - npt.assert_almost_equal(kwargs_out[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_out[0]['e1'], kwargs_lens[0]['e1'], decimal=2) - npt.assert_almost_equal(kwargs_out[0]['e2'], kwargs_lens[0]['e2'], decimal=2) + npt.assert_almost_equal( + kwargs_out[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal(kwargs_out[0]["e1"], kwargs_lens[0]["e1"], decimal=2) + npt.assert_almost_equal(kwargs_out[0]["e2"], kwargs_lens[0]["e2"], decimal=2) def test_add_fixed_lens(self): - lensModel = LensModel(lens_model_list=['SPEP', 'SHEAR']) - kwargs_lens_init = [{'theta_E': 1, 'e2': 0}, {'gamma2': 0}] - solver = Solver2Point(lensModel=lensModel, solver_type='THETA_E_PHI', decoupling=True) - kwargs_fixed_added = solver.add_fixed_lens(kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init) + lensModel = LensModel(lens_model_list=["SPEP", "SHEAR"]) + kwargs_lens_init = [{"theta_E": 1, "e2": 0}, {"gamma2": 0}] + solver = Solver2Point( + lensModel=lensModel, solver_type="THETA_E_PHI", decoupling=True + ) + kwargs_fixed_added = solver.add_fixed_lens( + kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init + ) print(kwargs_fixed_added) - assert kwargs_fixed_added[1]['gamma2'] == 0 - - solver = Solver2Point(lensModel=lensModel, solver_type='THETA_E_ELLIPSE', decoupling=True) - kwargs_fixed_added = solver.add_fixed_lens(kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init) - assert kwargs_fixed_added[0]['e2'] == 0 - - lensModel = LensModel(lens_model_list=['SHAPELETS_CART']) - solver = Solver2Point(lensModel=lensModel, solver_type='SHAPELETS', decoupling=True) - kwargs_fixed_added = solver.add_fixed_lens(kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init) + assert kwargs_fixed_added[1]["gamma2"] == 0 + + solver = Solver2Point( + lensModel=lensModel, solver_type="THETA_E_ELLIPSE", decoupling=True + ) + kwargs_fixed_added = solver.add_fixed_lens( + kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init + ) + assert kwargs_fixed_added[0]["e2"] == 0 + + lensModel = LensModel(lens_model_list=["SHAPELETS_CART"]) + solver = Solver2Point( + lensModel=lensModel, solver_type="SHAPELETS", decoupling=True + ) + kwargs_fixed_added = solver.add_fixed_lens( + kwargs_fixed_lens_list=[{}, {}], kwargs_lens_init=kwargs_lens_init + ) assert len(kwargs_fixed_added) == len(kwargs_lens_init) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP']) - Solver2Point(lensModel=lensModel, solver_type='WRONG', decoupling=True) + lensModel = LensModel(lens_model_list=["SPEP"]) + Solver2Point(lensModel=lensModel, solver_type="WRONG", decoupling=True) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP']) - Solver2Point(lensModel=lensModel, solver_type='SHAPELETS', 
decoupling=True) + lensModel = LensModel(lens_model_list=["SPEP"]) + Solver2Point(lensModel=lensModel, solver_type="SHAPELETS", decoupling=True) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP', 'SIS']) - Solver2Point(lensModel=lensModel, solver_type='THETA_E_PHI', decoupling=True) + lensModel = LensModel(lens_model_list=["SPEP", "SIS"]) + Solver2Point( + lensModel=lensModel, solver_type="THETA_E_PHI", decoupling=True + ) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP', 'SHEAR']) - solver = Solver2Point(lensModel=lensModel, solver_type='THETA_E_PHI', decoupling=True) - solver._solver_type = 'WRONG' + lensModel = LensModel(lens_model_list=["SPEP", "SHEAR"]) + solver = Solver2Point( + lensModel=lensModel, solver_type="THETA_E_PHI", decoupling=True + ) + solver._solver_type = "WRONG" solver._update_kwargs(x=None, kwargs_list=None) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP', 'SHEAR']) - solver = Solver2Point(lensModel=lensModel, solver_type='THETA_E_PHI', decoupling=True) - solver._solver_type = 'WRONG' + lensModel = LensModel(lens_model_list=["SPEP", "SHEAR"]) + solver = Solver2Point( + lensModel=lensModel, solver_type="THETA_E_PHI", decoupling=True + ) + solver._solver_type = "WRONG" solver._extract_array(kwargs_list=None) with self.assertRaises(ValueError): - lensModel = LensModel(lens_model_list=['SPEP', 'SHEAR']) - solver = Solver2Point(lensModel=lensModel, solver_type='THETA_E_PHI', decoupling=True) - solver._solver_type = 'WRONG' - solver.add_fixed_lens(kwargs_fixed_lens_list=[None], kwargs_lens_init=[None]) + lensModel = LensModel(lens_model_list=["SPEP", "SHEAR"]) + solver = Solver2Point( + lensModel=lensModel, solver_type="THETA_E_PHI", decoupling=True + ) + solver._solver_type = "WRONG" + solver.add_fixed_lens( + kwargs_fixed_lens_list=[None], kwargs_lens_init=[None] + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Solver/test_solver4.py b/test/test_LensModel/test_Solver/test_solver4.py index f7a7b1915..847a87595 100644 --- a/test/test_LensModel/test_Solver/test_solver4.py +++ b/test/test_LensModel/test_Solver/test_solver4.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -10,7 +10,6 @@ class TestSolver4Point(object): - def setup_method(self): """ @@ -19,7 +18,7 @@ def setup_method(self): pass def test_decoupling(self): - lens_model_list = ['SPEP', 'SIS'] + lens_model_list = ["SPEP", "SIS"] lensModel = LensModel(lens_model_list) solver = Solver4Point(lensModel) solver_decoupled = Solver4Point(lensModel) @@ -31,37 +30,92 @@ def test_decoupling(self): gamma = 1.9 phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=deltapix, search_window=numPix*deltapix) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) phi_G, q = 1.5, 0.9 e1, e2 = 
param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0., 'center_y': 0}, {'theta_E': 0.1, 'center_x': 0.5, 'center_y': 0}] - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - kwargs_lens_new_2, accuracy = solver_decoupled.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.0, + "center_y": 0, + }, + {"theta_E": 0.1, "center_x": 0.5, "center_y": 0}, + ] + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + kwargs_lens_new_2, accuracy = solver_decoupled.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) print(kwargs_lens_new_2) print(kwargs_lens_new) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) + npt.assert_almost_equal( + kwargs_lens_new[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], kwargs_lens_new_2[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens_new_2[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens_new_2[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens_new_2[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens_new_2[0]['center_y'], decimal=3) + npt.assert_almost_equal( + kwargs_lens_new[0]["theta_E"], kwargs_lens_new_2[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens_new_2[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens_new_2[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens_new_2[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_y"], kwargs_lens_new_2[0]["center_y"], decimal=3 + ) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], 1., decimal=3) + npt.assert_almost_equal(kwargs_lens_new[0]["theta_E"], 1.0, decimal=3) lensModel = LensModel(lens_model_list=lens_model_list) - x_source_new, y_source_new = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) - dist = np.sqrt((x_source_new - x_source_new[0]) ** 2 + (y_source_new - y_source_new[0]) ** 2) + x_source_new, y_source_new = lensModel.ray_shooting( + x_pos, y_pos, kwargs_lens_new + ) + dist = np.sqrt( + (x_source_new - x_source_new[0]) ** 2 + + (y_source_new - y_source_new[0]) ** 2 + ) print(dist) assert np.max(dist) < 0.000001 def test_solver_spep(self): - lens_model_list = ['SPEP'] + lens_model_list = ["SPEP"] lensModel = LensModel(lens_model_list) solver = 
Solver4Point(lensModel) lensEquationSolver = LensEquationSolver(lensModel) @@ -73,26 +127,68 @@ def test_solver_spep(self): gamma = 1.9 phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=deltapix, search_window=numPix*deltapix) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + } + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) phi_G, q = 1.5, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0., 'center_y': 0}] - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], kwargs_lens[0]['theta_E'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['theta_E'], 1., decimal=3) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0.0, + "center_y": 0, + } + ] + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["theta_E"], kwargs_lens[0]["theta_E"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) + npt.assert_almost_equal(kwargs_lens_new[0]["theta_E"], 1.0, decimal=3) lensModel = LensModel(lens_model_list=lens_model_list) - x_source_new, y_source_new = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) - dist = np.sqrt((x_source_new - x_source_new[0]) ** 2 + (y_source_new - y_source_new[0]) ** 2) + x_source_new, y_source_new = lensModel.ray_shooting( + x_pos, y_pos, kwargs_lens_new + ) + dist = np.sqrt( + (x_source_new - x_source_new[0]) ** 2 + + (y_source_new - y_source_new[0]) ** 2 + ) print(dist) assert np.max(dist) < 0.000001 def test_solver_nfw(self): - lens_model_list = ['NFW_ELLIPSE', 'SIS'] + lens_model_list = ["NFW_ELLIPSE", "SIS"] lensModel = LensModel(lens_model_list) solver = Solver4Point(lensModel) lensEquationSolver = LensEquationSolver(lensModel) @@ -100,45 +196,111 @@ def test_solver_nfw(self): sourcePos_y = -0.1 deltapix = 0.05 numPix = 150 - Rs = 4. 
+ Rs = 4.0 phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'alpha_Rs': 1., 'Rs': Rs, 'e1': e1, 'e2': e2, 'center_x': 0.1, 'center_y': -0.1}, - {'theta_E': 1, 'center_x': 0, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=deltapix, search_window=numPix*deltapix) + kwargs_lens = [ + { + "alpha_Rs": 1.0, + "Rs": Rs, + "e1": e1, + "e2": e2, + "center_x": 0.1, + "center_y": -0.1, + }, + {"theta_E": 1, "center_x": 0, "center_y": 0}, + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) phi_G, q = 1.5, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens_init = [{'alpha_Rs': 0.5, 'Rs': Rs, 'e1': e1, 'e2': e2, 'center_x': 0., 'center_y': 0}, kwargs_lens[1]] - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - npt.assert_almost_equal(kwargs_lens_new[0]['alpha_Rs'], kwargs_lens[0]['alpha_Rs'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_x'], kwargs_lens[0]['center_x'], decimal=3) - npt.assert_almost_equal(kwargs_lens_new[0]['center_y'], kwargs_lens[0]['center_y'], decimal=3) + kwargs_lens_init = [ + { + "alpha_Rs": 0.5, + "Rs": Rs, + "e1": e1, + "e2": e2, + "center_x": 0.0, + "center_y": 0, + }, + kwargs_lens[1], + ] + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["alpha_Rs"], kwargs_lens[0]["alpha_Rs"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_x"], kwargs_lens[0]["center_x"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["center_y"], kwargs_lens[0]["center_y"], decimal=3 + ) def test_solver_shapelets(self): - lens_model_list = ['SHAPELETS_CART', 'SPEP'] + lens_model_list = ["SHAPELETS_CART", "SPEP"] lensModel = LensModel(lens_model_list) solver = Solver4Point(lensModel) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.1 - sourcePos_y = -0. 
+ sourcePos_y = -0.0 deltapix = 0.05 numPix = 150 coeffs = np.array([0, 0.1, 0.1, 0, 0, -0.1]) - kwargs_lens = [{'beta': 1., 'coeffs': coeffs, 'center_x': 0., 'center_y': 0.}, - {'theta_E': 1., 'gamma': 2, 'e1': 0.1, 'e2': 0, 'center_x': 0, 'center_y': 0}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, min_distance=deltapix, search_window=numPix*deltapix) + kwargs_lens = [ + {"beta": 1.0, "coeffs": coeffs, "center_x": 0.0, "center_y": 0.0}, + { + "theta_E": 1.0, + "gamma": 2, + "e1": 0.1, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) print(x_pos, y_pos) - kwargs_lens_init = [{'beta': 1, 'coeffs': np.zeros_like(coeffs), 'center_x': 0., 'center_y': 0}, kwargs_lens[1]] - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - npt.assert_almost_equal(kwargs_lens_new[0]['beta'], kwargs_lens[0]['beta'], decimal=3) - coeffs_new = kwargs_lens_new[0]['coeffs'] + kwargs_lens_init = [ + { + "beta": 1, + "coeffs": np.zeros_like(coeffs), + "center_x": 0.0, + "center_y": 0, + }, + kwargs_lens[1], + ] + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["beta"], kwargs_lens[0]["beta"], decimal=3 + ) + coeffs_new = kwargs_lens_new[0]["coeffs"] for i in range(len(coeffs)): npt.assert_almost_equal(coeffs_new[i], coeffs[i], decimal=3) def test_solver_simplified(self): - lens_model_list = ['SPEP', 'SHEAR_GAMMA_PSI'] + lens_model_list = ["SPEP", "SHEAR_GAMMA_PSI"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) @@ -149,21 +311,55 @@ def test_solver_simplified(self): gamma = 1.9 gamma_ext = 0.05 psi_ext = 0.4 - #e1, e2 = param_util.phi_gamma_ellipticity(phi=psi_ext, gamma=gamma_ext) - kwargs_lens = [{'theta_E': 1., 'gamma': gamma, 'e1': 0.1, 'e2': -0.1, 'center_x': 0.1, 'center_y': -0.1}, - {'gamma_ext': gamma_ext, 'psi_ext': psi_ext}] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) - e1_new, e2_new = param_util.shear_polar2cartesian(phi=0., gamma=gamma_ext + 0.1) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0., 'e2': 0., 'center_x': 0., 'center_y': 0}, - {'gamma_ext': gamma_ext + 0.1, 'psi_ext': 0}] - solver = Solver4Point(lensModel, solver_type='PROFILE_SHEAR') - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - assert accuracy < 10**(-10) + # e1, e2 = param_util.phi_gamma_ellipticity(phi=psi_ext, gamma=gamma_ext) + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": gamma, + "e1": 0.1, + "e2": -0.1, + "center_x": 0.1, + "center_y": -0.1, + }, + {"gamma_ext": gamma_ext, "psi_ext": psi_ext}, + ] + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + e1_new, e2_new = param_util.shear_polar2cartesian( + phi=0.0, gamma=gamma_ext + 0.1 + ) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0.0, + "e2": 0.0, + "center_x": 0.0, + "center_y": 0, + }, + {"gamma_ext": gamma_ext + 0.1, "psi_ext": 0}, + ] + solver = Solver4Point(lensModel, solver_type="PROFILE_SHEAR") + kwargs_lens_new, accuracy = 
solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + assert accuracy < 10 ** (-10) x_source, y_source = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) x_source, y_source = np.mean(x_source), np.mean(y_source) - x_pos_new, y_pos_new = lensEquationSolver.findBrightImage(x_source, y_source, kwargs_lens_new, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = lensEquationSolver.findBrightImage( + x_source, + y_source, + kwargs_lens_new, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) print(x_pos, x_pos_new) x_pos = np.sort(x_pos) x_pos_new = np.sort(x_pos_new) @@ -174,7 +370,7 @@ def test_solver_simplified(self): npt.assert_almost_equal(y_pos[i], y_pos_new[i], decimal=6) def test_solver_simplified_2(self): - lens_model_list = ['SPEP', 'SHEAR_GAMMA_PSI'] + lens_model_list = ["SPEP", "SHEAR_GAMMA_PSI"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) @@ -185,20 +381,53 @@ def test_solver_simplified_2(self): gamma = 1.96 e1, e2 = -0.01, -0.01 psi_ext, gamma_ext = param_util.shear_cartesian2polar(e1, e2) - kwargs_shear = {'gamma_ext': gamma_ext, 'psi_ext': psi_ext} # gamma_ext: shear strength, psi_ext: shear angel (in radian) - kwargs_spemd = {'theta_E': 1., 'gamma': gamma, 'center_x': 0, 'center_y': 0, 'e1': -0.2, 'e2': -0.03} + kwargs_shear = { + "gamma_ext": gamma_ext, + "psi_ext": psi_ext, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_spemd = { + "theta_E": 1.0, + "gamma": gamma, + "center_x": 0, + "center_y": 0, + "e1": -0.2, + "e2": -0.03, + } kwargs_lens = [kwargs_spemd, kwargs_shear] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0}, - {'gamma_ext': gamma_ext, 'psi_ext': psi_ext}] - solver = Solver4Point(lensModel, solver_type='PROFILE_SHEAR') - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - assert accuracy < 10**(-10) + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.0, + "center_y": 0, + }, + {"gamma_ext": gamma_ext, "psi_ext": psi_ext}, + ] + solver = Solver4Point(lensModel, solver_type="PROFILE_SHEAR") + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + assert accuracy < 10 ** (-10) x_source, y_source = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) x_source, y_source = np.mean(x_source), np.mean(y_source) - x_pos_new, y_pos_new = lensEquationSolver.findBrightImage(x_source, y_source, kwargs_lens_new, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = lensEquationSolver.findBrightImage( + x_source, + y_source, + kwargs_lens_new, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) print(x_pos, x_pos_new) x_pos = np.sort(x_pos) x_pos_new = np.sort(x_pos_new) @@ -207,38 +436,74 @@ def test_solver_simplified_2(self): for i in range(len(x_pos)): npt.assert_almost_equal(x_pos[i], x_pos_new[i], decimal=6) npt.assert_almost_equal(y_pos[i], y_pos_new[i], decimal=6) - npt.assert_almost_equal(kwargs_lens_new[1]['psi_ext'], 
kwargs_lens[1]['psi_ext'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[1]['gamma_ext'], kwargs_lens[1]['gamma_ext'], decimal=8) + npt.assert_almost_equal( + kwargs_lens_new[1]["psi_ext"], kwargs_lens[1]["psi_ext"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[1]["gamma_ext"], kwargs_lens[1]["gamma_ext"], decimal=8 + ) def test_solver_profile_shear(self): - lens_model_list = ['SPEP', 'SHEAR_GAMMA_PSI'] + lens_model_list = ["SPEP", "SHEAR_GAMMA_PSI"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) - sourcePos_x = 0. + sourcePos_x = 0.0 sourcePos_y = 0.1 deltapix = 0.05 numPix = 150 gamma = 1.98 e1, e2 = -0.04, -0.01 gamma_ext = np.sqrt(e1**2 + e2**2) - kwargs_shear = {'gamma_ext': gamma_ext, 'psi_ext': 0.3} # shear values to the source plane - kwargs_spemd = {'theta_E': 1.66, 'gamma': gamma, 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.1, - 'e2': 0.05} # parameters of the deflector lens model + kwargs_shear = { + "gamma_ext": gamma_ext, + "psi_ext": 0.3, + } # shear values to the source plane + kwargs_spemd = { + "theta_E": 1.66, + "gamma": gamma, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.1, + "e2": 0.05, + } # parameters of the deflector lens model kwargs_lens = [kwargs_spemd, kwargs_shear] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) - print(x_pos, y_pos, 'test positions') - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0}, - {'gamma_ext': gamma_ext, 'psi_ext': -0.3}] - solver = Solver4Point(lensModel, solver_type='PROFILE_SHEAR') - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - assert accuracy < 10**(-10) + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + print(x_pos, y_pos, "test positions") + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.0, + "center_y": 0, + }, + {"gamma_ext": gamma_ext, "psi_ext": -0.3}, + ] + solver = Solver4Point(lensModel, solver_type="PROFILE_SHEAR") + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + assert accuracy < 10 ** (-10) x_source, y_source = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) x_source, y_source = np.mean(x_source), np.mean(y_source) - x_pos_new, y_pos_new = lensEquationSolver.findBrightImage(x_source, y_source, kwargs_lens_new, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = lensEquationSolver.findBrightImage( + x_source, + y_source, + kwargs_lens_new, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) print(x_pos, x_pos_new) x_pos = np.sort(x_pos) x_pos_new = np.sort(x_pos_new) @@ -247,44 +512,86 @@ def test_solver_profile_shear(self): for i in range(len(x_pos)): npt.assert_almost_equal(x_pos[i], x_pos_new[i], decimal=6) npt.assert_almost_equal(y_pos[i], y_pos_new[i], decimal=6) - npt.assert_almost_equal(kwargs_lens_new[1]['gamma_ext'], kwargs_lens[1]['gamma_ext'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[1]['psi_ext'], kwargs_lens[1]['psi_ext'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=8) + npt.assert_almost_equal( 
+ kwargs_lens_new[1]["gamma_ext"], kwargs_lens[1]["gamma_ext"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[1]["psi_ext"], kwargs_lens[1]["psi_ext"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=8 + ) def test_solver_profile_shear_2(self): - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] lensModel = LensModel(lens_model_list) lensEquationSolver = LensEquationSolver(lensModel) - sourcePos_x = 0. + sourcePos_x = 0.0 sourcePos_y = 0.1 deltapix = 0.05 numPix = 150 gamma = 1.98 gamma1, gamma2 = -0.04, -0.01 - kwargs_shear = {'gamma1': gamma1, 'gamma2': gamma2} # shear values to the source plane - kwargs_spemd = {'theta_E': 1.66, 'gamma': gamma, 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.1, - 'e2': 0.05} # parameters of the deflector lens model + kwargs_shear = { + "gamma1": gamma1, + "gamma2": gamma2, + } # shear values to the source plane + kwargs_spemd = { + "theta_E": 1.66, + "gamma": gamma, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.1, + "e2": 0.05, + } # parameters of the deflector lens model kwargs_lens = [kwargs_spemd, kwargs_shear] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) - print(x_pos, y_pos, 'test positions') + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + print(x_pos, y_pos, "test positions") - gamma_ext = np.sqrt(gamma1 ** 2 + gamma2 ** 2) - gamma1_init, gamma2_init = param_util.shear_polar2cartesian(gamma=gamma_ext, phi=-1.3) + gamma_ext = np.sqrt(gamma1**2 + gamma2**2) + gamma1_init, gamma2_init = param_util.shear_polar2cartesian( + gamma=gamma_ext, phi=-1.3 + ) - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0}, - {'gamma1': gamma1_init, 'gamma2': gamma2_init}] - solver = Solver4Point(lensModel, solver_type='PROFILE_SHEAR') - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - assert accuracy < 10**(-10) + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0, + "e2": 0, + "center_x": 0.0, + "center_y": 0, + }, + {"gamma1": gamma1_init, "gamma2": gamma2_init}, + ] + solver = Solver4Point(lensModel, solver_type="PROFILE_SHEAR") + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + assert accuracy < 10 ** (-10) x_source, y_source = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) x_source, y_source = np.mean(x_source), np.mean(y_source) - x_pos_new, y_pos_new = lensEquationSolver.findBrightImage(x_source, y_source, kwargs_lens_new, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = lensEquationSolver.findBrightImage( + x_source, + y_source, + kwargs_lens_new, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) print(x_pos, x_pos_new) x_pos = np.sort(x_pos) x_pos_new = np.sort(x_pos_new) @@ -293,14 +600,27 @@ def test_solver_profile_shear_2(self): for i in range(len(x_pos)): npt.assert_almost_equal(x_pos[i], x_pos_new[i], decimal=6) npt.assert_almost_equal(y_pos[i], y_pos_new[i], decimal=6) - npt.assert_almost_equal(kwargs_lens_new[1]['gamma1'], kwargs_lens[1]['gamma1'], decimal=8) - 
npt.assert_almost_equal(kwargs_lens_new[1]['gamma2'], kwargs_lens[1]['gamma2'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[0]['e1'], kwargs_lens[0]['e1'], decimal=8) - npt.assert_almost_equal(kwargs_lens_new[0]['e2'], kwargs_lens[0]['e2'], decimal=8) + npt.assert_almost_equal( + kwargs_lens_new[1]["gamma1"], kwargs_lens[1]["gamma1"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[1]["gamma2"], kwargs_lens[1]["gamma2"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e1"], kwargs_lens[0]["e1"], decimal=8 + ) + npt.assert_almost_equal( + kwargs_lens_new[0]["e2"], kwargs_lens[0]["e2"], decimal=8 + ) def test_solver_multiplane(self): - lens_model_list = ['SPEP', 'SHEAR', 'SIS'] - lensModel = LensModel(lens_model_list, z_source=1, lens_redshift_list=[0.5, 0.5, 0.3], multi_plane=True) + lens_model_list = ["SPEP", "SHEAR", "SIS"] + lensModel = LensModel( + lens_model_list, + z_source=1, + lens_redshift_list=[0.5, 0.5, 0.3], + multi_plane=True, + ) lensEquationSolver = LensEquationSolver(lensModel) sourcePos_x = 0.01 @@ -309,23 +629,57 @@ def test_solver_multiplane(self): numPix = 150 gamma = 1.96 gamma1, gamma2 = 0.01, 0.01 - kwargs_shear = {'gamma1': gamma1, 'gamma2': gamma2} # gamma_ext: shear strength, psi_ext: shear angel (in radian) - kwargs_spemd = {'theta_E': 1., 'gamma': gamma, 'center_x': 0, 'center_y': 0, 'e1': 0.2, 'e2': 0.03} - kwargs_sis = {'theta_E': .1, 'center_x': 1, 'center_y': 0} + kwargs_shear = { + "gamma1": gamma1, + "gamma2": gamma2, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_spemd = { + "theta_E": 1.0, + "gamma": gamma, + "center_x": 0, + "center_y": 0, + "e1": 0.2, + "e2": 0.03, + } + kwargs_sis = {"theta_E": 0.1, "center_x": 1, "center_y": 0} kwargs_lens = [kwargs_spemd, kwargs_shear, kwargs_sis] - x_pos, y_pos = lensEquationSolver.findBrightImage(sourcePos_x, sourcePos_y, kwargs_lens, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) - print(x_pos, y_pos, 'test positions') - kwargs_lens_init = [{'theta_E': 1.3, 'gamma': gamma, 'e1': 0.1, 'e2': 0, 'center_x': 0., 'center_y': 0}, - {'gamma1': gamma1, 'gamma2': gamma2}, {'theta_E': .1, 'center_x': 1, 'center_y': 0}] - solver = Solver4Point(lensModel, solver_type='PROFILE') - kwargs_lens_new, accuracy = solver.constraint_lensmodel(x_pos, y_pos, kwargs_lens_init) - print(kwargs_lens_new, 'kwargs_lens_new') - assert accuracy < 10**(-10) + x_pos, y_pos = lensEquationSolver.findBrightImage( + sourcePos_x, + sourcePos_y, + kwargs_lens, + numImages=4, + min_distance=deltapix, + search_window=numPix * deltapix, + ) + print(x_pos, y_pos, "test positions") + kwargs_lens_init = [ + { + "theta_E": 1.3, + "gamma": gamma, + "e1": 0.1, + "e2": 0, + "center_x": 0.0, + "center_y": 0, + }, + {"gamma1": gamma1, "gamma2": gamma2}, + {"theta_E": 0.1, "center_x": 1, "center_y": 0}, + ] + solver = Solver4Point(lensModel, solver_type="PROFILE") + kwargs_lens_new, accuracy = solver.constraint_lensmodel( + x_pos, y_pos, kwargs_lens_init + ) + print(kwargs_lens_new, "kwargs_lens_new") + assert accuracy < 10 ** (-10) x_source, y_source = lensModel.ray_shooting(x_pos, y_pos, kwargs_lens_new) x_source, y_source = np.mean(x_source), np.mean(y_source) - x_pos_new, y_pos_new = lensEquationSolver.findBrightImage(x_source, y_source, kwargs_lens_new, numImages=4, - min_distance=deltapix, search_window=numPix * deltapix) + x_pos_new, y_pos_new = lensEquationSolver.findBrightImage( + x_source, + y_source, + kwargs_lens_new, + numImages=4, + min_distance=deltapix, + 
search_window=numPix * deltapix, + ) print(x_pos, x_pos_new) x_pos = np.sort(x_pos) x_pos_new = np.sort(x_pos_new) @@ -334,8 +688,10 @@ def test_solver_multiplane(self): for i in range(len(x_pos)): npt.assert_almost_equal(x_pos[i], x_pos_new[i], decimal=6) npt.assert_almost_equal(y_pos[i], y_pos_new[i], decimal=6) - npt.assert_almost_equal(kwargs_lens_new[1]['gamma1'], kwargs_lens[1]['gamma1'], decimal=8) + npt.assert_almost_equal( + kwargs_lens_new[1]["gamma1"], kwargs_lens[1]["gamma1"], decimal=8 + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_Util/test_epl_util.py b/test/test_LensModel/test_Util/test_epl_util.py index 4e56b778a..220bfe9f4 100644 --- a/test/test_LensModel/test_Util/test_epl_util.py +++ b/test/test_LensModel/test_Util/test_epl_util.py @@ -4,5 +4,11 @@ def test_brentq_nojit(): - npt.assert_almost_equal(brentq_nojit(lambda x, args: np.sin(x), np.pi/2, 3*np.pi/2), np.pi, decimal=10) - npt.assert_almost_equal(brentq_nojit(lambda x, args: np.cos(x), np.pi, np.pi/2), np.pi/2, decimal=10) + npt.assert_almost_equal( + brentq_nojit(lambda x, args: np.sin(x), np.pi / 2, 3 * np.pi / 2), + np.pi, + decimal=10, + ) + npt.assert_almost_equal( + brentq_nojit(lambda x, args: np.cos(x), np.pi, np.pi / 2), np.pi / 2, decimal=10 + ) diff --git a/test/test_LensModel/test_arc_distortions.py b/test/test_LensModel/test_arc_distortions.py index e33eb445d..b2a13b71e 100644 --- a/test/test_LensModel/test_arc_distortions.py +++ b/test/test_LensModel/test_arc_distortions.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np @@ -13,43 +13,78 @@ class TestArcDistortions(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): pass def test_radial_tangential_distortions(self): - lens_model_list = ['CURVED_ARC_SPP', 'SHEAR', 'FLEXION'] + lens_model_list = ["CURVED_ARC_SPP", "SHEAR", "FLEXION"] center_x, center_y = 0.01, 0 - curvature = 1./2 + curvature = 1.0 / 2 lens = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'tangential_stretch': 10, 'radial_stretch': 1., 'curvature': curvature, - 'direction': -10, 'center_x': center_x, 'center_y': center_y}, - {'gamma1': -0., 'gamma2': -0.0}, - {'g1': 0., 'g2': 0., 'g3': -0., 'g4': 0}] + kwargs_lens = [ + { + "tangential_stretch": 10, + "radial_stretch": 1.0, + "curvature": curvature, + "direction": -10, + "center_x": center_x, + "center_y": center_y, + }, + {"gamma1": -0.0, "gamma2": -0.0}, + {"g1": 0.0, "g2": 0.0, "g3": -0.0, "g4": 0}, + ] extensions = LensModelExtensions(lensModel=lens) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials( - x=center_x, y=center_y, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001) - print(orientation_angle, 'orientation angle') - l = 1. 
/ dphi_tan_dtan - npt.assert_almost_equal(l, 1./curvature, decimal=6) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + x=center_x, y=center_y, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001 + ) + print(orientation_angle, "orientation angle") + l = 1.0 / dphi_tan_dtan + npt.assert_almost_equal(l, 1.0 / curvature, decimal=6) def test_hessian_eigenvector_mp(self): - lens_model_list = ['SIS', 'SHEAR'] - lens_mp = LensModel(lens_model_list=lens_model_list, lens_redshift_list=[0.5, 0.4], multi_plane=True, - z_source=2) + lens_model_list = ["SIS", "SHEAR"] + lens_mp = LensModel( + lens_model_list=lens_model_list, + lens_redshift_list=[0.5, 0.4], + multi_plane=True, + z_source=2, + ) lens = LensModel(lens_model_list=lens_model_list) - x0, y0 = 1., 1. - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.0, 'gamma2': 0.00001}] + x0, y0 = 1.0, 1.0 + kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"gamma1": 0.0, "gamma2": 0.00001}, + ] extensions = LensModelExtensions(lensModel=lens) extensions_mp = LensModelExtensions(lensModel=lens_mp) - w0, w1, v11, v12, v21, v22 = extensions.hessian_eigenvectors(x0, y0, kwargs_lens, diff=None) - w0_mp, w1_mp, v11_mp, v12_mp, v21_mp, v22_mp = extensions_mp.hessian_eigenvectors(x0, y0, kwargs_lens, diff=None) + w0, w1, v11, v12, v21, v22 = extensions.hessian_eigenvectors( + x0, y0, kwargs_lens, diff=None + ) + ( + w0_mp, + w1_mp, + v11_mp, + v12_mp, + v21_mp, + v22_mp, + ) = extensions_mp.hessian_eigenvectors(x0, y0, kwargs_lens, diff=None) npt.assert_almost_equal(w0, w0_mp, decimal=3) npt.assert_almost_equal(w1, w1_mp, decimal=3) npt.assert_almost_equal(v11, v11_mp, decimal=3) @@ -58,26 +93,40 @@ def test_hessian_eigenvector_mp(self): npt.assert_almost_equal(v22, v22_mp, decimal=3) def test_radial_tangential_stretch(self): - lens_model_list = ['SIS', 'SHEAR'] - lens_mp = LensModel(lens_model_list=lens_model_list, lens_redshift_list=[0.5, 0.4], multi_plane=True, - z_source=2) + lens_model_list = ["SIS", "SHEAR"] + lens_mp = LensModel( + lens_model_list=lens_model_list, + lens_redshift_list=[0.5, 0.4], + multi_plane=True, + z_source=2, + ) lens = LensModel(lens_model_list=lens_model_list) - x0, y0 = 1., 1. 
+ x0, y0 = 1.0, 1.0 - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.0, 'gamma2': 0.00001}] + kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"gamma1": 0.0, "gamma2": 0.00001}, + ] extensions = LensModelExtensions(lensModel=lens) extensions_mp = LensModelExtensions(lensModel=lens_mp) - - radial_stretch, tangential_stretch, v_rad1, v_rad2, v_tang1, v_tang2 = extensions.radial_tangential_stretch(x0, y0, - kwargs_lens, - diff=None) - radial_stretch_mp, tangential_stretch_mp, v_rad1_mp, v_rad2_mp, v_tang1_mp, v_tang2_mp = extensions_mp.radial_tangential_stretch(x0, - y0, - kwargs_lens, - diff=None) + ( + radial_stretch, + tangential_stretch, + v_rad1, + v_rad2, + v_tang1, + v_tang2, + ) = extensions.radial_tangential_stretch(x0, y0, kwargs_lens, diff=None) + ( + radial_stretch_mp, + tangential_stretch_mp, + v_rad1_mp, + v_rad2_mp, + v_tang1_mp, + v_tang2_mp, + ) = extensions_mp.radial_tangential_stretch(x0, y0, kwargs_lens, diff=None) npt.assert_almost_equal(radial_stretch, radial_stretch_mp, decimal=4) npt.assert_almost_equal(tangential_stretch, tangential_stretch_mp, decimal=4) npt.assert_almost_equal(v_rad1, v_rad1_mp, decimal=4) @@ -86,66 +135,146 @@ def test_radial_tangential_stretch(self): npt.assert_almost_equal(v_tang2, v_tang2_mp, decimal=4) def test_radial_tangential_distortions_multi_plane(self): - lens_model_list = ['SIS', 'SHEAR'] - lens_mp = LensModel(lens_model_list=lens_model_list, lens_redshift_list=[0.5, 0.4], multi_plane=True, z_source=2) + lens_model_list = ["SIS", "SHEAR"] + lens_mp = LensModel( + lens_model_list=lens_model_list, + lens_redshift_list=[0.5, 0.4], + multi_plane=True, + z_source=2, + ) lens = LensModel(lens_model_list=lens_model_list) - x0, y0 = 2., 1. + x0, y0 = 2.0, 1.0 - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.0, 'gamma2': 0.00001}] + kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"gamma1": 0.0, "gamma2": 0.00001}, + ] extensions = LensModelExtensions(lensModel=lens) extensions_mp = LensModelExtensions(lensModel=lens_mp) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials( - x=x0, y=y0, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001) - - lambda_rad_mp, lambda_tan_mp, orientation_angle_mp, dlambda_tan_dtan_mp, dlambda_tan_drad_mp, dlambda_rad_drad_mp, dlambda_rad_dtan_mp, dphi_tan_dtan_mp, dphi_tan_drad_mp, dphi_rad_drad_mp, dphi_rad_dtan_mp = extensions_mp.radial_tangential_differentials( - x=x0, y=y0, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + x=x0, y=y0, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001 + ) + + ( + lambda_rad_mp, + lambda_tan_mp, + orientation_angle_mp, + dlambda_tan_dtan_mp, + dlambda_tan_drad_mp, + dlambda_rad_drad_mp, + dlambda_rad_dtan_mp, + dphi_tan_dtan_mp, + dphi_tan_drad_mp, + dphi_rad_drad_mp, + dphi_rad_dtan_mp, + ) = extensions_mp.radial_tangential_differentials( + x=x0, y=y0, kwargs_lens=kwargs_lens, smoothing_3rd=0.0001 + ) npt.assert_almost_equal(lambda_rad, lambda_rad_mp, decimal=3) npt.assert_almost_equal(lambda_tan, lambda_tan_mp, decimal=3) npt.assert_almost_equal(dphi_tan_dtan, dphi_rad_dtan_mp, 
decimal=3) def test_radial_tangential_differentials(self): - from lenstronomy.Util import util + x, y = util.make_grid(numPix=10, deltapix=1) - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] center_x, center_y = 0, 0 lens = LensModel(lens_model_list=lens_model_list) - kwargs_lens = [{'theta_E': 1, 'center_x': center_x, 'center_y': center_y}] + kwargs_lens = [{"theta_E": 1, "center_x": center_x, "center_y": center_y}] extensions = LensModelExtensions(lensModel=lens) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials(2, 2, kwargs_lens, smoothing_3rd=0.001) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + 2, 2, kwargs_lens, smoothing_3rd=0.001 + ) npt.assert_almost_equal(lambda_rad, 1, decimal=5) npt.assert_almost_equal(lambda_tan, 1.5469181606780271, decimal=5) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials( - np.array([2]), np.array([2]), kwargs_lens, smoothing_3rd=0.001) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + np.array([2]), np.array([2]), kwargs_lens, smoothing_3rd=0.001 + ) npt.assert_almost_equal(lambda_rad, 1, decimal=5) npt.assert_almost_equal(lambda_tan, 1.5469181606780271, decimal=5) mag = lens.magnification(x, y, kwargs_lens) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials( - x, y, kwargs_lens, smoothing_3rd=0.001) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = extensions.radial_tangential_differentials( + x, y, kwargs_lens, smoothing_3rd=0.001 + ) mag_tang_rad = lambda_tan * lambda_rad npt.assert_almost_equal(mag_tang_rad, mag, decimal=5) def test_curved_arc_estimate(self): - lens_model_list = ['SPP'] + lens_model_list = ["SPP"] lens = LensModel(lens_model_list=lens_model_list) - arc = LensModel(lens_model_list=['CURVED_ARC_SPP']) + arc = LensModel(lens_model_list=["CURVED_ARC_SPP"]) theta_E = 4 - gamma = 2. 
- kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}] + gamma = 2.0 + kwargs_lens = [ + {"theta_E": theta_E, "gamma": gamma, "center_x": 0, "center_y": 0} + ] ext = LensModelExtensions(lensModel=lens) x_0, y_0 = 5, 0 kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens) - theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc = CurvedArcSPP.stretch2spp(**kwargs_arc) + ( + theta_E_arc, + gamma_arc, + center_x_spp_arc, + center_y_spp_arc, + ) = CurvedArcSPP.stretch2spp(**kwargs_arc) npt.assert_almost_equal(theta_E_arc, theta_E, decimal=4) npt.assert_almost_equal(gamma_arc, gamma, decimal=3) npt.assert_almost_equal(center_x_spp_arc, 0, decimal=3) @@ -157,9 +286,14 @@ def test_curved_arc_estimate(self): npt.assert_almost_equal(alpha_x_arc, alpha_x - alpha0_x, decimal=3) npt.assert_almost_equal(alpha_y_arc, alpha_y - alpha0_y, decimal=3) - x_0, y_0 = 0., 3 + x_0, y_0 = 0.0, 3 kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens) - theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc = CurvedArcSPP.stretch2spp(**kwargs_arc) + ( + theta_E_arc, + gamma_arc, + center_x_spp_arc, + center_y_spp_arc, + ) = CurvedArcSPP.stretch2spp(**kwargs_arc) print(kwargs_arc) print(theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc) npt.assert_almost_equal(theta_E_arc, theta_E, decimal=4) @@ -169,58 +303,96 @@ def test_curved_arc_estimate(self): x_0, y_0 = -2, -3 kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens) - theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc = CurvedArcSPP.stretch2spp(**kwargs_arc) + ( + theta_E_arc, + gamma_arc, + center_x_spp_arc, + center_y_spp_arc, + ) = CurvedArcSPP.stretch2spp(**kwargs_arc) npt.assert_almost_equal(theta_E_arc, theta_E, decimal=4) npt.assert_almost_equal(gamma_arc, gamma, decimal=3) npt.assert_almost_equal(center_x_spp_arc, 0, decimal=3) npt.assert_almost_equal(center_y_spp_arc, 0, decimal=3) def test_curved_arc_finite_area(self): - lens_model_list = ['SPP'] + lens_model_list = ["SPP"] lens = LensModel(lens_model_list=lens_model_list) - arc = LensModel(lens_model_list=['CURVED_ARC_SPP']) + arc = LensModel(lens_model_list=["CURVED_ARC_SPP"]) theta_E = 4 - gamma = 2. 
- kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}] + gamma = 2.0 + kwargs_lens = [ + {"theta_E": theta_E, "gamma": gamma, "center_x": 0, "center_y": 0} + ] ext = LensModelExtensions(lensModel=lens) x_0, y_0 = 5, 0 kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens) dr = 0.001 kwargs_arc_finite = ext.curved_arc_finite_area(x_0, y_0, kwargs_lens, dr) - npt.assert_almost_equal(kwargs_arc['direction'], kwargs_arc_finite['direction'], decimal=3) - npt.assert_almost_equal(kwargs_arc['radial_stretch'], kwargs_arc_finite['radial_stretch'], decimal=3) - npt.assert_almost_equal(kwargs_arc['tangential_stretch'], kwargs_arc_finite['tangential_stretch'], decimal=3) - npt.assert_almost_equal(kwargs_arc['curvature'], kwargs_arc_finite['curvature'], decimal=3) - + npt.assert_almost_equal( + kwargs_arc["direction"], kwargs_arc_finite["direction"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_arc["radial_stretch"], kwargs_arc_finite["radial_stretch"], decimal=3 + ) + npt.assert_almost_equal( + kwargs_arc["tangential_stretch"], + kwargs_arc_finite["tangential_stretch"], + decimal=3, + ) + npt.assert_almost_equal( + kwargs_arc["curvature"], kwargs_arc_finite["curvature"], decimal=3 + ) def test_curved_arc_estimate_tan_diff(self): arc_tan_diff = CurvedArcTanDiff() - lens_model_list = ['SIE'] + lens_model_list = ["SIE"] lens = LensModel(lens_model_list=lens_model_list) - arc = LensModel(lens_model_list=['CURVED_ARC_TAN_DIFF']) + arc = LensModel(lens_model_list=["CURVED_ARC_TAN_DIFF"]) theta_E = 4 # here we model an off-axis ellisoid relative to the x-axis - e1, e2 = 0., -0.1 + e1, e2 = 0.0, -0.1 x_0, y_0 = 5, 0 - kwargs_lens = [{'theta_E': theta_E, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + {"theta_E": theta_E, "e1": e1, "e2": e2, "center_x": 0, "center_y": 0} + ] ext = LensModelExtensions(lensModel=lens) - kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens, tan_diff=True, smoothing_3rd=0.01) - theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis = arc_tan_diff.stretch2sie_mst(**kwargs_arc) + kwargs_arc = ext.curved_arc_estimate( + x_0, y_0, kwargs_lens, tan_diff=True, smoothing_3rd=0.01 + ) + ( + theta_E_sie, + e1_sie, + e2_sie, + kappa_ext, + center_x_sis, + center_y_sis, + ) = arc_tan_diff.stretch2sie_mst(**kwargs_arc) print(theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) npt.assert_almost_equal(e2_sie - e2, 0, decimal=1) npt.assert_almost_equal(e1_sie, e1, decimal=3) # here we model an off-axis ellisoid relative to the y-axis - e1, e2 = 0.1, 0. 
+ e1, e2 = 0.1, 0.0 x_0, y_0 = 0, 5 - kwargs_lens = [{'theta_E': theta_E, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + {"theta_E": theta_E, "e1": e1, "e2": e2, "center_x": 0, "center_y": 0} + ] ext = LensModelExtensions(lensModel=lens) - kwargs_arc = ext.curved_arc_estimate(x_0, y_0, kwargs_lens, tan_diff=True, smoothing_3rd=0.01) - theta_E_sie, e1_sie, e2_sie, kappa_ext, center_x_sis, center_y_sis = arc_tan_diff.stretch2sie_mst(**kwargs_arc) + kwargs_arc = ext.curved_arc_estimate( + x_0, y_0, kwargs_lens, tan_diff=True, smoothing_3rd=0.01 + ) + ( + theta_E_sie, + e1_sie, + e2_sie, + kappa_ext, + center_x_sis, + center_y_sis, + ) = arc_tan_diff.stretch2sie_mst(**kwargs_arc) print(theta_E_sie, e1_sie, e2_sie, center_x_sis, center_y_sis) npt.assert_almost_equal(e1_sie - e1, 0, decimal=1) npt.assert_almost_equal(e2_sie, e2, decimal=3) @@ -231,27 +403,40 @@ def test_curved_arc_estimate_tan_diff(self): def test_arcs_at_image_position(self): # lensing quantities - kwargs_spp = {'theta_E': 1.26, 'gamma': 2., 'e1': 0.1, 'e2': -0.1, 'center_x': 0.0, 'center_y': 0.0} # parameters of the deflector lens model + kwargs_spp = { + "theta_E": 1.26, + "gamma": 2.0, + "e1": 0.1, + "e2": -0.1, + "center_x": 0.0, + "center_y": 0.0, + } # parameters of the deflector lens model # the lens model is a supperposition of an elliptical lens model with external shear - lens_model_list = ['SPEP'] #, 'SHEAR'] - kwargs_lens = [kwargs_spp] #, kwargs_shear] + lens_model_list = ["SPEP"] # , 'SHEAR'] + kwargs_lens = [kwargs_spp] # , kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) lensEquationSolver = LensEquationSolver(lens_model_class) - source_x = 0. + source_x = 0.0 source_y = 0.05 - x_image, y_image = lensEquationSolver.findBrightImage(source_x, source_y, kwargs_lens, numImages=4, - min_distance=0.05, search_window=5) - arc_model = LensModel(lens_model_list=['CURVED_ARC_SPP', 'SHIFT']) + x_image, y_image = lensEquationSolver.findBrightImage( + source_x, + source_y, + kwargs_lens, + numImages=4, + min_distance=0.05, + search_window=5, + ) + arc_model = LensModel(lens_model_list=["CURVED_ARC_SPP", "SHIFT"]) for i in range(len(x_image)): x0, y0 = x_image[i], y_image[i] print(x0, y0, i) ext = LensModelExtensions(lensModel=lens_model_class) kwargs_arc_i = ext.curved_arc_estimate(x0, y0, kwargs_lens) alpha_x, alpha_y = lens_model_class.alpha(x0, y0, kwargs_lens) - kwargs_arc = [kwargs_arc_i, {'alpha_x': alpha_x, 'alpha_y': alpha_y}] + kwargs_arc = [kwargs_arc_i, {"alpha_x": alpha_x, "alpha_y": alpha_y}] print(kwargs_arc_i) - direction = kwargs_arc_i['direction'] + direction = kwargs_arc_i["direction"] print(np.cos(direction), np.sin(direction)) x, y = util.make_grid(numPix=5, deltapix=0.01) x = x0 @@ -261,158 +446,314 @@ def test_arcs_at_image_position(self): print(gamma1, gamma2) npt.assert_almost_equal(gamma1_arc, gamma1, decimal=3) npt.assert_almost_equal(gamma2_arc, gamma2, decimal=3) - theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc = CurvedArcSPP.stretch2spp(**kwargs_arc_i) + ( + theta_E_arc, + gamma_arc, + center_x_spp_arc, + center_y_spp_arc, + ) = CurvedArcSPP.stretch2spp(**kwargs_arc_i) print(theta_E_arc, gamma_arc, center_x_spp_arc, center_y_spp_arc) npt.assert_almost_equal(center_x_spp_arc, 0, decimal=3) npt.assert_almost_equal(center_y_spp_arc, 0, decimal=3) def test_analytic_differentials_spp(self): - """ - test the analytical differentials of the spherical power-law mass profile + """Test the analytical differentials of the spherical power-law 
mass profile. :return: """ - lens_model_class = LensModel(lens_model_list=['SPP']) + lens_model_class = LensModel(lens_model_list=["SPP"]) theta_E = 2 gamma = 2.2 - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + {"theta_E": theta_E, "gamma": gamma, "center_x": 0, "center_y": 0} + ] ext = LensModelExtensions(lensModel=lens_model_class) # we leave out the very high magnification region where the numerical differentials might not be as accurate as required in the tests - x = np.append(np.linspace(start=0.1, stop=theta_E-0.1, num=9), np.linspace(start=theta_E + 0.1, stop=3 * theta_E, num=9)) + x = np.append( + np.linspace(start=0.1, stop=theta_E - 0.1, num=9), + np.linspace(start=theta_E + 0.1, stop=3 * theta_E, num=9), + ) y = np.zeros_like(x) - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials(x, y, kwargs_lens) + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) def _lambda_t_analytic(r, theta_E, gamma): - """ - analytic expression for lambda_tan + """Analytic expression for lambda_tan. :param r: radius :return: """ - return (1 - (theta_E / r) ** (gamma -1)) ** -1 + return (1 - (theta_E / r) ** (gamma - 1)) ** -1 lambda_tan_analytic = _lambda_t_analytic(x, theta_E, gamma) npt.assert_almost_equal(lambda_tan_analytic, lambda_tan, decimal=5) def _lambda_r_analytic(r, theta_E, gamma): - """ - analytic expression for lambda_rad + """Analytic expression for lambda_rad. :param r: radius :return: """ - return (1 + (gamma - 2) * (theta_E/r) ** (gamma - 1)) ** -1 + return (1 + (gamma - 2) * (theta_E / r) ** (gamma - 1)) ** -1 lambda_rad_analytic = _lambda_r_analytic(x, theta_E, gamma) npt.assert_almost_equal(lambda_rad_analytic, lambda_rad, decimal=5) def _lambda_t_dr_analytic(r, theta_E, gamma): - """ - analytic expression for d(lambda_tan) / dr + """Analytic expression for d(lambda_tan) / dr. :param r: radius :return: """ - return (1 - gamma) * (theta_E / r) ** gamma / (theta_E * (1 - (theta_E/r)**(gamma - 1))**2) + return ( + (1 - gamma) + * (theta_E / r) ** gamma + / (theta_E * (1 - (theta_E / r) ** (gamma - 1)) ** 2) + ) dlambda_tan_drad_analytic = _lambda_t_dr_analytic(x, theta_E, gamma) - npt.assert_almost_equal(dlambda_tan_drad_analytic / dlambda_tan_drad, 1, decimal=2) + npt.assert_almost_equal( + dlambda_tan_drad_analytic / dlambda_tan_drad, 1, decimal=2 + ) def _lambda_r_dr_analytic(r, theta_E, gamma): - """ - analytic expression for d(lambda_tan) / dr + """Analytic expression for d(lambda_tan) / dr. 
:param r: radius :return: """ - return (1 - gamma) * (2 - gamma) * (theta_E / r) ** gamma / (theta_E * (1 + (theta_E/r)**(gamma - 1) * (gamma - 2))**2) + return ( + (1 - gamma) + * (2 - gamma) + * (theta_E / r) ** gamma + / (theta_E * (1 + (theta_E / r) ** (gamma - 1) * (gamma - 2)) ** 2) + ) dlambda_r_drad_analytic = _lambda_r_dr_analytic(x, theta_E, gamma) - npt.assert_almost_equal(dlambda_r_drad_analytic / dlambda_rad_drad, 1, decimal=2) + npt.assert_almost_equal( + dlambda_r_drad_analytic / dlambda_rad_drad, 1, decimal=2 + ) def test_analytic_differential_pemd(self): - lens_model_class = LensModel(lens_model_list=['EPL']) + lens_model_class = LensModel(lens_model_list=["EPL"]) theta_E = 4 - gamma = 2. + gamma = 2.0 ext = LensModelExtensions(lensModel=lens_model_class) # we leave out the very high magnification region where the numerical differentials might not be as accurate as required in the tests - x = np.append(np.linspace(start=0.2 * theta_E, stop=theta_E * (1-0.2), num=9), - np.linspace(start=theta_E *(1 + 0.5), stop=3 * theta_E, num=9)) + x = np.append( + np.linspace(start=0.2 * theta_E, stop=theta_E * (1 - 0.2), num=9), + np.linspace(start=theta_E * (1 + 0.5), stop=3 * theta_E, num=9), + ) y = np.zeros_like(x) def _dlambda_t_dr_analytic(r, theta_E, gamma): - """ - analytic expression for d(lambda_tan) / dr + """Analytic expression for d(lambda_tan) / dr. :param r: circularized radius :return: """ - return (1 - gamma) * (theta_E / r) ** gamma / (theta_E * (1 - (theta_E/r)**(gamma - 1))**2) + return ( + (1 - gamma) + * (theta_E / r) ** gamma + / (theta_E * (1 - (theta_E / r) ** (gamma - 1)) ** 2) + ) def _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G): # polar coordinates with respect to x-axis r, phi = param_util.cart2polar(x, y, center_x=0, center_y=0) - epsilon = (1 - q ** 2) / (1 + q ** 2) + epsilon = (1 - q**2) / (1 + q**2) # radial component in respect to rotation of deflector r_ = r * np.sqrt(1 - epsilon * np.cos(2 * (phi - phi_G))) # equivalent Einstein radius for elliptical mass definition theta_E_prim = np.sqrt(2 * q / (1 + q**2)) * theta_E dlambda_t_dr = _dlambda_t_dr_analytic(r_, theta_E_prim, gamma) - dr_de_t = epsilon * np.sin(2 * (phi - phi_G)) / np.sqrt(1 - epsilon * np.cos(2 * (phi - phi_G))) + dr_de_t = ( + epsilon + * np.sin(2 * (phi - phi_G)) + / np.sqrt(1 - epsilon * np.cos(2 * (phi - phi_G))) + ) return dlambda_t_dr * dr_de_t, dlambda_t_dr # define ellipticity with axis ratio and orientation # ellipticity aligned -> leading to zero differential - phi_G, q = 0., 0.7 + phi_G, q = 0.0, 0.7 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials(x, y, kwargs_lens) - dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G) + kwargs_lens = [ + { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) + dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic( + x, y, theta_E, gamma, 
q, phi_G + ) npt.assert_almost_equal(dlambda_tan_dtan_analytic, 0, decimal=5) npt.assert_almost_equal(dlambda_tan_dtan, 0, decimal=1) # ellipticity aligned -> leading to zero differential - phi_G, q = np.pi/2, 0.7 + phi_G, q = np.pi / 2, 0.7 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - x, y, kwargs_lens) - dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G) + kwargs_lens = [ + { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) + dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic( + x, y, theta_E, gamma, q, phi_G + ) npt.assert_almost_equal(dlambda_tan_dtan_analytic, 0, decimal=5) npt.assert_almost_equal(dlambda_tan_dtan, 0, decimal=1) phi_G, q = np.pi / 4, 0.7 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - x, y, kwargs_lens) - dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G) - print(dlambda_rad_drad, 'dlambda_rad') + kwargs_lens = [ + { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) + dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic( + x, y, theta_E, gamma, q, phi_G + ) + print(dlambda_rad_drad, "dlambda_rad") npt.assert_almost_equal(dlambda_tan_drad / dlambda_t_dr_analtic, 1, decimal=2) - print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 'test') - npt.assert_almost_equal(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2) + print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, "test") + npt.assert_almost_equal( + dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2 + ) phi_G, q = np.pi / 8, 0.7 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - x, y, kwargs_lens) - dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G) - print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 'test') - npt.assert_almost_equal(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2) + kwargs_lens = [ + { + 
"theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) + dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic( + x, y, theta_E, gamma, q, phi_G + ) + print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, "test") + npt.assert_almost_equal( + dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2 + ) phi_G, q = 0.3, 0.7 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] - lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = ext.radial_tangential_differentials( - x, y, kwargs_lens) - dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic(x, y, theta_E, gamma, q, phi_G) - print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 'test') - npt.assert_almost_equal(dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2) - - -if __name__ == '__main__': + kwargs_lens = [ + { + "theta_E": theta_E, + "gamma": gamma, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] + ( + lambda_rad, + lambda_tan, + orientation_angle, + dlambda_tan_dtan, + dlambda_tan_drad, + dlambda_rad_drad, + dlambda_rad_dtan, + dphi_tan_dtan, + dphi_tan_drad, + dphi_rad_drad, + dphi_rad_dtan, + ) = ext.radial_tangential_differentials(x, y, kwargs_lens) + dlambda_tan_dtan_analytic, dlambda_t_dr_analtic = _dlambda_t_dt_analytic( + x, y, theta_E, gamma, q, phi_G + ) + print(dlambda_tan_dtan_analytic / dlambda_tan_dtan, "test") + npt.assert_almost_equal( + dlambda_tan_dtan_analytic / dlambda_tan_dtan, 1, decimal=2 + ) + + +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_convergence_integrals.py b/test/test_LensModel/test_convergence_integrals.py index 06a9df3a5..b7d80d3a7 100644 --- a/test/test_LensModel/test_convergence_integrals.py +++ b/test/test_LensModel/test_convergence_integrals.py @@ -6,23 +6,21 @@ class TestConvergenceIntegrals(object): - """ - test angular to mass unit conversions - """ + """Test angular to mass unit conversions.""" + def setup_method(self): pass def test_potential_from_kappa(self): - sis = SIS() deltaPix = 0.005 x_grid, y_grid = util.make_grid(numPix=2000, deltapix=deltaPix) - kwargs_sis = {'theta_E': 1., 'center_x': 0, 'center_y': 0} + kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0} f_xx, _, _, f_yy = sis.hessian(x_grid, y_grid, **kwargs_sis) f_ = sis.function(x_grid, y_grid, **kwargs_sis) f_ = util.array2image(f_) - kappa = util.array2image((f_xx + f_yy) / 2.) + kappa = util.array2image((f_xx + f_yy) / 2.0) potential_num = convergence_integrals.potential_from_kappa_grid(kappa, deltaPix) x1, y1 = 560, 500 @@ -35,16 +33,20 @@ def test_potential_from_kappa(self): def test_potential_from_kappa_adaptiv(self): sis = SIS() deltaPix = 0.01 - kwargs_sis = {'theta_E': 1., 'center_x': 0, 'center_y': 0} + kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0} low_res_factor = 5 high_res_kernel_size = 5 x_grid, y_grid = util.make_grid(numPix=1000, deltapix=deltaPix) f_xx, _, _, f_yy = sis.hessian(x_grid, y_grid, **kwargs_sis) - kappa = util.array2image((f_xx + f_yy) / 2.) 
- f_num = convergence_integrals.potential_from_kappa_grid_adaptive(kappa, deltaPix, low_res_factor, high_res_kernel_size) - - x_grid_low, y_grid_low = util.make_grid(numPix=1000/low_res_factor, deltapix=deltaPix*low_res_factor) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_num = convergence_integrals.potential_from_kappa_grid_adaptive( + kappa, deltaPix, low_res_factor, high_res_kernel_size + ) + + x_grid_low, y_grid_low = util.make_grid( + numPix=1000 / low_res_factor, deltapix=deltaPix * low_res_factor + ) f_low = sis.function(x_grid_low, y_grid_low, **kwargs_sis) f_low = util.array2image(f_low) x1, y1 = 56, 50 @@ -54,18 +56,19 @@ def test_potential_from_kappa_adaptiv(self): d_f = f_low[x1, y1] - f_low[x2, y2] npt.assert_almost_equal(d_f_num, d_f, decimal=2) - def test_deflection_from_kappa(self): sis = SIS() deltaPix = 0.01 x_grid, y_grid = util.make_grid(numPix=1000, deltapix=deltaPix) - kwargs_sis = {'theta_E': 1., 'center_x': 0, 'center_y': 0} + kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0} f_xx, _, _, f_yy = sis.hessian(x_grid, y_grid, **kwargs_sis) f_x, f_y = sis.derivatives(x_grid, y_grid, **kwargs_sis) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) - f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 550, 500 # test relative potential at two different point way inside the kappa map @@ -74,16 +77,20 @@ def test_deflection_from_kappa(self): def test_deflection_from_kappa_adaptiv(self): sis = SIS() deltaPix = 0.01 - kwargs_sis = {'theta_E': 1., 'center_x': 0, 'center_y': 0} + kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0} low_res_factor = 5 high_res_kernel_size = 5 x_grid, y_grid = util.make_grid(numPix=1000, deltapix=deltaPix) f_xx, _, _, f_yy = sis.hessian(x_grid, y_grid, **kwargs_sis) - kappa = util.array2image((f_xx + f_yy) / 2.) 
- f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid_adaptive(kappa, deltaPix, low_res_factor, high_res_kernel_size) - - x_grid_low, y_grid_low = util.make_grid(numPix=1000/low_res_factor, deltapix=deltaPix*low_res_factor) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid_adaptive( + kappa, deltaPix, low_res_factor, high_res_kernel_size + ) + + x_grid_low, y_grid_low = util.make_grid( + numPix=1000 / low_res_factor, deltapix=deltaPix * low_res_factor + ) f_x_low, f_y_low = sis.derivatives(x_grid_low, y_grid_low, **kwargs_sis) f_x_low = util.array2image(f_x_low) f_y_low = util.array2image(f_y_low) @@ -96,10 +103,23 @@ def test_deflection_from_kappa_adaptiv(self): def test_sersic(self): from lenstronomy.LensModel.Profiles.sersic import Sersic from lenstronomy.LightModel.Profiles.sersic import Sersic as SersicLight + sersic_lens = Sersic() sersic_light = SersicLight() - kwargs_light = {'n_sersic': 2, 'R_sersic': 0.5, 'I0_sersic': 1, 'center_x': 0, 'center_y': 0} - kwargs_lens = {'n_sersic': 2, 'R_sersic': 0.5, 'k_eff': 1, 'center_x': 0, 'center_y': 0} + kwargs_light = { + "n_sersic": 2, + "R_sersic": 0.5, + "I0_sersic": 1, + "center_x": 0, + "center_y": 0, + } + kwargs_lens = { + "n_sersic": 2, + "R_sersic": 0.5, + "k_eff": 1, + "center_x": 0, + "center_y": 0, + } deltaPix = 0.01 numPix = 1000 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) @@ -109,10 +129,12 @@ def test_sersic(self): f_xx, _, _, f_yy = sersic_lens.hessian(x_grid, y_grid, **kwargs_lens) f_x, f_y = sersic_lens.derivatives(x_grid, y_grid, **kwargs_lens) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) - f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 500, 550 - x0, y0 = int(numPix/2.), int(numPix/2.) + x0, y0 = int(numPix / 2.0), int(numPix / 2.0) npt.assert_almost_equal(f_x[x1, y1], f_x_num[x1, y1], decimal=2) f_num = convergence_integrals.potential_from_kappa_grid(kappa, deltaPix) f_ = sersic_lens.function(x_grid2d[x1, y1], y_grid2d[x1, y1], **kwargs_lens) @@ -120,56 +142,75 @@ def test_sersic(self): npt.assert_almost_equal(f_ - f_00, f_num[x1, y1] - f_num[x0, y0], decimal=2) def test_gnfw(self): - from lenstronomy.LensModel.Profiles.general_nfw import GNFW + gnfw = GNFW() deltaPix = 0.005 numPix = 2000 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) - kwargs_lens = {'alpha_Rs': 1.2, 'Rs': 0.8, 'gamma_inner': 1.5, 'gamma_outer': 3.4} + kwargs_lens = { + "alpha_Rs": 1.2, + "Rs": 0.8, + "gamma_inner": 1.5, + "gamma_outer": 3.4, + } f_xx, _, _, f_yy = gnfw.hessian(x_grid, y_grid, **kwargs_lens) f_x, f_y = gnfw.derivatives(x_grid, y_grid, **kwargs_lens) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) 
- f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 500, 550 npt.assert_almost_equal(f_x[x1, y1], f_x_num[x1, y1], decimal=2) - kwargs_lens = {'alpha_Rs': 1.2, 'Rs': 0.8, 'gamma_inner': 2.3, 'gamma_outer': 3.45} + kwargs_lens = { + "alpha_Rs": 1.2, + "Rs": 0.8, + "gamma_inner": 2.3, + "gamma_outer": 3.45, + } f_xx, _, _, f_yy = gnfw.hessian(x_grid, y_grid, **kwargs_lens) f_x, f_y = gnfw.derivatives(x_grid, y_grid, **kwargs_lens) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) - f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 500, 550 npt.assert_almost_equal(f_x[x1, y1], f_x_num[x1, y1], decimal=2) def test_tnfwc(self): - from lenstronomy.LensModel.Profiles.nfw_core_truncated import TNFWC + tnfwc = TNFWC() deltaPix = 0.005 numPix = 2000 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) - kwargs_lens = {'alpha_Rs': 1.2, 'Rs': 0.8, 'r_trunc': 3.5, 'r_core': 0.45} + kwargs_lens = {"alpha_Rs": 1.2, "Rs": 0.8, "r_trunc": 3.5, "r_core": 0.45} f_xx, _, _, f_yy = tnfwc.hessian(x_grid, y_grid, **kwargs_lens) f_x, f_y = tnfwc.derivatives(x_grid, y_grid, **kwargs_lens) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) - f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 500, 550 npt.assert_almost_equal(f_x[x1, y1], f_x_num[x1, y1], decimal=2) - kwargs_lens = {'alpha_Rs': 1.2, 'Rs': 0.8, 'r_trunc': 300.5, 'r_core': 0.45} + kwargs_lens = {"alpha_Rs": 1.2, "Rs": 0.8, "r_trunc": 300.5, "r_core": 0.45} f_xx, _, _, f_yy = tnfwc.hessian(x_grid, y_grid, **kwargs_lens) f_x, f_y = tnfwc.derivatives(x_grid, y_grid, **kwargs_lens) f_x = util.array2image(f_x) - kappa = util.array2image((f_xx + f_yy) / 2.) 
- f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid(kappa, deltaPix) + kappa = util.array2image((f_xx + f_yy) / 2.0) + f_x_num, f_y_num = convergence_integrals.deflection_from_kappa_grid( + kappa, deltaPix + ) x1, y1 = 500, 550 npt.assert_almost_equal(f_x[x1, y1], f_x_num[x1, y1], decimal=2) -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LensModel/test_lens_model.py b/test/test_LensModel/test_lens_model.py index 05e3be4d8..043630243 100644 --- a/test/test_LensModel/test_lens_model.py +++ b/test/test_LensModel/test_lens_model.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -10,108 +10,149 @@ class TestLensModel(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): - self.lensModel = LensModel(['GAUSSIAN']) - self.kwargs = [{'amp': 1., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.}] + self.lensModel = LensModel(["GAUSSIAN"]) + self.kwargs = [ + { + "amp": 1.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 0.0, + "center_y": 0.0, + } + ] def test_init(self): - lens_model_list = ['FLEXION', 'SIS_TRUNCATED', 'SERSIC', 'SERSIC_ELLIPSE_KAPPA', - 'SERSIC_ELLIPSE_GAUSS_DEC', 'NFW_ELLIPSE_GAUSS_DEC', - 'SERSIC_ELLIPSE_POTENTIAL', 'CTNFW_GAUSS_DEC', - 'PJAFFE', 'PJAFFE_ELLIPSE', 'HERNQUIST_ELLIPSE', 'INTERPOL', 'INTERPOL_SCALED', - 'SHAPELETS_POLAR', 'DIPOLE', - 'GAUSSIAN_ELLIPSE_KAPPA', 'GAUSSIAN_ELLIPSE_POTENTIAL', - 'MULTI_GAUSSIAN_KAPPA', 'MULTI_GAUSSIAN_KAPPA_ELLIPSE', 'CHAMELEON', - 'DOUBLE_CHAMELEON'] + lens_model_list = [ + "FLEXION", + "SIS_TRUNCATED", + "SERSIC", + "SERSIC_ELLIPSE_KAPPA", + "SERSIC_ELLIPSE_GAUSS_DEC", + "NFW_ELLIPSE_GAUSS_DEC", + "SERSIC_ELLIPSE_POTENTIAL", + "CTNFW_GAUSS_DEC", + "PJAFFE", + "PJAFFE_ELLIPSE", + "HERNQUIST_ELLIPSE", + "INTERPOL", + "INTERPOL_SCALED", + "SHAPELETS_POLAR", + "DIPOLE", + "GAUSSIAN_ELLIPSE_KAPPA", + "GAUSSIAN_ELLIPSE_POTENTIAL", + "MULTI_GAUSSIAN_KAPPA", + "MULTI_GAUSSIAN_KAPPA_ELLIPSE", + "CHAMELEON", + "DOUBLE_CHAMELEON", + ] lensModel = LensModel(lens_model_list) assert len(lensModel.lens_model_list) == len(lens_model_list) - lens_model_list = ['NFW'] + lens_model_list = ["NFW"] lensModel = LensModel(lens_model_list) - x,y = 0.2,1 - kwargs = [{'alpha_Rs':1, 'Rs': 0.5, 'center_x':0, 'center_y': 0}] - value = lensModel.potential(x,y,kwargs) + x, y = 0.2, 1 + kwargs = [{"alpha_Rs": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}] + value = lensModel.potential(x, y, kwargs) nfw_interp = NFW(interpol=True) value_interp_lookup = nfw_interp.function(x, y, **kwargs[0]) npt.assert_almost_equal(value, value_interp_lookup, decimal=4) def test_kappa(self): - lensModel = LensModel(lens_model_list=['CONVERGENCE']) + lensModel = LensModel(lens_model_list=["CONVERGENCE"]) kappa_ext = 0.5 - kwargs = [{'kappa': kappa_ext}] - output = lensModel.kappa(x=1., y=1., kwargs=kwargs) + kwargs = [{"kappa": kappa_ext}] + output = lensModel.kappa(x=1.0, y=1.0, kwargs=kwargs) assert output == kappa_ext def test_potential(self): - output = self.lensModel.potential(x=1., y=1., kwargs=self.kwargs) - npt.assert_almost_equal(output, 0.77880078307140488/(8*np.pi), decimal=8) - #assert output == 0.77880078307140488/(8*np.pi) + output = self.lensModel.potential(x=1.0, y=1.0, kwargs=self.kwargs) + npt.assert_almost_equal(output, 0.77880078307140488 / (8 * np.pi), decimal=8) + # assert output == 0.77880078307140488/(8*np.pi) def test_alpha(self): - output1, output2 
= self.lensModel.alpha(x=1., y=1., kwargs=self.kwargs) - npt.assert_almost_equal(output1, -0.19470019576785122/(8*np.pi), decimal=8) + output1, output2 = self.lensModel.alpha(x=1.0, y=1.0, kwargs=self.kwargs) + npt.assert_almost_equal(output1, -0.19470019576785122 / (8 * np.pi), decimal=8) npt.assert_almost_equal(output2, -0.19470019576785122 / (8 * np.pi), decimal=8) - #assert output1 == -0.19470019576785122/(8*np.pi) - #assert output2 == -0.19470019576785122/(8*np.pi) + # assert output1 == -0.19470019576785122/(8*np.pi) + # assert output2 == -0.19470019576785122/(8*np.pi) - output1_diff, output2_diff = self.lensModel.alpha(x=1., y=1., kwargs=self.kwargs, diff=0.00001) + output1_diff, output2_diff = self.lensModel.alpha( + x=1.0, y=1.0, kwargs=self.kwargs, diff=0.00001 + ) npt.assert_almost_equal(output1_diff, output1, decimal=5) npt.assert_almost_equal(output2_diff, output2, decimal=5) def test_gamma(self): - lensModel = LensModel(lens_model_list=['SHEAR']) - gamma1, gamm2 = 0.1, -0.1 - kwargs = [{'gamma1': gamma1, 'gamma2': gamm2}] - e1_out, e2_out = lensModel.gamma(x=1., y=1., kwargs=kwargs) + lensModel = LensModel(lens_model_list=["SHEAR"]) + gamma1, gamm2 = 0.1, -0.1 + kwargs = [{"gamma1": gamma1, "gamma2": gamm2}] + e1_out, e2_out = lensModel.gamma(x=1.0, y=1.0, kwargs=kwargs) assert e1_out == gamma1 assert e2_out == gamm2 - output1, output2 = self.lensModel.gamma(x=1., y=1., kwargs=self.kwargs) + output1, output2 = self.lensModel.gamma(x=1.0, y=1.0, kwargs=self.kwargs) assert output1 == 0 - assert output2 == 0.048675048941962805/(8*np.pi) + assert output2 == 0.048675048941962805 / (8 * np.pi) def test_magnification(self): - output = self.lensModel.magnification(x=1., y=1., kwargs=self.kwargs) + output = self.lensModel.magnification(x=1.0, y=1.0, kwargs=self.kwargs) assert output == 0.98848384784633392 def test_flexion(self): - lensModel = LensModel(lens_model_list=['FLEXION']) + lensModel = LensModel(lens_model_list=["FLEXION"]) g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04 - kwargs = [{'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4}] - f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1., y=1., kwargs=kwargs) + kwargs = [{"g1": g1, "g2": g2, "g3": g3, "g4": g4}] + f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1.0, y=1.0, kwargs=kwargs) npt.assert_almost_equal(f_xxx, g1, decimal=8) npt.assert_almost_equal(f_xxy, g2, decimal=8) npt.assert_almost_equal(f_xyy, g3, decimal=8) npt.assert_almost_equal(f_yyy, g4, decimal=8) def test_ray_shooting(self): - delta_x, delta_y = self.lensModel.ray_shooting(x=1., y=1., kwargs=self.kwargs) - npt.assert_almost_equal(delta_x, 1 + 0.19470019576785122/(8*np.pi), decimal=8) - npt.assert_almost_equal(delta_y, 1 + 0.19470019576785122 / (8 * np.pi), decimal=8) - #assert delta_x == 1 + 0.19470019576785122/(8*np.pi) - #assert delta_y == 1 + 0.19470019576785122/(8*np.pi) + delta_x, delta_y = self.lensModel.ray_shooting(x=1.0, y=1.0, kwargs=self.kwargs) + npt.assert_almost_equal( + delta_x, 1 + 0.19470019576785122 / (8 * np.pi), decimal=8 + ) + npt.assert_almost_equal( + delta_y, 1 + 0.19470019576785122 / (8 * np.pi), decimal=8 + ) + # assert delta_x == 1 + 0.19470019576785122/(8*np.pi) + # assert delta_y == 1 + 0.19470019576785122/(8*np.pi) def test_arrival_time(self): z_lens = 0.5 z_source = 1.5 - x_image, y_image = 1., 0. 
- lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, lens_redshift_list=[z_lens], z_source=z_source) - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}] + x_image, y_image = 1.0, 0.0 + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + lens_redshift_list=[z_lens], + z_source=z_source, + ) + kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}] arrival_time_mp = lensModel.arrival_time(x_image, y_image, kwargs) - lensModel_sp = LensModel(lens_model_list=['SIS'], z_source=z_source, z_lens=z_lens) + lensModel_sp = LensModel( + lens_model_list=["SIS"], z_source=z_source, z_lens=z_lens + ) arrival_time_sp = lensModel_sp.arrival_time(x_image, y_image, kwargs) npt.assert_almost_equal(arrival_time_sp, arrival_time_mp, decimal=8) def test_fermat_potential(self): z_lens = 0.5 z_source = 1.5 - x_image, y_image = 1., 0. - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, lens_redshift_list=[z_lens], z_lens=z_lens, z_source=z_source) - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}] + x_image, y_image = 1.0, 0.0 + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + lens_redshift_list=[z_lens], + z_lens=z_lens, + z_source=z_source, + ) + kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}] fermat_pot = lensModel.fermat_potential(x_image, y_image, kwargs) arrival_time = lensModel.arrival_time(x_image, y_image, kwargs) arrival_time_from_fermat_pot = lensModel._lensCosmo.time_delay_units(fermat_pot) @@ -120,29 +161,41 @@ def test_fermat_potential(self): def test_curl(self): z_lens_list = [0.2, 0.8] z_source = 1.5 - lensModel = LensModel(lens_model_list=['SIS', 'SIS'], multi_plane=True, lens_redshift_list=z_lens_list, z_source=z_source) - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}, - {'theta_E': 0., 'center_x': 0., 'center_y': 0.2}] + lensModel = LensModel( + lens_model_list=["SIS", "SIS"], + multi_plane=True, + lens_redshift_list=z_lens_list, + z_source=z_source, + ) + kwargs = [ + {"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}, + {"theta_E": 0.0, "center_x": 0.0, "center_y": 0.2}, + ] curl = lensModel.curl(x=1, y=1, kwargs=kwargs) assert curl == 0 - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}, - {'theta_E': 1., 'center_x': 0., 'center_y': 0.2}] + kwargs = [ + {"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}, + {"theta_E": 1.0, "center_x": 0.0, "center_y": 0.2}, + ] curl = lensModel.curl(x=1, y=1, kwargs=kwargs) assert curl != 0 def test_hessian_differentials(self): - """ - routine to test the private numerical differentials, both cross and square methods in the infinitesimal regime - """ - lens_model = LensModel(lens_model_list=['SIS']) - kwargs = [{'theta_E': 1, 'center_x': 0.01, 'center_y': 0}] + """Routine to test the private numerical differentials, both cross and square + methods in the infinitesimal regime.""" + lens_model = LensModel(lens_model_list=["SIS"]) + kwargs = [{"theta_E": 1, "center_x": 0.01, "center_y": 0}] x, y = make_grid(numPix=10, deltapix=0.2) diff = 0.0000001 - f_xx_sq, f_xy_sq, f_yx_sq, f_yy_sq = lens_model.hessian(x, y, kwargs, diff=diff, diff_method='square') - f_xx_cr, f_xy_cr, f_yx_cr, f_yy_cr = lens_model.hessian(x, y, kwargs, diff=diff, diff_method='cross') + f_xx_sq, f_xy_sq, f_yx_sq, f_yy_sq = lens_model.hessian( + x, y, kwargs, diff=diff, diff_method="square" + ) + f_xx_cr, f_xy_cr, f_yx_cr, f_yy_cr = lens_model.hessian( + x, y, kwargs, diff=diff, diff_method="cross" + ) f_xx, f_xy, f_yx, f_yy = lens_model.hessian(x, y, kwargs, 
diff=None) npt.assert_almost_equal(f_xx_cr, f_xx, decimal=5) npt.assert_almost_equal(f_xy_cr, f_xy, decimal=5) @@ -156,37 +209,48 @@ def test_hessian_differentials(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - kwargs = [{'alpha_Rs': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}] - lensModel = LensModel(['NFW'], multi_plane=True, lens_redshift_list=[1], z_source=2) + kwargs = [{"alpha_Rs": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}] + lensModel = LensModel( + ["NFW"], multi_plane=True, lens_redshift_list=[1], z_source=2 + ) f_x, f_y = lensModel.alpha(1, 1, kwargs, diff=0.0001) with self.assertRaises(ValueError): - lensModel = LensModel(['NFW'], multi_plane=True, lens_redshift_list=[1]) + lensModel = LensModel(["NFW"], multi_plane=True, lens_redshift_list=[1]) with self.assertRaises(ValueError): - kwargs = [{'alpha_Rs': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}] - lensModel = LensModel(['NFW'], multi_plane=False) + kwargs = [{"alpha_Rs": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}] + lensModel = LensModel(["NFW"], multi_plane=False) t_arrival = lensModel.arrival_time(1, 1, kwargs) with self.assertRaises(ValueError): z_lens = 0.5 z_source = 1.5 - x_image, y_image = 1., 0. - lensModel = LensModel(lens_model_list=['SIS'], multi_plane=True, lens_redshift_list=[z_lens], - z_source=z_source) - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}] + x_image, y_image = 1.0, 0.0 + lensModel = LensModel( + lens_model_list=["SIS"], + multi_plane=True, + lens_redshift_list=[z_lens], + z_source=z_source, + ) + kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}] fermat_pot = lensModel.fermat_potential(x_image, y_image, kwargs) with self.assertRaises(ValueError): - lens_model = LensModel(lens_model_list=['SIS']) - kwargs = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}] - lens_model.hessian(0, 0, kwargs, diff=0.001, diff_method='bad') + lens_model = LensModel(lens_model_list=["SIS"]) + kwargs = [{"theta_E": 1.0, "center_x": 0.0, "center_y": 0.0}] + lens_model.hessian(0, 0, kwargs, diff=0.001, diff_method="bad") with self.assertRaises(ValueError): - lens_model = LensModel(lens_model_list=['LOS', 'LOS_MINIMAL']) + lens_model = LensModel(lens_model_list=["LOS", "LOS_MINIMAL"]) with self.assertRaises(ValueError): - lens_model = LensModel(lens_model_list=['LOS', 'EPL', 'NFW'], multi_plane=True, z_source=1.0) + lens_model = LensModel( + lens_model_list=["LOS", "EPL", "NFW"], multi_plane=True, z_source=1.0 + ) with self.assertRaises(ValueError): - lens_model = LensModel(lens_model_list=['LOS_MINIMAL', 'SIS', 'GAUSSIAN'], multi_plane=True, z_source=1.0) + lens_model = LensModel( + lens_model_list=["LOS_MINIMAL", "SIS", "GAUSSIAN"], + multi_plane=True, + z_source=1.0, + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_lens_model_extensions.py b/test/test_LensModel/test_lens_model_extensions.py index 5c4caa5ed..4d83203b3 100644 --- a/test/test_LensModel/test_lens_model_extensions.py +++ b/test/test_LensModel/test_lens_model_extensions.py @@ -1,29 +1,39 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy.testing as npt import numpy as np from lenstronomy.LensModel.lens_model_extensions import LensModelExtensions from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver from lenstronomy.LensModel.lens_model import LensModel -from lenstronomy.Util.magnification_finite_util import auto_raytracing_grid_resolution, 
auto_raytracing_grid_size +from lenstronomy.Util.magnification_finite_util import ( + auto_raytracing_grid_resolution, + auto_raytracing_grid_size, +) import lenstronomy.Util.param_util as param_util from lenstronomy.LightModel.light_model import LightModel from astropy.cosmology import FlatLambdaCDM class TestLensModelExtensions(object): - """ - tests the source model routines - """ - def setup_method(self): + """Tests the source model routines.""" + def setup_method(self): self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3) def test_critical_curves(self): - lens_model_list = ['SPEP'] - phi, q = 1., 0.8 + lens_model_list = ["SPEP"] + phi, q = 1.0, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_lens = [{'theta_E': 1., 'gamma': 2., 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.0, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] lens_model = LensModel(lens_model_list) lensModelExtensions = LensModelExtensions(LensModel(lens_model_list)) @@ -31,8 +41,14 @@ def test_critical_curves(self): compute_window, grid_scale = 5, 0.05 num_pix = int(compute_window / grid_scale) assert num_pix % 2 == 0 - ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lensModelExtensions.critical_curve_caustics(kwargs_lens, - compute_window=compute_window, grid_scale=grid_scale) + ( + ra_crit_list, + dec_crit_list, + ra_caustic_list, + dec_caustic_list, + ) = lensModelExtensions.critical_curve_caustics( + kwargs_lens, compute_window=compute_window, grid_scale=grid_scale + ) # here we test whether the caustic points are in fact at high magnifications (close to infinite) # close here means above magnification of 1000000 (with matplotlib method, this limit achieved was 170) @@ -43,14 +59,19 @@ def test_critical_curves(self): assert np.all(np.abs(mag) > 1000) # this second test is for odd pixel grid numbers - compute_window, grid_scale = 5. 
+ 0.06, 0.05 - print(compute_window / grid_scale, 'test float') + compute_window, grid_scale = 5.0 + 0.06, 0.05 + print(compute_window / grid_scale, "test float") num_pix = int(compute_window / grid_scale) - print(num_pix, 'test num_pix') + print(num_pix, "test num_pix") assert num_pix % 2 == 1 - ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lensModelExtensions.critical_curve_caustics( - kwargs_lens, - compute_window=compute_window, grid_scale=grid_scale) + ( + ra_crit_list, + dec_crit_list, + ra_caustic_list, + dec_caustic_list, + ) = lensModelExtensions.critical_curve_caustics( + kwargs_lens, compute_window=compute_window, grid_scale=grid_scale + ) # here we test whether the caustic points are in fact at high magnifications (close to infinite) # close here means above magnification of 1000000 (with matplotlib method, this limit achieved was 170) @@ -61,13 +82,23 @@ def test_critical_curves(self): assert np.all(np.abs(mag) > 1000) def test_critical_curves_tiling(self): - lens_model_list = ['SPEP'] - phi, q = 1., 0.8 + lens_model_list = ["SPEP"] + phi, q = 1.0, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_lens = [{'theta_E': 1., 'gamma': 2., 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}] + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.0, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + } + ] lensModel = LensModelExtensions(LensModel(lens_model_list)) - ra_crit, dec_crit = lensModel.critical_curve_tiling(kwargs_lens, compute_window=5, start_scale=0.01, - max_order=10) + ra_crit, dec_crit = lensModel.critical_curve_tiling( + kwargs_lens, compute_window=5, start_scale=0.01, max_order=10 + ) # here we test whether the caustic points are in fact at high magnifications (close to infinite) # close here means above magnification of 1000. 
This is more precise than the critical_curve_caustics() method lens_model = LensModel(lens_model_list) @@ -75,40 +106,84 @@ def test_critical_curves_tiling(self): assert np.all(np.abs(mag) > 1000) def test_get_magnification_model(self): - self.kwargs_options = { 'lens_model_list': ['GAUSSIAN'], 'source_light_model_list': ['GAUSSIAN'], - 'lens_light_model_list': ['SERSIC'] - , 'subgrid_res': 10, 'numPix': 200, 'psf_type': 'gaussian', 'x2_simple': True} - kwargs_lens = [{'amp': 1, 'sigma_x': 2, 'sigma_y': 2, 'center_x': 0, 'center_y': 0}] - - x_pos = np.array([1., 1., 2.]) - y_pos = np.array([-1., 0., 0.]) - lens_model = LensModelExtensions(LensModel(lens_model_list=['GAUSSIAN'])) - mag = lens_model.magnification_finite(x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, - grid_number=100) + self.kwargs_options = { + "lens_model_list": ["GAUSSIAN"], + "source_light_model_list": ["GAUSSIAN"], + "lens_light_model_list": ["SERSIC"], + "subgrid_res": 10, + "numPix": 200, + "psf_type": "gaussian", + "x2_simple": True, + } + kwargs_lens = [ + {"amp": 1, "sigma_x": 2, "sigma_y": 2, "center_x": 0, "center_y": 0} + ] + + x_pos = np.array([1.0, 1.0, 2.0]) + y_pos = np.array([-1.0, 0.0, 0.0]) + lens_model = LensModelExtensions(LensModel(lens_model_list=["GAUSSIAN"])) + mag = lens_model.magnification_finite( + x_pos, + y_pos, + kwargs_lens, + source_sigma=0.003, + window_size=0.1, + grid_number=100, + ) npt.assert_almost_equal(mag[0], 0.98848384784633392, decimal=5) def test_magnification_finite(self): - - lens_model_list = ['SPEP','SHEAR'] - - kwargs_lens = [{'theta_E': 1., 'gamma': 2., 'e1': 0.02, 'e2': -0.09, 'center_x': 0, 'center_y': 0}, - {'gamma1':0.01, 'gamma2':0.03}] + lens_model_list = ["SPEP", "SHEAR"] + + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.0, + "e1": 0.02, + "e2": -0.09, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.01, "gamma2": 0.03}, + ] extension = LensModelExtensions(LensModel(lens_model_list)) x_image = [0.56153533, -0.78067875, -0.72551184, 0.75664112] y_image = [-0.74722528, 0.52491177, -0.72799235, 0.78503659] - mag_square_grid = extension.magnification_finite(x_image, y_image, kwargs_lens, source_sigma=0.001, - grid_number=200, window_size=0.1) - mag_polar_grid = extension.magnification_finite(x_image, y_image, kwargs_lens, source_sigma=0.001, - grid_number=200, window_size=0.1, polar_grid=True) - npt.assert_almost_equal(mag_polar_grid,mag_square_grid,decimal=5) + mag_square_grid = extension.magnification_finite( + x_image, + y_image, + kwargs_lens, + source_sigma=0.001, + grid_number=200, + window_size=0.1, + ) + mag_polar_grid = extension.magnification_finite( + x_image, + y_image, + kwargs_lens, + source_sigma=0.001, + grid_number=200, + window_size=0.1, + polar_grid=True, + ) + npt.assert_almost_equal(mag_polar_grid, mag_square_grid, decimal=5) def test_magnification_finite_adaptive(self): - lens_model_list = ['EPL', 'SHEAR'] + lens_model_list = ["EPL", "SHEAR"] z_source = 1.5 - kwargs_lens = [{'theta_E': 1., 'gamma': 2., 'e1': 0.02, 'e2': -0.09, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.01, 'gamma2': 0.03}] + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.0, + "e1": 0.02, + "e2": -0.09, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.01, "gamma2": 0.03}, + ] lensmodel = LensModel(lens_model_list) extension = LensModelExtensions(lensmodel) @@ -116,7 +191,7 @@ def test_magnification_finite_adaptive(self): source_x, source_y = 0.07, 0.03 x_image, y_image = solver.findBrightImage(source_x, source_y, kwargs_lens) - 
source_fwhm_parsec = 40. + source_fwhm_parsec = 40.0 pc_per_arcsec = 1000 / self.cosmo.arcsec_per_kpc_proper(z_source).value source_sigma = source_fwhm_parsec / pc_per_arcsec / 2.355 @@ -127,71 +202,162 @@ def test_magnification_finite_adaptive(self): grid_number = int(2 * grid_size / grid_resolution) window_size = 2 * grid_size - mag_square_grid = extension.magnification_finite(x_image, y_image, kwargs_lens, source_sigma=source_sigma, - grid_number=grid_number, window_size=window_size) + mag_square_grid = extension.magnification_finite( + x_image, + y_image, + kwargs_lens, + source_sigma=source_sigma, + grid_number=grid_number, + window_size=window_size, + ) flux_ratios_square_grid = mag_square_grid / max(mag_square_grid) - mag_adaptive_grid = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, - z_source, cosmo=self.cosmo) + mag_adaptive_grid = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + ) flux_ratios_adaptive_grid = mag_adaptive_grid / max(mag_adaptive_grid) - mag_adaptive_grid_fixed_aperture_size = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, - z_source, cosmo=self.cosmo, fixed_aperture_size=True, - grid_radius_arcsec=0.2) - flux_ratios_fixed_aperture_size = mag_adaptive_grid_fixed_aperture_size / max(mag_adaptive_grid_fixed_aperture_size) - - mag_adaptive_grid_2 = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source, - cosmo=self.cosmo, axis_ratio=0) - mag_adaptive_grid_3 = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source, - cosmo=self.cosmo, axis_ratio=1) + mag_adaptive_grid_fixed_aperture_size = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + fixed_aperture_size=True, + grid_radius_arcsec=0.2, + ) + flux_ratios_fixed_aperture_size = mag_adaptive_grid_fixed_aperture_size / max( + mag_adaptive_grid_fixed_aperture_size + ) + + mag_adaptive_grid_2 = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + axis_ratio=0, + ) + mag_adaptive_grid_3 = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + axis_ratio=1, + ) flux_ratios_adaptive_grid_2 = mag_adaptive_grid_2 / max(mag_adaptive_grid_2) flux_ratios_adaptive_grid_3 = mag_adaptive_grid_3 / max(mag_adaptive_grid_3) # tests the default cosmology - _ = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, - z_source, cosmo=None, tol=0.0001) + _ = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=None, + tol=0.0001, + ) # test smallest eigenvalue - _ = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, - z_source, cosmo=None, tol=0.0001, use_largest_eigenvalue=False) + _ = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=None, + tol=0.0001, + 
use_largest_eigenvalue=False, + ) # tests the r_max > sqrt(2) * grid_radius stop criterion - _ = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, - z_source, cosmo=None, tol=0.0001, step_size=1000) + _ = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=None, + tol=0.0001, + step_size=1000, + ) mag_point_source = abs(lensmodel.magnification(x_image, y_image, kwargs_lens)) quarter_precent_precision = [0.0025] * 4 - npt.assert_array_less(flux_ratios_square_grid / flux_ratios_adaptive_grid - 1, - quarter_precent_precision) - npt.assert_array_less(flux_ratios_fixed_aperture_size / flux_ratios_adaptive_grid - 1, - quarter_precent_precision) - npt.assert_array_less(flux_ratios_square_grid / flux_ratios_adaptive_grid_2 - 1, - quarter_precent_precision) - npt.assert_array_less(flux_ratios_adaptive_grid_3 / flux_ratios_adaptive_grid_2 - 1, - quarter_precent_precision) + npt.assert_array_less( + flux_ratios_square_grid / flux_ratios_adaptive_grid - 1, + quarter_precent_precision, + ) + npt.assert_array_less( + flux_ratios_fixed_aperture_size / flux_ratios_adaptive_grid - 1, + quarter_precent_precision, + ) + npt.assert_array_less( + flux_ratios_square_grid / flux_ratios_adaptive_grid_2 - 1, + quarter_precent_precision, + ) + npt.assert_array_less( + flux_ratios_adaptive_grid_3 / flux_ratios_adaptive_grid_2 - 1, + quarter_precent_precision, + ) half_percent_precision = [0.005] * 4 - npt.assert_array_less(mag_square_grid / mag_adaptive_grid - 1, half_percent_precision) - npt.assert_array_less(mag_square_grid / mag_adaptive_grid_2 - 1, half_percent_precision) - npt.assert_array_less(mag_adaptive_grid / mag_point_source - 1, half_percent_precision) - - flux_array = np.array([0., 0.]) - grid_x = np.array([0., source_sigma]) - grid_y = np.array([0., 0.]) + npt.assert_array_less( + mag_square_grid / mag_adaptive_grid - 1, half_percent_precision + ) + npt.assert_array_less( + mag_square_grid / mag_adaptive_grid_2 - 1, half_percent_precision + ) + npt.assert_array_less( + mag_adaptive_grid / mag_point_source - 1, half_percent_precision + ) + + flux_array = np.array([0.0, 0.0]) + grid_x = np.array([0.0, source_sigma]) + grid_y = np.array([0.0, 0.0]) grid_r = np.hypot(grid_x, grid_y) # test that the double gaussian profile has 2x the flux when dx, dy = 0 - magnification_double_gaussian = extension.magnification_finite_adaptive(x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source, cosmo=self.cosmo, - source_light_model='DOUBLE_GAUSSIAN', dx=0., dy=0., amp_scale=1., size_scale=1.) + magnification_double_gaussian = extension.magnification_finite_adaptive( + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + source_light_model="DOUBLE_GAUSSIAN", + dx=0.0, + dy=0.0, + amp_scale=1.0, + size_scale=1.0, + ) npt.assert_almost_equal(magnification_double_gaussian, 2 * mag_adaptive_grid) grid_radius = 0.3 @@ -200,80 +366,161 @@ def test_magnification_finite_adaptive(self): resolution = 2 * grid_radius / npix xx, yy = np.meshgrid(_x, _y) for i in range(0, 4): - beta_x, beta_y = lensmodel.ray_shooting(x_image[i] + xx.ravel(), y_image[i] + yy.ravel(), kwargs_lens) - source_light_model = LightModel(['GAUSSIAN'] * 2) - amp_scale, dx, dy, size_scale = 0.2, 0.005, -0.005, 1. 
- kwargs_source = [{'amp': 1., 'center_x': source_x, 'center_y': source_y, 'sigma': source_sigma}, - {'amp': amp_scale, 'center_x': source_x + dx, 'center_y': source_y + dy, - 'sigma': source_sigma * size_scale}] + beta_x, beta_y = lensmodel.ray_shooting( + x_image[i] + xx.ravel(), y_image[i] + yy.ravel(), kwargs_lens + ) + source_light_model = LightModel(["GAUSSIAN"] * 2) + amp_scale, dx, dy, size_scale = 0.2, 0.005, -0.005, 1.0 + kwargs_source = [ + { + "amp": 1.0, + "center_x": source_x, + "center_y": source_y, + "sigma": source_sigma, + }, + { + "amp": amp_scale, + "center_x": source_x + dx, + "center_y": source_y + dy, + "sigma": source_sigma * size_scale, + }, + ] sb = source_light_model.surface_brightness(beta_x, beta_y, kwargs_source) - magnification_double_gaussian_reference = np.sum(sb) * resolution ** 2 - magnification_double_gaussian = extension.magnification_finite_adaptive([x_image[i]], [y_image[i]], source_x, source_y, - kwargs_lens, - source_fwhm_parsec, z_source, - cosmo=self.cosmo, - source_light_model='DOUBLE_GAUSSIAN', - dx=dx, dy=dy, amp_scale=amp_scale, - size_scale=size_scale, - grid_resolution=resolution, - grid_radius_arcsec=grid_radius, - axis_ratio=1.) - npt.assert_almost_equal(magnification_double_gaussian/magnification_double_gaussian_reference, 1., 3) - - source_model = LightModel(['GAUSSIAN']) - kwargs_source = [{'amp': 1., 'center_x': source_x, 'center_y': source_y, 'sigma': source_sigma}] - - r_min = 0. + magnification_double_gaussian_reference = np.sum(sb) * resolution**2 + magnification_double_gaussian = extension.magnification_finite_adaptive( + [x_image[i]], + [y_image[i]], + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + cosmo=self.cosmo, + source_light_model="DOUBLE_GAUSSIAN", + dx=dx, + dy=dy, + amp_scale=amp_scale, + size_scale=size_scale, + grid_resolution=resolution, + grid_radius_arcsec=grid_radius, + axis_ratio=1.0, + ) + npt.assert_almost_equal( + magnification_double_gaussian / magnification_double_gaussian_reference, + 1.0, + 3, + ) + + source_model = LightModel(["GAUSSIAN"]) + kwargs_source = [ + { + "amp": 1.0, + "center_x": source_x, + "center_y": source_y, + "sigma": source_sigma, + } + ] + + r_min = 0.0 r_max = source_sigma * 0.9 - flux_array = extension._magnification_adaptive_iteration(flux_array, x_image[0], y_image[0], grid_x, grid_y, grid_r, - r_min, r_max, - lensmodel, kwargs_lens, source_model, kwargs_source) + flux_array = extension._magnification_adaptive_iteration( + flux_array, + x_image[0], + y_image[0], + grid_x, + grid_y, + grid_r, + r_min, + r_max, + lensmodel, + kwargs_lens, + source_model, + kwargs_source, + ) bx, by = lensmodel.ray_shooting(x_image[0], y_image[0], kwargs_lens) sb_true = source_model.surface_brightness(bx, by, kwargs_source) npt.assert_equal(True, flux_array[0] == sb_true) - npt.assert_equal(True, flux_array[1] == 0.) 
+ npt.assert_equal(True, flux_array[1] == 0.0) r_min = source_sigma * 0.9 r_max = 2 * source_sigma - flux_array = extension._magnification_adaptive_iteration(flux_array, x_image[0], y_image[0], grid_x, grid_y, grid_r, - r_min, r_max, - lensmodel, kwargs_lens, source_model, kwargs_source) - bx, by = lensmodel.ray_shooting(x_image[0] + source_sigma, y_image[0], kwargs_lens) + flux_array = extension._magnification_adaptive_iteration( + flux_array, + x_image[0], + y_image[0], + grid_x, + grid_y, + grid_r, + r_min, + r_max, + lensmodel, + kwargs_lens, + source_model, + kwargs_source, + ) + bx, by = lensmodel.ray_shooting( + x_image[0] + source_sigma, y_image[0], kwargs_lens + ) sb_true = source_model.surface_brightness(bx, by, kwargs_source) npt.assert_equal(True, flux_array[1] == sb_true) def test_zoom_source(self): - lens_model_list = ['SIE', 'SHEAR'] + lens_model_list = ["SIE", "SHEAR"] lensModel = LensModel(lens_model_list=lens_model_list) lensModelExtensions = LensModelExtensions(lensModel=lensModel) lensEquationSolver = LensEquationSolver(lensModel=lensModel) x_source, y_source = 0.02, 0.01 - kwargs_lens = [{'theta_E': 1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.05, 'gamma2': -0.03}] - - x_img, y_img = lensEquationSolver.image_position_from_source(kwargs_lens=kwargs_lens, sourcePos_x=x_source, - sourcePos_y=y_source) - - image = lensModelExtensions.zoom_source(x_img[0], y_img[0], kwargs_lens, source_sigma=0.003, window_size=0.1, - grid_number=100, shape="GAUSSIAN") + kwargs_lens = [ + {"theta_E": 1, "e1": 0.1, "e2": 0.1, "center_x": 0, "center_y": 0}, + {"gamma1": 0.05, "gamma2": -0.03}, + ] + + x_img, y_img = lensEquationSolver.image_position_from_source( + kwargs_lens=kwargs_lens, sourcePos_x=x_source, sourcePos_y=y_source + ) + + image = lensModelExtensions.zoom_source( + x_img[0], + y_img[0], + kwargs_lens, + source_sigma=0.003, + window_size=0.1, + grid_number=100, + shape="GAUSSIAN", + ) assert len(image) == 100 def test_tangential_average(self): - lens_model_list = ['SIS'] + lens_model_list = ["SIS"] lensModel = LensModel(lens_model_list=lens_model_list) lensModelExtensions = LensModelExtensions(lensModel=lensModel) - tang_stretch_ave = lensModelExtensions.tangential_average(x=1.1, y=0, kwargs_lens=[{'theta_E': 1, 'center_x': 0, 'center_y': 0}], dr=1, smoothing=None, num_average=9) + tang_stretch_ave = lensModelExtensions.tangential_average( + x=1.1, + y=0, + kwargs_lens=[{"theta_E": 1, "center_x": 0, "center_y": 0}], + dr=1, + smoothing=None, + num_average=9, + ) npt.assert_almost_equal(tang_stretch_ave, -2.525501464097973, decimal=6) def test_caustic_area(self): - lens_model_list = ['SIE'] + lens_model_list = ["SIE"] lensModel = LensModel(lens_model_list=lens_model_list) lensModelExtensions = LensModelExtensions(lensModel=lensModel) - kwargs_lens = [{'theta_E': 1, 'e1': 0.2, 'e2': 0, 'center_x': 0, 'center_y': 0}] - kwargs_caustic_num = {'compute_window': 3, 'grid_scale': 0.005, 'center_x': 0, 'center_y': 0} - - area = lensModelExtensions.caustic_area(kwargs_lens=kwargs_lens, kwargs_caustic_num=kwargs_caustic_num, - index_vertices=0) + kwargs_lens = [{"theta_E": 1, "e1": 0.2, "e2": 0, "center_x": 0, "center_y": 0}] + kwargs_caustic_num = { + "compute_window": 3, + "grid_scale": 0.005, + "center_x": 0, + "center_y": 0, + } + + area = lensModelExtensions.caustic_area( + kwargs_lens=kwargs_lens, + kwargs_caustic_num=kwargs_caustic_num, + index_vertices=0, + ) npt.assert_almost_equal(area, 0.08445866728739478, decimal=3) diff --git 
a/test/test_LensModel/test_lens_param.py b/test/test_LensModel/test_lens_param.py index 8c047c470..6d47f7393 100644 --- a/test/test_LensModel/test_lens_param.py +++ b/test/test_LensModel/test_lens_param.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy.testing as npt @@ -6,48 +6,101 @@ class TestParam(object): - def setup_method(self): - self.lens_model_list = ['SPEP', - 'INTERPOL_SCALED', - 'SHAPELETS_CART', - 'MULTI_GAUSSIAN_KAPPA' - ] + self.lens_model_list = [ + "SPEP", + "INTERPOL_SCALED", + "SHAPELETS_CART", + "MULTI_GAUSSIAN_KAPPA", + ] self.kwargs = [ - {'theta_E': 1., 'gamma': 2, 'e1': 0, 'e2': 0, 'center_x': 0, 'center_y': 0}, # 'SPEP - {'scale_factor': 1, 'grid_interp_x': None, 'grid_interp_y': None, 'f_x': None, 'f_y': None}, # 'INTERPOL_SCALED' - {'coeffs': [1, 1, 1, 1, 1, 1], 'beta': 1., 'center_x': 0, 'center_y': 0}, # 'SHAPELETS_CART' - {'amp': [1, 2], 'sigma': [0.5, 1], 'center_x': 0, 'center_y': 0, 'scale_factor': 1}, # 'MULTI_GAUSSIAN_KAPPA' - ] + { + "theta_E": 1.0, + "gamma": 2, + "e1": 0, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, # 'SPEP + { + "scale_factor": 1, + "grid_interp_x": None, + "grid_interp_y": None, + "f_x": None, + "f_y": None, + }, # 'INTERPOL_SCALED' + { + "coeffs": [1, 1, 1, 1, 1, 1], + "beta": 1.0, + "center_x": 0, + "center_y": 0, + }, # 'SHAPELETS_CART' + { + "amp": [1, 2], + "sigma": [0.5, 1], + "center_x": 0, + "center_y": 0, + "scale_factor": 1, + }, # 'MULTI_GAUSSIAN_KAPPA' + ] self.kwargs_sigma = [ - {'theta_E_sigma': 1., 'gamma_sigma': 2, 'e1_sigma': 0.1, 'e2_sigma': 0.1, 'center_x_sigma': 0, 'center_y_sigma': 0}, # 'SPEP - {'scale_factor_sigma': 1}, # 'INTERPOL_SCALED' - {'coeffs_sigma': 0.1, 'beta_sigma': 1., 'center_x_sigma': 0, 'center_y_sigma': 0}, # 'SHAPELETS_CART' - {'amp_sigma': [1, 1], 'sigma_sigma': [1, 1], 'center_x_sigma': 0, 'center_y_sigma': 0, 'scale_factor_sigma': 1}, + { + "theta_E_sigma": 1.0, + "gamma_sigma": 2, + "e1_sigma": 0.1, + "e2_sigma": 0.1, + "center_x_sigma": 0, + "center_y_sigma": 0, + }, # 'SPEP + {"scale_factor_sigma": 1}, # 'INTERPOL_SCALED' + { + "coeffs_sigma": 0.1, + "beta_sigma": 1.0, + "center_x_sigma": 0, + "center_y_sigma": 0, + }, # 'SHAPELETS_CART' + { + "amp_sigma": [1, 1], + "sigma_sigma": [1, 1], + "center_x_sigma": 0, + "center_y_sigma": 0, + "scale_factor_sigma": 1, + }, + ] + self.kwargs_fixed = [ + {}, + {"grid_interp_x": None, "grid_interp_y": None, "f_x": None, "f_y": None}, + {}, + {"sigma": [1, 2]}, ] - self.kwargs_fixed = [{}, - {'grid_interp_x': None, 'grid_interp_y': None, 'f_x': None, 'f_y': None}, - {}, - {'sigma': [1, 2]} - ] self.kwargs_mean = [] for i in range(len(self.lens_model_list)): kwargs_mean_k = self.kwargs[i].copy() kwargs_mean_k.update(self.kwargs_sigma[i]) self.kwargs_mean.append(kwargs_mean_k) - self.param = LensParam(lens_model_list=self.lens_model_list, - kwargs_fixed=self.kwargs_fixed, num_images=2, solver_type='SHAPELETS', num_shapelet_lens=6) - self.param_fixed = LensParam(lens_model_list=self.lens_model_list, - kwargs_fixed=self.kwargs, num_images=4, solver_type='NONE', num_shapelet_lens=6) + self.param = LensParam( + lens_model_list=self.lens_model_list, + kwargs_fixed=self.kwargs_fixed, + num_images=2, + solver_type="SHAPELETS", + num_shapelet_lens=6, + ) + self.param_fixed = LensParam( + lens_model_list=self.lens_model_list, + kwargs_fixed=self.kwargs, + num_images=4, + solver_type="NONE", + num_shapelet_lens=6, + ) def test_get_setParams(self): - print(self.kwargs, 'kwargs') + print(self.kwargs, "kwargs") 
args = self.param.set_params(self.kwargs) - print(args, 'args') + print(args, "args") kwargs_new, _ = self.param.get_params(args, i=0) - print(kwargs_new, 'kwargs_new') + print(kwargs_new, "kwargs_new") args_new = self.param.set_params(kwargs_new) - print(args_new, 'args_new') + print(args_new, "args_new") for k in range(len(args)): npt.assert_almost_equal(args[k], args_new[k], decimal=8) @@ -58,16 +111,29 @@ def test_get_setParams(self): npt.assert_almost_equal(args[k], args_new[k], decimal=8) def test_param_name_list(self): - lens_model_list = ['SHIFT', 'FLEXION', 'SIS_TRUNCATED', 'SERSIC', - 'SERSIC_ELLIPSE_POTENTIAL', 'SERSIC_ELLIPSE_KAPPA', - 'PJAFFE', 'PJAFFE_ELLIPSE', 'HERNQUIST_ELLIPSE', 'INTERPOL', 'INTERPOL_SCALED', - 'SHAPELETS_POLAR', 'DIPOLE', - 'GAUSSIAN_ELLIPSE_KAPPA', 'SERSIC_ELLIPSE_KAPPA', - 'SERSIC_ELLIPSE_GAUSS_DEC', 'NFW_ELLIPSE_GAUSS_DEC', - 'CTNFW_GAUSS_DEC', - 'GAUSSIAN_ELLIPSE_POTENTIAL', - 'MULTI_GAUSSIAN_KAPPA', - 'MULTI_GAUSSIAN_KAPPA_ELLIPSE'] + lens_model_list = [ + "SHIFT", + "FLEXION", + "SIS_TRUNCATED", + "SERSIC", + "SERSIC_ELLIPSE_POTENTIAL", + "SERSIC_ELLIPSE_KAPPA", + "PJAFFE", + "PJAFFE_ELLIPSE", + "HERNQUIST_ELLIPSE", + "INTERPOL", + "INTERPOL_SCALED", + "SHAPELETS_POLAR", + "DIPOLE", + "GAUSSIAN_ELLIPSE_KAPPA", + "SERSIC_ELLIPSE_KAPPA", + "SERSIC_ELLIPSE_GAUSS_DEC", + "NFW_ELLIPSE_GAUSS_DEC", + "CTNFW_GAUSS_DEC", + "GAUSSIAN_ELLIPSE_POTENTIAL", + "MULTI_GAUSSIAN_KAPPA", + "MULTI_GAUSSIAN_KAPPA_ELLIPSE", + ] lensParam = LensParam(lens_model_list, kwargs_fixed=None) param_name_list = lensParam._param_name_list assert len(lens_model_list) == len(param_name_list) @@ -77,27 +143,51 @@ def test_num_params(self): assert num == 20 def test_shapelet_solver(self): - lens_model_list = ['SHAPELETS_CART'] - lensParam = LensParam(lens_model_list, kwargs_fixed=[{}], num_images=2, solver_type='SHAPELETS', - num_shapelet_lens=8) - kwargs_lens = [{'beta': 1, 'coeffs': [0, 1, 2, 3, 4, 5, 5, 7], 'center_x':0, 'center_y': 0}] + lens_model_list = ["SHAPELETS_CART"] + lensParam = LensParam( + lens_model_list, + kwargs_fixed=[{}], + num_images=2, + solver_type="SHAPELETS", + num_shapelet_lens=8, + ) + kwargs_lens = [ + { + "beta": 1, + "coeffs": [0, 1, 2, 3, 4, 5, 5, 7], + "center_x": 0, + "center_y": 0, + } + ] args = lensParam.set_params(kwargs_lens) kwargs_out, i = lensParam.get_params(args, i=0) - assert kwargs_out[0]['coeffs'][1] == 0 - assert kwargs_out[0]['beta'] == kwargs_lens[0]['beta'] + assert kwargs_out[0]["coeffs"][1] == 0 + assert kwargs_out[0]["beta"] == kwargs_lens[0]["beta"] num, param_list = lensParam.num_param() assert num == 8 - lensParam = LensParam(lens_model_list, kwargs_fixed=[{}], num_images=4, solver_type='SHAPELETS', - num_shapelet_lens=8) - kwargs_lens = [{'beta': 1, 'coeffs': [0, 1, 2, 3, 4, 5, 5, 7], 'center_x': 0, 'center_y': 0}] + lensParam = LensParam( + lens_model_list, + kwargs_fixed=[{}], + num_images=4, + solver_type="SHAPELETS", + num_shapelet_lens=8, + ) + kwargs_lens = [ + { + "beta": 1, + "coeffs": [0, 1, 2, 3, 4, 5, 5, 7], + "center_x": 0, + "center_y": 0, + } + ] args = lensParam.set_params(kwargs_lens) kwargs_out, i = lensParam.get_params(args, i=0) - assert kwargs_out[0]['coeffs'][5] == 0 - assert kwargs_out[0]['beta'] == kwargs_lens[0]['beta'] + assert kwargs_out[0]["coeffs"][5] == 0 + assert kwargs_out[0]["beta"] == kwargs_lens[0]["beta"] num, param_list = lensParam.num_param() assert num == 5 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git 
a/test/test_LensModel/test_numeric_lens_differentials.py b/test/test_LensModel/test_numeric_lens_differentials.py index d7641a17e..e49622167 100644 --- a/test/test_LensModel/test_numeric_lens_differentials.py +++ b/test/test_LensModel/test_numeric_lens_differentials.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -9,28 +9,35 @@ class TestNumerics(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): - self.lensModel = LensModel(['GAUSSIAN']) - self.kwargs = [{'amp': 1./4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.}] + self.lensModel = LensModel(["GAUSSIAN"]) + self.kwargs = [ + { + "amp": 1.0 / 4.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 0.0, + "center_y": 0.0, + } + ] def test_kappa(self): - x, y = 1., 1. + x, y = 1.0, 1.0 output = self.lensModel.kappa(x, y, self.kwargs) output_num = self.lensModel.kappa(x, y, self.kwargs, diff=0.00001) npt.assert_almost_equal(output_num, output, decimal=5) def test_gamma(self): - x, y = 1., 2. + x, y = 1.0, 2.0 output1, output2 = self.lensModel.gamma(x, y, self.kwargs) output1_num, output2_num = self.lensModel.gamma(x, y, self.kwargs, diff=0.00001) npt.assert_almost_equal(output1_num, output1, decimal=5) npt.assert_almost_equal(output2_num, output2, decimal=5) def test_magnification(self): - x, y = 1., 1. + x, y = 1.0, 1.0 output = self.lensModel.magnification(x, y, self.kwargs) output_num = self.lensModel.magnification(x, y, self.kwargs, diff=0.00001) npt.assert_almost_equal(output_num, output, decimal=5) @@ -38,7 +45,9 @@ def test_magnification(self): def test_differentials(self): x, y = util.make_grid(numPix=10, deltapix=0.5) f_xx, f_xy, f_yx, f_yy = self.lensModel.hessian(x, y, self.kwargs) - f_xx_num, f_xy_num, f_yx_num, f_yy_num = self.lensModel.hessian(x, y, self.kwargs, diff=0.00001) + f_xx_num, f_xy_num, f_yx_num, f_yy_num = self.lensModel.hessian( + x, y, self.kwargs, diff=0.00001 + ) npt.assert_almost_equal(f_xy_num, f_yx_num, decimal=5) npt.assert_almost_equal(f_xx_num, f_xx, decimal=5) @@ -47,16 +56,20 @@ def test_differentials(self): npt.assert_almost_equal(f_yy_num, f_yy, decimal=5) def test_flexion(self): - lensModel = LensModel(lens_model_list=['FLEXION']) + lensModel = LensModel(lens_model_list=["FLEXION"]) g1, g2, g3, g4 = 0.01, 0.02, 0.03, 0.04 - kwargs = [{'g1': g1, 'g2': g2, 'g3': g3, 'g4': g4}] - f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1., y=1., kwargs=kwargs, diff=0.0001) + kwargs = [{"g1": g1, "g2": g2, "g3": g3, "g4": g4}] + f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion( + x=1.0, y=1.0, kwargs=kwargs, diff=0.0001 + ) npt.assert_almost_equal(f_xxx, g1, decimal=2) npt.assert_almost_equal(f_xxy, g2, decimal=2) npt.assert_almost_equal(f_xyy, g3, decimal=2) npt.assert_almost_equal(f_yyy, g4, decimal=2) - f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion(x=1., y=1., kwargs=kwargs, diff=0.0001, hessian_diff=0.001) + f_xxx, f_xxy, f_xyy, f_yyy = lensModel.flexion( + x=1.0, y=1.0, kwargs=kwargs, diff=0.0001, hessian_diff=0.001 + ) npt.assert_almost_equal(f_xxx, g1, decimal=2) npt.assert_almost_equal(f_xxy, g2, decimal=2) npt.assert_almost_equal(f_xyy, g3, decimal=2) @@ -64,9 +77,8 @@ def test_flexion(self): class TestNumericsProfile(object): - """ - tests the second derivatives of various lens models - """ + """Tests the second derivatives of various lens models.""" + def setup_method(self): pass @@ -80,7 +92,9 @@ def assert_differentials(self, lens_model, kwargs, 
potential=True): lensModel = LensModel(lens_model) f_xx, f_xy, f_yx, f_yy = lensModel.hessian(x, y, [kwargs]) - f_xx_num, f_xy_num, f_yx_num, f_yy_num = lensModel.hessian(x, y, [kwargs], diff=diff) + f_xx_num, f_xy_num, f_yx_num, f_yy_num = lensModel.hessian( + x, y, [kwargs], diff=diff + ) npt.assert_almost_equal(f_xx, f_xx_num, decimal=3) npt.assert_almost_equal(f_yy, f_yy_num, decimal=3) @@ -98,7 +112,9 @@ def assert_differentials(self, lens_model, kwargs, potential=True): lensModel = LensModel(lens_model) f_xx, f_xy, f_yx, f_yy = lensModel.hessian(x, y, [kwargs]) - f_xx_num, f_xy_num, f_yx_num, f_yy_num = lensModel.hessian(x, y, [kwargs], diff=diff) + f_xx_num, f_xy_num, f_yx_num, f_yy_num = lensModel.hessian( + x, y, [kwargs], diff=diff + ) npt.assert_almost_equal(f_xx, f_xx_num, decimal=3) npt.assert_almost_equal(f_yy, f_yy_num, decimal=3) @@ -112,402 +128,593 @@ def assert_differentials(self, lens_model, kwargs, potential=True): npt.assert_almost_equal(f_y, f_y_num, decimal=3) def test_gaussian(self): - lens_model = ['GAUSSIAN'] - kwargs = {'amp': 1. / 4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.} - self.assert_differentials(lens_model, kwargs) - kwargs = {'amp': 1. / 4., 'sigma_x': 20., 'sigma_y': 20., 'center_x': 0., 'center_y': 0.} - self.assert_differentials(lens_model, kwargs) - kwargs = {'amp': 1. / 4., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 2., 'center_y': 2.} + lens_model = ["GAUSSIAN"] + kwargs = { + "amp": 1.0 / 4.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 0.0, + "center_y": 0.0, + } + self.assert_differentials(lens_model, kwargs) + kwargs = { + "amp": 1.0 / 4.0, + "sigma_x": 20.0, + "sigma_y": 20.0, + "center_x": 0.0, + "center_y": 0.0, + } + self.assert_differentials(lens_model, kwargs) + kwargs = { + "amp": 1.0 / 4.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 2.0, + "center_y": 2.0, + } self.assert_differentials(lens_model, kwargs) def test_gausian_kappa(self): - kwargs = {'amp': 1. 
/ 4., 'sigma': 2., 'center_x': 0., 'center_y': 0.} - lens_model = ['GAUSSIAN_KAPPA'] + kwargs = {"amp": 1.0 / 4.0, "sigma": 2.0, "center_x": 0.0, "center_y": 0.0} + lens_model = ["GAUSSIAN_KAPPA"] self.assert_differentials(lens_model, kwargs) def test_gausian_ellipse_kappa(self): - kwargs = {'amp': 1., 'sigma': 1., 'e1': 0.1, 'e2': -0.1, 'center_x': 0., 'center_y': 0.} - lens_model = ['GAUSSIAN_ELLIPSE_KAPPA'] + kwargs = { + "amp": 1.0, + "sigma": 1.0, + "e1": 0.1, + "e2": -0.1, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["GAUSSIAN_ELLIPSE_KAPPA"] self.assert_differentials(lens_model, kwargs) def test_gausian_ellipse_potential(self): - kwargs = {'amp': 1., 'sigma': 2., 'e1': .1, 'e2': -0.1, 'center_x': 0., 'center_y': 0.} - lens_model = ['GAUSSIAN_ELLIPSE_POTENTIAL'] + kwargs = { + "amp": 1.0, + "sigma": 2.0, + "e1": 0.1, + "e2": -0.1, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["GAUSSIAN_ELLIPSE_POTENTIAL"] self.assert_differentials(lens_model, kwargs) def test_external_shear(self): - kwargs = {'gamma1': 0.1, 'gamma2': -0.1} - lens_model = ['SHEAR'] + kwargs = {"gamma1": 0.1, "gamma2": -0.1} + lens_model = ["SHEAR"] self.assert_differentials(lens_model, kwargs) def test_reduce_shear(self): - kwargs = {'gamma1': 0.1, 'gamma2': -0.1} - lens_model = ['SHEAR_REDUCED'] + kwargs = {"gamma1": 0.1, "gamma2": -0.1} + lens_model = ["SHEAR_REDUCED"] self.assert_differentials(lens_model, kwargs) def test_hessian(self): - kwargs = {'f_xx': 0.1, 'f_yy': -0.1, 'f_xy': 0.1, 'f_yx': 0.1} - lens_model = ['HESSIAN'] + kwargs = {"f_xx": 0.1, "f_yy": -0.1, "f_xy": 0.1, "f_yx": 0.1} + lens_model = ["HESSIAN"] self.assert_differentials(lens_model, kwargs) def test_mass_sheet(self): - kwargs = {'kappa': 0.1} - lens_model = ['CONVERGENCE'] + kwargs = {"kappa": 0.1} + lens_model = ["CONVERGENCE"] self.assert_differentials(lens_model, kwargs) def test_sis(self): - kwargs = {'theta_E': 0.5} - lens_model = ['SIS'] + kwargs = {"theta_E": 0.5} + lens_model = ["SIS"] self.assert_differentials(lens_model, kwargs) def test_flexion(self): - kwargs = {'g1': 0.01, 'g2': -0.01, 'g3': 0.001, 'g4': 0} - lens_model = ['FLEXION'] + kwargs = {"g1": 0.01, "g2": -0.01, "g3": 0.001, "g4": 0} + lens_model = ["FLEXION"] self.assert_differentials(lens_model, kwargs) def test_nfw(self): - kwargs = {'alpha_Rs': .1, 'Rs': 5.} - lens_model = ['NFW'] + kwargs = {"alpha_Rs": 0.1, "Rs": 5.0} + lens_model = ["NFW"] self.assert_differentials(lens_model, kwargs) def test_tnfw(self): - kwargs = {'alpha_Rs': .1, 'Rs': 5., 'r_trunc': 7} - lens_model = ['TNFW'] + kwargs = {"alpha_Rs": 0.1, "Rs": 5.0, "r_trunc": 7} + lens_model = ["TNFW"] self.assert_differentials(lens_model, kwargs) - kwargs = {'Rs': 2., 'alpha_Rs': 1., 'r_trunc': 7} - lens_model = ['TNFW'] + kwargs = {"Rs": 2.0, "alpha_Rs": 1.0, "r_trunc": 7} + lens_model = ["TNFW"] self.assert_differentials(lens_model, kwargs) def test_tnfw_ellipse(self): - lens_model = ['TNFW_ELLIPSE'] + lens_model = ["TNFW_ELLIPSE"] - kwargs = {'alpha_Rs': .1, 'Rs': 1., 'r_trunc': 7, 'e1': 0, 'e2': 0} + kwargs = {"alpha_Rs": 0.1, "Rs": 1.0, "r_trunc": 7, "e1": 0, "e2": 0} self.assert_differentials(lens_model, kwargs) - kwargs = {'alpha_Rs': .1, 'Rs': 1., 'r_trunc': 7, 'e1': 0.1, 'e2': -0.02} + kwargs = {"alpha_Rs": 0.1, "Rs": 1.0, "r_trunc": 7, "e1": 0.1, "e2": -0.02} self.assert_differentials(lens_model, kwargs) - kwargs = {'Rs': .5, 'alpha_Rs': 1., 'r_trunc': 100, 'e1': 0.1, 'e2': -0.02} + kwargs = {"Rs": 0.5, "alpha_Rs": 1.0, "r_trunc": 100, "e1": 0.1, "e2": -0.02} 
self.assert_differentials(lens_model, kwargs) def test_nfw_ellipse(self): - kwargs = {'alpha_Rs': .1, 'Rs': 5., 'e1': 0.04, 'e2': -0.04} - lens_model = ['NFW_ELLIPSE'] + kwargs = {"alpha_Rs": 0.1, "Rs": 5.0, "e1": 0.04, "e2": -0.04} + lens_model = ["NFW_ELLIPSE"] self.assert_differentials(lens_model, kwargs) def test_nfw_ellipse_gauss_dec(self): - kwargs = {'alpha_Rs': .1, 'Rs': 5., 'e1': 0.04, 'e2': -0.04} - lens_model = ['NFW_ELLIPSE_GAUSS_DEC'] + kwargs = {"alpha_Rs": 0.1, "Rs": 5.0, "e1": 0.04, "e2": -0.04} + lens_model = ["NFW_ELLIPSE_GAUSS_DEC"] self.assert_differentials(lens_model, kwargs) def test_ctnfw_gauss_dec(self): - kwargs = {'rho_s': 5, 'r_s': 5., 'r_trunc': 10., 'r_core': 0.3, 'a': 2} - lens_model = ['CTNFW_GAUSS_DEC'] + kwargs = {"rho_s": 5, "r_s": 5.0, "r_trunc": 10.0, "r_core": 0.3, "a": 2} + lens_model = ["CTNFW_GAUSS_DEC"] self.assert_differentials(lens_model, kwargs) def test_point_mass(self): - kwargs = {'theta_E': 2.} - lens_model = ['POINT_MASS'] + kwargs = {"theta_E": 2.0} + lens_model = ["POINT_MASS"] self.assert_differentials(lens_model, kwargs) def test_sersic(self): - kwargs = {'n_sersic': .5, 'R_sersic': 1.5, 'k_eff': 0.3} - lens_model = ['SERSIC'] + kwargs = {"n_sersic": 0.5, "R_sersic": 1.5, "k_eff": 0.3} + lens_model = ["SERSIC"] self.assert_differentials(lens_model, kwargs) def test_sersic_ellipse_gauss_dec(self): - kwargs = {'n_sersic': 1., 'R_sersic': 2., 'k_eff': 1., 'e1': 0.04, - 'e2': 0.} - lens_model = ['SERSIC_ELLIPSE_GAUSS_DEC'] + kwargs = {"n_sersic": 1.0, "R_sersic": 2.0, "k_eff": 1.0, "e1": 0.04, "e2": 0.0} + lens_model = ["SERSIC_ELLIPSE_GAUSS_DEC"] self.assert_differentials(lens_model, kwargs) def test_sersic_ellipse_pot(self): - kwargs = {'n_sersic': 2., 'R_sersic': 0.5, 'k_eff': 0.3, 'e1': 0.04, 'e2': -0.0} - lens_model = ['SERSIC_ELLIPSE_POTENTIAL'] + kwargs = { + "n_sersic": 2.0, + "R_sersic": 0.5, + "k_eff": 0.3, + "e1": 0.04, + "e2": -0.0, + } + lens_model = ["SERSIC_ELLIPSE_POTENTIAL"] self.assert_differentials(lens_model, kwargs) def test_shapelets_pot_2(self): - kwargs = {'coeffs': [0, 1, 2, 3, 4, 5], 'beta': 0.3} - lens_model = ['SHAPELETS_CART'] + kwargs = {"coeffs": [0, 1, 2, 3, 4, 5], "beta": 0.3} + lens_model = ["SHAPELETS_CART"] self.assert_differentials(lens_model, kwargs) def test_sis_truncate(self): - kwargs = {'theta_E': 0.5, 'r_trunc': 2.} - lens_model = ['SIS_TRUNCATED'] + kwargs = {"theta_E": 0.5, "r_trunc": 2.0} + lens_model = ["SIS_TRUNCATED"] self.assert_differentials(lens_model, kwargs) def test_spep(self): - kwargs = {'theta_E': 0.5, 'gamma': 1.9, 'e1': 0.04, 'e2': -0.1} - lens_model = ['SPEP'] + kwargs = {"theta_E": 0.5, "gamma": 1.9, "e1": 0.04, "e2": -0.1} + lens_model = ["SPEP"] self.assert_differentials(lens_model, kwargs) def test_spp(self): - kwargs = {'theta_E': 0.5, 'gamma': 1.9} - lens_model = ['SPP'] + kwargs = {"theta_E": 0.5, "gamma": 1.9} + lens_model = ["SPP"] self.assert_differentials(lens_model, kwargs) def test_PJaffe(self): - kwargs = {'sigma0': 1., 'Ra': 0.2, 'Rs': 2.} - lens_model = ['PJAFFE'] + kwargs = {"sigma0": 1.0, "Ra": 0.2, "Rs": 2.0} + lens_model = ["PJAFFE"] self.assert_differentials(lens_model, kwargs) def test_PJaffe_ellipse(self): - kwargs = {'sigma0': 1., 'Ra': 0.2, 'Rs': 2., 'e1': 0.04, 'e2': -0.0} - lens_model = ['PJAFFE_ELLIPSE'] + kwargs = {"sigma0": 1.0, "Ra": 0.2, "Rs": 2.0, "e1": 0.04, "e2": -0.0} + lens_model = ["PJAFFE_ELLIPSE"] self.assert_differentials(lens_model, kwargs) def test_Hernquist(self): - kwargs = {'sigma0': 1., 'Rs': 1.5} - lens_model = ['HERNQUIST'] + kwargs 
= {"sigma0": 1.0, "Rs": 1.5} + lens_model = ["HERNQUIST"] self.assert_differentials(lens_model, kwargs) def test_Hernquist_ellipse(self): - kwargs = {'sigma0': 1., 'Rs': 1.5, 'e1': 0.04, 'e2': -0.0} - lens_model = ['HERNQUIST_ELLIPSE'] + kwargs = {"sigma0": 1.0, "Rs": 1.5, "e1": 0.04, "e2": -0.0} + lens_model = ["HERNQUIST_ELLIPSE"] self.assert_differentials(lens_model, kwargs) def test_NIE(self): - kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0., 's_scale': 0.04} - lens_model = ['NIE'] + kwargs = {"theta_E": 2.0, "e1": 0.1, "e2": 0.0, "s_scale": 0.04} + lens_model = ["NIE"] self.assert_differentials(lens_model, kwargs) def test_NIE_simple(self): - kwargs = {'b': 2., 'q': 0.3, 's': 0.04} - lens_model = ['NIE_SIMPLE'] + kwargs = {"b": 2.0, "q": 0.3, "s": 0.04} + lens_model = ["NIE_SIMPLE"] self.assert_differentials(lens_model, kwargs) def test_EPL(self): - kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0., 'gamma': 2.13} - lens_model = ['EPL'] + kwargs = {"theta_E": 2.0, "e1": 0.1, "e2": 0.0, "gamma": 2.13} + lens_model = ["EPL"] self.assert_differentials(lens_model, kwargs) def test_EPL_BOXYDISKY(self): - kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0.2, 'gamma': 2.13, 'a_m': 0.1} - lens_model = ['EPL_BOXYDISKY'] + kwargs = {"theta_E": 2.0, "e1": 0.1, "e2": 0.2, "gamma": 2.13, "a_m": 0.1} + lens_model = ["EPL_BOXYDISKY"] self.assert_differentials(lens_model, kwargs) def test_EPL_numba(self): - kwargs = {'theta_E': 2., 'e1': 0.1, 'e2': 0., 'gamma': 2.13} - lens_model = ['EPL_NUMBA'] + kwargs = {"theta_E": 2.0, "e1": 0.1, "e2": 0.0, "gamma": 2.13} + lens_model = ["EPL_NUMBA"] self.assert_differentials(lens_model, kwargs) def test_coreBurk(self): - kwargs = {'Rs':2, 'alpha_Rs': 1, 'r_core':0.4} - lens_model = ['coreBURKERT'] + kwargs = {"Rs": 2, "alpha_Rs": 1, "r_core": 0.4} + lens_model = ["coreBURKERT"] self.assert_differentials(lens_model, kwargs) - kwargs = {'Rs': 2, 'alpha_Rs': 1, 'r_core':5} + kwargs = {"Rs": 2, "alpha_Rs": 1, "r_core": 5} self.assert_differentials(lens_model, kwargs) def test_cnfw(self): - kwargs = {'Rs': 5.5, 'alpha_Rs': 1., 'r_core': .5} - lens_model = ['CNFW'] + kwargs = {"Rs": 5.5, "alpha_Rs": 1.0, "r_core": 0.5} + lens_model = ["CNFW"] self.assert_differentials(lens_model, kwargs, potential=True) def test_cnfw_ellipse(self): - kwargs = {'alpha_Rs': .1, 'Rs': 5., 'r_core': 0.1, 'e1': 0.04, 'e2': -0.04} - lens_model = ['CNFW_ELLIPSE'] + kwargs = {"alpha_Rs": 0.1, "Rs": 5.0, "r_core": 0.1, "e1": 0.04, "e2": -0.04} + lens_model = ["CNFW_ELLIPSE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_cored_density(self): - kwargs = {'sigma0': 0.1, 'r_core': 8} - lens_model = ['CORED_DENSITY'] + kwargs = {"sigma0": 0.1, "r_core": 8} + lens_model = ["CORED_DENSITY"] self.assert_differentials(lens_model, kwargs) def test_cored_density_2(self): - kwargs = {'sigma0': 0.1, 'r_core': 8} - lens_model = ['CORED_DENSITY_2'] + kwargs = {"sigma0": 0.1, "r_core": 8} + lens_model = ["CORED_DENSITY_2"] self.assert_differentials(lens_model, kwargs) def test_cored_density_exp(self): - kwargs = {'kappa_0': 0.1, 'theta_c': 8} - lens_model = ['CORED_DENSITY_EXP'] + kwargs = {"kappa_0": 0.1, "theta_c": 8} + lens_model = ["CORED_DENSITY_EXP"] self.assert_differentials(lens_model, kwargs) def test_cored_density_mst(self): - kwargs = {'lambda_approx': 1.1, 'r_core': 8} - lens_model = ['CORED_DENSITY_MST'] + kwargs = {"lambda_approx": 1.1, "r_core": 8} + lens_model = ["CORED_DENSITY_MST"] self.assert_differentials(lens_model, kwargs) def test_cored_density_2_mst(self): - kwargs = 
{'lambda_approx': 1.1, 'r_core': 8} - lens_model = ['CORED_DENSITY_2_MST'] + kwargs = {"lambda_approx": 1.1, "r_core": 8} + lens_model = ["CORED_DENSITY_2_MST"] self.assert_differentials(lens_model, kwargs) def test_cored_density_exp_mst(self): - kwargs = {'lambda_approx': 1.1, 'r_core': 8} - lens_model = ['CORED_DENSITY_EXP_MST'] + kwargs = {"lambda_approx": 1.1, "r_core": 8} + lens_model = ["CORED_DENSITY_EXP_MST"] self.assert_differentials(lens_model, kwargs) def test_cored_density_uldm_mst(self): - kwargs = {'lambda_approx': 0.9, 'r_core': 8} - lens_model = ['CORED_DENSITY_ULDM_MST'] + kwargs = {"lambda_approx": 0.9, "r_core": 8} + lens_model = ["CORED_DENSITY_ULDM_MST"] self.assert_differentials(lens_model, kwargs) def test_uldm(self): - kwargs = {'kappa_0': 0.1, 'theta_c': 5, 'slope': 7.5} - lens_model = ['ULDM'] + kwargs = {"kappa_0": 0.1, "theta_c": 5, "slope": 7.5} + lens_model = ["ULDM"] self.assert_differentials(lens_model, kwargs) def test_const_mag_positive(self): - kwargs = {'mu_r': 1, 'mu_t': 10, 'parity': 1, 'phi_G': 0.1} - lens_model = ['CONST_MAG'] + kwargs = {"mu_r": 1, "mu_t": 10, "parity": 1, "phi_G": 0.1} + lens_model = ["CONST_MAG"] self.assert_differentials(lens_model, kwargs) def test_const_mag_negative(self): - kwargs = {'mu_r': 1, 'mu_t': 10, 'parity': -1, 'phi_G': 0.1} - lens_model = ['CONST_MAG'] + kwargs = {"mu_r": 1, "mu_t": 10, "parity": -1, "phi_G": 0.1} + lens_model = ["CONST_MAG"] self.assert_differentials(lens_model, kwargs) def test_nie_potential(self): - kwargs = {'theta_E':2. , 'theta_c':1. , 'e1': 0.1, 'e2': 0.1} - lens_model = ['NIE_POTENTIAL'] + kwargs = {"theta_E": 2.0, "theta_c": 1.0, "e1": 0.1, "e2": 0.1} + lens_model = ["NIE_POTENTIAL"] self.assert_differentials(lens_model, kwargs) def test_multipole(self): - kwargs = {'m':4, 'a_m':0.05, 'phi_m':0.1, 'center_x':0., 'center_y':0.} - lens_model = ['MULTIPOLE'] - self.assert_differentials(lens_model,kwargs,potential=True) - kwargs = {'m': 2, 'a_m': 0.1, 'phi_m': 0.05, 'center_x': 0., 'center_y': 0.} - lens_model = ['MULTIPOLE'] + kwargs = {"m": 4, "a_m": 0.05, "phi_m": 0.1, "center_x": 0.0, "center_y": 0.0} + lens_model = ["MULTIPOLE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'m': 3, 'a_m': 0.07, 'phi_m': 0., 'center_x': -0.01, 'center_y': -0.5} - lens_model = ['MULTIPOLE'] + kwargs = {"m": 2, "a_m": 0.1, "phi_m": 0.05, "center_x": 0.0, "center_y": 0.0} + lens_model = ["MULTIPOLE"] + self.assert_differentials(lens_model, kwargs, potential=True) + kwargs = { + "m": 3, + "a_m": 0.07, + "phi_m": 0.0, + "center_x": -0.01, + "center_y": -0.5, + } + lens_model = ["MULTIPOLE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_elli_slice(self): - kwargs = {'a':2., 'b':1., 'psi':90*np.pi/180., 'sigma_0':5., 'center_x':1., 'center_y':0.} - lens_model = ['ElliSLICE'] + kwargs = { + "a": 2.0, + "b": 1.0, + "psi": 90 * np.pi / 180.0, + "sigma_0": 5.0, + "center_x": 1.0, + "center_y": 0.0, + } + lens_model = ["ElliSLICE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'a': 2., 'b': 1., 'psi': 89 * np.pi / 180., 'sigma_0': 5., 'center_x': 1., 'center_y': 0.01} - lens_model = ['ElliSLICE'] + kwargs = { + "a": 2.0, + "b": 1.0, + "psi": 89 * np.pi / 180.0, + "sigma_0": 5.0, + "center_x": 1.0, + "center_y": 0.01, + } + lens_model = ["ElliSLICE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_curved_arc_const(self): - kwargs = {'tangential_stretch': 4. 
, 'direction': 0., 'curvature': 0.5, 'center_x': 0, 'center_y': 0} - lens_model = ['CURVED_ARC_CONST'] + kwargs = { + "tangential_stretch": 4.0, + "direction": 0.0, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + } + lens_model = ["CURVED_ARC_CONST"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'tangential_stretch': 4. , 'direction': 0.5, 'curvature': 0.5, 'center_x': 0, 'center_y': 0} + kwargs = { + "tangential_stretch": 4.0, + "direction": 0.5, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + } self.assert_differentials(lens_model, kwargs, potential=False) def test_curved_arc_const_MST(self): - kwargs = {'tangential_stretch': 4., 'radial_stretch': 0.9, 'direction': 0., 'curvature': 0.5, 'center_x': 0, 'center_y': 0} - lens_model = ['CURVED_ARC_CONST_MST'] + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 0.9, + "direction": 0.0, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + } + lens_model = ["CURVED_ARC_CONST_MST"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'tangential_stretch': 4., 'radial_stretch': 1.1, 'direction': 0.5, 'curvature': 0.5, 'center_x': 0, 'center_y': 0} + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 1.1, + "direction": 0.5, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + } self.assert_differentials(lens_model, kwargs, potential=False) def test_curved_arc_spp(self): - kwargs = {'tangential_stretch': 4., 'radial_stretch': .9 , 'direction': 0.5, 'curvature': 0.5, 'center_x': 0, 'center_y': 0} - lens_model = ['CURVED_ARC_SPP'] + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 0.9, + "direction": 0.5, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + } + lens_model = ["CURVED_ARC_SPP"] self.assert_differentials(lens_model, kwargs) def test_curved_arc_spt(self): - kwargs = {'tangential_stretch': 4., 'radial_stretch': .9 , 'direction': 0.5, 'curvature': 0.5, 'center_x': 0, - 'center_y': 0, 'gamma1': 0.1, 'gamma2': -0.2} - lens_model = ['CURVED_ARC_SPT'] + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 0.9, + "direction": 0.5, + "curvature": 0.5, + "center_x": 0, + "center_y": 0, + "gamma1": 0.1, + "gamma2": -0.2, + } + lens_model = ["CURVED_ARC_SPT"] self.assert_differentials(lens_model, kwargs, potential=False) def test_curved_arc_sis_mst(self): - kwargs = {'tangential_stretch': 4., 'radial_stretch': 1 , 'direction': 0.5, 'curvature': 0.2, 'center_x': 0, 'center_y': 0} - lens_model = ['CURVED_ARC_SIS_MST'] + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 1, + "direction": 0.5, + "curvature": 0.2, + "center_x": 0, + "center_y": 0, + } + lens_model = ["CURVED_ARC_SIS_MST"] self.assert_differentials(lens_model, kwargs) def test_curved_arc_tan_diff(self): - kwargs = {'tangential_stretch': 4., 'radial_stretch': 1 , 'direction': 0.5, 'dtan_dtan': 0.1, 'curvature': 0.2, 'center_x': 0, 'center_y': 0} - lens_model = ['CURVED_ARC_TAN_DIFF'] + kwargs = { + "tangential_stretch": 4.0, + "radial_stretch": 1, + "direction": 0.5, + "dtan_dtan": 0.1, + "curvature": 0.2, + "center_x": 0, + "center_y": 0, + } + lens_model = ["CURVED_ARC_TAN_DIFF"] self.assert_differentials(lens_model, kwargs) def test_splcore(self): - - kwargs = {'sigma0': 1., 'gamma': 3, 'r_core': 0.1, 'center_x': 0., 'center_y': 0.} - lens_model = ['SPL_CORE'] + kwargs = { + "sigma0": 1.0, + "gamma": 3, + "r_core": 0.1, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["SPL_CORE"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = 
{'sigma0': 1., 'gamma': 2., 'r_core': 0.1, 'center_x': 0., 'center_y': 0.} - lens_model = ['SPL_CORE'] + kwargs = { + "sigma0": 1.0, + "gamma": 2.0, + "r_core": 0.1, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["SPL_CORE"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'sigma0': 1., 'gamma': 2.5, 'r_core': 0.1, 'center_x': 0., 'center_y': 0.} - lens_model = ['SPL_CORE'] + kwargs = { + "sigma0": 1.0, + "gamma": 2.5, + "r_core": 0.1, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["SPL_CORE"] self.assert_differentials(lens_model, kwargs, potential=False) def test_gnfw(self): - - kwargs = {'alpha_Rs': 1.2, 'Rs': 0.8, 'gamma_inner': 2.3, 'gamma_outer': 3.15} - lens_model = ['GNFW'] + kwargs = {"alpha_Rs": 1.2, "Rs": 0.8, "gamma_inner": 2.3, "gamma_outer": 3.15} + lens_model = ["GNFW"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'alpha_Rs': 1.2, 'Rs': 0.8, 'gamma_inner': 0.3, 'gamma_outer': 3.15} - lens_model = ['GNFW'] + kwargs = {"alpha_Rs": 1.2, "Rs": 0.8, "gamma_inner": 0.3, "gamma_outer": 3.15} + lens_model = ["GNFW"] self.assert_differentials(lens_model, kwargs, potential=False) def test_cse(self): - kwargs = {'a': 2, 's': 1., 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['CSE'] + kwargs = {"a": 2, "s": 1.0, "e1": 0, "e2": 0, "center_x": 0.0, "center_y": 0.0} + lens_model = ["CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'a': 2, 's': 1., 'e1': 0.3, 'e2': 0, 'center_x': 1., 'center_y': 2.} - lens_model = ['CSE'] + kwargs = { + "a": 2, + "s": 1.0, + "e1": 0.3, + "e2": 0, + "center_x": 1.0, + "center_y": 2.0, + } + lens_model = ["CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'a': 2, 's': 1., 'e1': -0.3, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['CSE'] + kwargs = { + "a": 2, + "s": 1.0, + "e1": -0.3, + "e2": 0, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'a': 2, 's': 1., 'e1': 0., 'e2': 0.3, 'center_x': 0., 'center_y': 0.} - lens_model = ['CSE'] + kwargs = { + "a": 2, + "s": 1.0, + "e1": 0.0, + "e2": 0.3, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["CSE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_nfw_cse(self): - kwargs = {'alpha_Rs': 1, 'Rs': 1, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['NFW_ELLIPSE_CSE'] + kwargs = { + "alpha_Rs": 1, + "Rs": 1, + "e1": 0, + "e2": 0, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["NFW_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'alpha_Rs': 1, 'Rs': .5, 'e1': 0.2, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['NFW_ELLIPSE_CSE'] + kwargs = { + "alpha_Rs": 1, + "Rs": 0.5, + "e1": 0.2, + "e2": 0, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["NFW_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'alpha_Rs': 2, 'Rs': .5, 'e1': 0, 'e2': 0.4, 'center_x': 0., 'center_y': 0.} - lens_model = ['NFW_ELLIPSE_CSE'] + kwargs = { + "alpha_Rs": 2, + "Rs": 0.5, + "e1": 0, + "e2": 0.4, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["NFW_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_hernquist_cse(self): - kwargs = {'sigma0': 1, 'Rs': 1, 'e1': 0, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['HERNQUIST_ELLIPSE_CSE'] + kwargs = { + "sigma0": 1, + "Rs": 1, + "e1": 0, 
+ "e2": 0, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["HERNQUIST_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'sigma0': 1, 'Rs': .5, 'e1': 0.2, 'e2': 0, 'center_x': 0., 'center_y': 0.} - lens_model = ['HERNQUIST_ELLIPSE_CSE'] + kwargs = { + "sigma0": 1, + "Rs": 0.5, + "e1": 0.2, + "e2": 0, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["HERNQUIST_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) - kwargs = {'sigma0': 2, 'Rs': .5, 'e1': 0, 'e2': 0.4, 'center_x': 0., 'center_y': 0.} - lens_model = ['HERNQUIST_ELLIPSE_CSE'] + kwargs = { + "sigma0": 2, + "Rs": 0.5, + "e1": 0, + "e2": 0.4, + "center_x": 0.0, + "center_y": 0.0, + } + lens_model = ["HERNQUIST_ELLIPSE_CSE"] self.assert_differentials(lens_model, kwargs, potential=True) def test_TNFWC(self): - - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 0.1, 'r_trunc': 200.0} - lens_model = ['TNFWC'] + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 0.1, "r_trunc": 200.0} + lens_model = ["TNFWC"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 0.1, 'r_trunc': 5.0} - lens_model = ['TNFWC'] + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 0.1, "r_trunc": 5.0} + lens_model = ["TNFWC"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 0.9, 'r_trunc': 5.0} - lens_model = ['TNFWC'] + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 0.9, "r_trunc": 5.0} + lens_model = ["TNFWC"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 2.1, 'r_trunc': 5.0} - lens_model = ['TNFWC'] + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 2.1, "r_trunc": 5.0} + lens_model = ["TNFWC"] self.assert_differentials(lens_model, kwargs, potential=False) - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 12.1, 'r_trunc': 5.0} - lens_model = ['TNFWC'] + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 12.1, "r_trunc": 5.0} + lens_model = ["TNFWC"] self.assert_differentials(lens_model, kwargs, potential=False) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_profile_integrals.py b/test/test_LensModel/test_profile_integrals.py index 29740bc88..65f8c9071 100644 --- a/test/test_LensModel/test_profile_integrals.py +++ b/test/test_LensModel/test_profile_integrals.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy.testing as npt @@ -8,40 +8,39 @@ class TestNumerics(object): - """ - tests the second derivatives of various lens models - """ + """Tests the second derivatives of various lens models.""" + def setup_method(self): pass def assert_integrals(self, Model, kwargs): lensModel = Model() int_profile = ProfileIntegrals(lensModel) - r = 2. 
+ r = 2.0 density2d_num = int_profile.density_2d(r, kwargs) density2d = lensModel.density_2d(r, 0, **kwargs) - npt.assert_almost_equal(density2d/density2d_num, 1., decimal=1) + npt.assert_almost_equal(density2d / density2d_num, 1.0, decimal=1) - kwargs['center_x'] = 0 - kwargs['center_y'] = 0 + kwargs["center_x"] = 0 + kwargs["center_y"] = 0 mass_2d_num = int_profile.mass_enclosed_2d(r, kwargs) - del kwargs['center_x'] - del kwargs['center_y'] + del kwargs["center_x"] + del kwargs["center_y"] mass_2d = lensModel.mass_2d(r, **kwargs) - npt.assert_almost_equal(mass_2d/mass_2d_num, 1, decimal=1) + npt.assert_almost_equal(mass_2d / mass_2d_num, 1, decimal=1) - kwargs['center_x'] = 0 - kwargs['center_y'] = 0 + kwargs["center_x"] = 0 + kwargs["center_y"] = 0 mass_3d_num = int_profile.mass_enclosed_3d(r, kwargs) - del kwargs['center_x'] - del kwargs['center_y'] + del kwargs["center_x"] + del kwargs["center_y"] mass_3d = lensModel.mass_3d(r, **kwargs) - npt.assert_almost_equal(mass_3d/mass_3d_num, 1, decimal=2) + npt.assert_almost_equal(mass_3d / mass_3d_num, 1, decimal=2) def assert_lens_integrals(self, Model, kwargs, pi_convention=True): - """ - checks whether the integral in projection of the density_lens() function is the convergence + """Checks whether the integral in projection of the density_lens() function is + the convergence. :param Model: lens model instance :param kwargs: keyword arguments of lens model @@ -49,14 +48,14 @@ def assert_lens_integrals(self, Model, kwargs, pi_convention=True): """ lensModel = Model() int_profile = ProfileIntegrals(lensModel) - r = 2. + r = 2.0 kappa_num = int_profile.density_2d(r, kwargs, lens_param=True) f_xx, f_xy, f_yx, f_yy = lensModel.hessian(r, 0, **kwargs) - kappa = 1./2 * (f_xx + f_yy) + kappa = 1.0 / 2 * (f_xx + f_yy) npt.assert_almost_equal(kappa_num, kappa, decimal=2) try: - del kwargs['center_x'] - del kwargs['center_y'] + del kwargs["center_x"] + del kwargs["center_y"] except: pass bool_mass_2d_lens = False @@ -67,7 +66,7 @@ def assert_lens_integrals(self, Model, kwargs, pi_convention=True): pass if bool_mass_2d_lens: alpha_x, alpha_y = lensModel.derivatives(r, 0, **kwargs) - alpha = np.sqrt(alpha_x ** 2 + alpha_y ** 2) + alpha = np.sqrt(alpha_x**2 + alpha_y**2) if pi_convention: npt.assert_almost_equal(alpha, mass_2d / r / np.pi, decimal=5) else: @@ -78,330 +77,397 @@ def assert_lens_integrals(self, Model, kwargs, pi_convention=True): except: bool_mass_3d_lens = False if bool_mass_3d_lens: - mass_3d_num = int_profile.mass_enclosed_3d(r, kwargs_profile=kwargs, lens_param=True) - print(mass_3d, mass_3d_num, 'test num') + mass_3d_num = int_profile.mass_enclosed_3d( + r, kwargs_profile=kwargs, lens_param=True + ) + print(mass_3d, mass_3d_num, "test num") npt.assert_almost_equal(mass_3d / mass_3d_num, 1, decimal=2) def test_PJaffe(self): - kwargs = {'rho0': 1., 'Ra': 0.2, 'Rs': 2.} + kwargs = {"rho0": 1.0, "Ra": 0.2, "Rs": 2.0} from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe as Model + self.assert_integrals(Model, kwargs) def test_PJaffa_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.p_jaffe import PJaffe as Model + lensModel = Model() - sigma0 = 1. + sigma0 = 1.0 Ra = 0.2 - Rs = 2. 
+ Rs = 2.0 rho0 = lensModel.sigma2rho(sigma0, Ra, Rs) - kwargs_lens = {'sigma0': sigma0, 'Ra': Ra, 'Rs': Rs, 'center_x': 0, 'center_y': 0} - kwargs_density = {'rho0': rho0, 'Ra': Ra, 'Rs': Rs} - r = 1. + kwargs_lens = { + "sigma0": sigma0, + "Ra": Ra, + "Rs": Rs, + "center_x": 0, + "center_y": 0, + } + kwargs_density = {"rho0": rho0, "Ra": Ra, "Rs": Rs} + r = 1.0 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_nfw(self): - kwargs = {'rho0': 1., 'Rs': 5., 'center_x': 0, 'center_y': 0} + kwargs = {"rho0": 1.0, "Rs": 5.0, "center_x": 0, "center_y": 0} from lenstronomy.LensModel.Profiles.nfw import NFW as Model + self.assert_integrals(Model, kwargs) - kwargs_lens = {'alpha_Rs': 1., 'Rs': 5., 'center_x': 0, 'center_y': 0} + kwargs_lens = {"alpha_Rs": 1.0, "Rs": 5.0, "center_x": 0, "center_y": 0} self.assert_lens_integrals(Model, kwargs_lens) def test_nfw_ellipse(self): - kwargs = {'rho0': 1., 'Rs': 5., 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0} + kwargs = { + "rho0": 1.0, + "Rs": 5.0, + "center_x": 0, + "center_y": 0, + "e1": 0, + "e2": 0, + } from lenstronomy.LensModel.Profiles.nfw_ellipse import NFW_ELLIPSE as Model - #self.assert_integrals(Model, kwargs) - kwargs_lens = {'alpha_Rs': 1., 'Rs': 5., 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0} + # self.assert_integrals(Model, kwargs) + + kwargs_lens = { + "alpha_Rs": 1.0, + "Rs": 5.0, + "center_x": 0, + "center_y": 0, + "e1": 0, + "e2": 0, + } self.assert_lens_integrals(Model, kwargs_lens) def test_nfw_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.nfw import NFW as Model + lensModel = Model() - alpha_Rs = 1. - Rs = 2. + alpha_Rs = 1.0 + Rs = 2.0 rho0 = lensModel.alpha2rho0(alpha_Rs, Rs) - kwargs_lens = {'alpha_Rs': alpha_Rs, 'Rs': Rs} - kwargs_density = {'rho0': rho0, 'Rs': Rs} + kwargs_lens = {"alpha_Rs": alpha_Rs, "Rs": Rs} + kwargs_density = {"rho0": rho0, "Rs": Rs} self.assert_lens_integrals(Model, kwargs_lens) self.assert_integrals(Model, kwargs_density) - r = 2. 
+ r = 2.0 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_hernquist(self): from lenstronomy.LensModel.Profiles.hernquist import Hernquist as Model - kwargs = {'rho0': 1., 'Rs': 5.} + + kwargs = {"rho0": 1.0, "Rs": 5.0} self.assert_integrals(Model, kwargs) - kwargs = {'sigma0': 1., 'Rs': 5.} + kwargs = {"sigma0": 1.0, "Rs": 5.0} self.assert_lens_integrals(Model, kwargs) def test_hernquist_ellipse(self): - from lenstronomy.LensModel.Profiles.hernquist_ellipse import Hernquist_Ellipse as Model - kwargs = {'rho0': 1., 'Rs': 5., 'e1': 0, 'e2': 0} + from lenstronomy.LensModel.Profiles.hernquist_ellipse import ( + Hernquist_Ellipse as Model, + ) + + kwargs = {"rho0": 1.0, "Rs": 5.0, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) - kwargs = {'sigma0': 1., 'Rs': 5., 'e1': 0, 'e2': 0} + kwargs = {"sigma0": 1.0, "Rs": 5.0, "e1": 0, "e2": 0} self.assert_lens_integrals(Model, kwargs) def test_hernquist_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.hernquist import Hernquist as Model + lensModel = Model() - sigma0 = 1. - Rs = 2. + sigma0 = 1.0 + Rs = 2.0 rho0 = lensModel.sigma2rho(sigma0, Rs) - kwargs_lens = {'sigma0': sigma0, 'Rs': Rs} - kwargs_density = {'rho0': rho0, 'Rs': Rs} - r = .5 + kwargs_lens = {"sigma0": sigma0, "Rs": Rs} + kwargs_density = {"rho0": rho0, "Rs": Rs} + r = 0.5 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_sis(self): from lenstronomy.LensModel.Profiles.sis import SIS as Model - kwargs = {'rho0': 1.} + + kwargs = {"rho0": 1.0} self.assert_integrals(Model, kwargs) - kwargs_lens = {'theta_E': 1.} + kwargs_lens = {"theta_E": 1.0} self.assert_lens_integrals(Model, kwargs_lens, pi_convention=True) def test_sis_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.sis import SIS as Model + lensModel = Model() - theta_E = 1. 
+ theta_E = 1.0 rho0 = lensModel.theta2rho(theta_E) - kwargs_lens = {'theta_E': theta_E} - kwargs_density = {'rho0': rho0} - r = .5 + kwargs_lens = {"theta_E": theta_E} + kwargs_density = {"rho0": rho0} + r = 0.5 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) lensModel.density_2d(1, 1, rho0=1) def test_sie(self): from lenstronomy.LensModel.Profiles.sie import SIE as Model - kwargs = {'rho0': 1., 'e1': 0, 'e2': 0} + + kwargs = {"rho0": 1.0, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) - kwargs_lens = {'theta_E': 1., 'e1': 0, 'e2': 0} + kwargs_lens = {"theta_E": 1.0, "e1": 0, "e2": 0} self.assert_lens_integrals(Model, kwargs_lens) def test_spep(self): from lenstronomy.LensModel.Profiles.spep import SPEP as Model - kwargs_lens = {'theta_E': 1, 'gamma': 2, 'e1': 0, 'e2': 0} + + kwargs_lens = {"theta_E": 1, "gamma": 2, "e1": 0, "e2": 0} self.assert_lens_integrals(Model, kwargs_lens) def test_sie_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.sie import SIE as Model + lensModel = Model() - theta_E = 1. + theta_E = 1.0 rho0 = lensModel.theta2rho(theta_E) - kwargs_lens = {'theta_E': theta_E, 'e1': 0, 'e2': 0} - kwargs_density = {'rho0': rho0, 'e1': 0, 'e2': 0} - r = .5 + kwargs_lens = {"theta_E": theta_E, "e1": 0, "e2": 0} + kwargs_density = {"rho0": rho0, "e1": 0, "e2": 0} + r = 0.5 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_spp(self): from lenstronomy.LensModel.Profiles.spp import SPP as Model - kwargs = {'rho0': 10., 'gamma': 2.2} + + kwargs = {"rho0": 10.0, "gamma": 2.2} self.assert_integrals(Model, kwargs) - kwargs = {'rho0': 1., 'gamma': 2.0} + kwargs = {"rho0": 1.0, "gamma": 2.0} self.assert_integrals(Model, kwargs) - kwargs_lens = {'theta_E': 1., 'gamma': 2.0} + kwargs_lens = {"theta_E": 1.0, "gamma": 2.0} self.assert_lens_integrals(Model, kwargs_lens) def test_spp_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.spp import SPP as Model + lensModel = Model() - theta_E = 1. + theta_E = 1.0 gamma = 2.2 rho0 = lensModel.theta2rho(theta_E, gamma) - kwargs_lens = {'theta_E': theta_E, 'gamma': gamma} - kwargs_density = {'rho0': rho0, 'gamma': gamma} - r = .5 + kwargs_lens = {"theta_E": theta_E, "gamma": gamma} + kwargs_density = {"rho0": rho0, "gamma": gamma} + r = 0.5 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_gaussian(self): from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa as Model - kwargs = {'amp': 1. 
/ 4., 'sigma': 2.} + + kwargs = {"amp": 1.0 / 4.0, "sigma": 2.0} self.assert_integrals(Model, kwargs) def test_gaussian_density_deflection(self): - """ - tests whether the unit conversion between the lensing parameter 'sigma0' and the units in the density profile are ok - :return: - """ + """Tests whether the unit conversion between the lensing parameter 'sigma0' and + the units in the density profile are ok :return:""" from lenstronomy.LensModel.Profiles.gaussian_kappa import GaussianKappa as Model + lensModel = Model() - amp = 1. / 4. - sigma = 2. + amp = 1.0 / 4.0 + sigma = 2.0 amp_lens = lensModel._amp3d_to_2d(amp, sigma, sigma) - kwargs_lens = {'amp': amp_lens, 'sigma': sigma} - kwargs_density = {'amp': amp, 'sigma': sigma} - r = .5 + kwargs_lens = {"amp": amp_lens, "sigma": sigma} + kwargs_density = {"amp": amp, "sigma": sigma} + r = 0.5 mass_2d = lensModel.mass_2d(r, **kwargs_density) - alpha_mass = mass_2d/r + alpha_mass = mass_2d / r alpha_r, _ = lensModel.derivatives(r, 0, **kwargs_lens) npt.assert_almost_equal(alpha_mass / np.pi, alpha_r, decimal=5) def test_coreBurk(self): from lenstronomy.LensModel.Profiles.coreBurkert import CoreBurkert as Model - kwargs = {'rho0': 1., 'Rs': 10, 'r_core': 5} + + kwargs = {"rho0": 1.0, "Rs": 10, "r_core": 5} self.assert_integrals(Model, kwargs) - kwargs = {'rho0': 1., 'Rs': 9, 'r_core': 11} + kwargs = {"rho0": 1.0, "Rs": 9, "r_core": 11} self.assert_integrals(Model, kwargs) def test_tnfw(self): from lenstronomy.LensModel.Profiles.tnfw import TNFW as Model - kwargs = {'rho0': 1., 'Rs': 1, 'r_trunc': 4} + + kwargs = {"rho0": 1.0, "Rs": 1, "r_trunc": 4} self.assert_integrals(Model, kwargs) def test_cnfw(self): from lenstronomy.LensModel.Profiles.cnfw import CNFW as Model - kwargs = {'rho0': 1., 'Rs': 1, 'r_core': 0.5} + + kwargs = {"rho0": 1.0, "Rs": 1, "r_core": 0.5} self.assert_integrals(Model, kwargs) - kwargs_lens = {'alpha_Rs': 1., 'Rs': 5., 'r_core': 0.5} + kwargs_lens = {"alpha_Rs": 1.0, "Rs": 5.0, "r_core": 0.5} self.assert_lens_integrals(Model, kwargs_lens) def test_cnfw_ellipse(self): from lenstronomy.LensModel.Profiles.cnfw_ellipse import CNFW_ELLIPSE as Model - kwargs = {'rho0': 1., 'Rs': 1, 'r_core': 0.5, 'e1': 0, 'e2':0} - #self.assert_integrals(Model, kwargs) - kwargs_lens = {'alpha_Rs': 1., 'Rs': 5., 'r_core': 0.5, 'e1': 0, 'e2':0} + + kwargs = {"rho0": 1.0, "Rs": 1, "r_core": 0.5, "e1": 0, "e2": 0} + # self.assert_integrals(Model, kwargs) + kwargs_lens = {"alpha_Rs": 1.0, "Rs": 5.0, "r_core": 0.5, "e1": 0, "e2": 0} self.assert_lens_integrals(Model, kwargs_lens) def test_cored_density(self): from lenstronomy.LensModel.Profiles.cored_density import CoredDensity as Model - kwargs = {'sigma0': 0.1, 'r_core': 6.} + + kwargs = {"sigma0": 0.1, "r_core": 6.0} self.assert_integrals(Model, kwargs) self.assert_lens_integrals(Model, kwargs) def test_cored_density_2(self): - from lenstronomy.LensModel.Profiles.cored_density_2 import CoredDensity2 as Model - kwargs = {'sigma0': 0.1, 'r_core': 6.} + from lenstronomy.LensModel.Profiles.cored_density_2 import ( + CoredDensity2 as Model, + ) + + kwargs = {"sigma0": 0.1, "r_core": 6.0} self.assert_integrals(Model, kwargs) self.assert_lens_integrals(Model, kwargs) def test_cored_density_exp(self): - from lenstronomy.LensModel.Profiles.cored_density_exp import CoredDensityExp as Model - kwargs = {'kappa_0': 0.1, 'theta_c': 6.} + from lenstronomy.LensModel.Profiles.cored_density_exp import ( + CoredDensityExp as Model, + ) + + kwargs = {"kappa_0": 0.1, "theta_c": 6.0} self.assert_integrals(Model, 
kwargs) self.assert_lens_integrals(Model, kwargs) def test_uldm(self): from lenstronomy.LensModel.Profiles.uldm import Uldm as Model - kwargs = {'kappa_0': 0.1, 'theta_c': 6., 'slope': 7.8} + + kwargs = {"kappa_0": 0.1, "theta_c": 6.0, "slope": 7.8} self.assert_integrals(Model, kwargs) self.assert_lens_integrals(Model, kwargs) def test_splcore(self): - from lenstronomy.LensModel.Profiles.splcore import SPLCORE as Model - kwargs = {'rho0': 1., 'gamma': 3, 'r_core': 0.1} + + kwargs = {"rho0": 1.0, "gamma": 3, "r_core": 0.1} self.assert_integrals(Model, kwargs) - kwargs = {'sigma0': 1., 'gamma': 3, 'r_core': 0.1} + kwargs = {"sigma0": 1.0, "gamma": 3, "r_core": 0.1} self.assert_lens_integrals(Model, kwargs) - kwargs = {'rho0': 1., 'gamma': 2, 'r_core': 0.1} + kwargs = {"rho0": 1.0, "gamma": 2, "r_core": 0.1} self.assert_integrals(Model, kwargs) - kwargs = {'sigma0': 1., 'gamma': 2, 'r_core': 0.1} + kwargs = {"sigma0": 1.0, "gamma": 2, "r_core": 0.1} self.assert_lens_integrals(Model, kwargs) - kwargs = {'rho0': 1., 'gamma': 2.5, 'r_core': 0.1} + kwargs = {"rho0": 1.0, "gamma": 2.5, "r_core": 0.1} self.assert_integrals(Model, kwargs) - kwargs = {'sigma0': 1., 'gamma': 2.5, 'r_core': 0.1} + kwargs = {"sigma0": 1.0, "gamma": 2.5, "r_core": 0.1} self.assert_lens_integrals(Model, kwargs) def test_nie(self): from lenstronomy.LensModel.Profiles.nie import NIE as Model - kwargs = {'theta_E': 0.7, 's_scale': 0.3, 'e1': 0., 'e2': 0} + + kwargs = {"theta_E": 0.7, "s_scale": 0.3, "e1": 0.0, "e2": 0} self.assert_lens_integrals(Model, kwargs) def test_chameleon(self): from lenstronomy.LensModel.Profiles.chameleon import Chameleon as Model - kwargs = {'alpha_1': 2., 'w_c': .1, 'w_t': 2., 'e1': 0., 'e2': 0} + + kwargs = {"alpha_1": 2.0, "w_c": 0.1, "w_t": 2.0, "e1": 0.0, "e2": 0} self.assert_lens_integrals(Model, kwargs) from lenstronomy.LensModel.Profiles.chameleon import DoubleChameleon as Model - kwargs = {'alpha_1': 2., 'ratio': 2, 'w_c1': 0.2, 'w_t1': 1, 'e11': 0, 'e21': 0, - 'w_c2': 0.5, 'w_t2': 2, 'e12': 0, 'e22': 0} + + kwargs = { + "alpha_1": 2.0, + "ratio": 2, + "w_c1": 0.2, + "w_t1": 1, + "e11": 0, + "e21": 0, + "w_c2": 0.5, + "w_t2": 2, + "e12": 0, + "e22": 0, + } self.assert_lens_integrals(Model, kwargs) from lenstronomy.LensModel.Profiles.chameleon import TripleChameleon as Model - kwargs = {'alpha_1': 2., 'ratio12': 2, 'ratio13': 0.2, 'w_c1': 0.2, 'w_t1': 1, 'e11': 0, 'e21': 0, - 'w_c2': 0.5, 'w_t2': 2, 'e12': 0, 'e22': 0, - 'w_c3': 2, 'w_t3': 5, 'e13': 0, 'e23': 0} + + kwargs = { + "alpha_1": 2.0, + "ratio12": 2, + "ratio13": 0.2, + "w_c1": 0.2, + "w_t1": 1, + "e11": 0, + "e21": 0, + "w_c2": 0.5, + "w_t2": 2, + "e12": 0, + "e22": 0, + "w_c3": 2, + "w_t3": 5, + "e13": 0, + "e23": 0, + } self.assert_lens_integrals(Model, kwargs) def test_gnfw(self): - from lenstronomy.LensModel.Profiles.general_nfw import GNFW as Model - kwargs = {'alpha_Rs': 0.7, 'Rs': 0.3, 'gamma_inner': 1., 'gamma_outer': 3.2} + + kwargs = {"alpha_Rs": 0.7, "Rs": 0.3, "gamma_inner": 1.0, "gamma_outer": 3.2} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 0.7, 'Rs': 0.3, 'gamma_inner': 2.5, 'gamma_outer': 3.0} + kwargs = {"alpha_Rs": 0.7, "Rs": 0.3, "gamma_inner": 2.5, "gamma_outer": 3.0} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 0.7, 'Rs': 0.3, 'gamma_inner': 0.5, 'gamma_outer': 2.6} + kwargs = {"alpha_Rs": 0.7, "Rs": 0.3, "gamma_inner": 0.5, "gamma_outer": 2.6} self.assert_lens_integrals(Model, kwargs) def test_tnfwc(self): - from lenstronomy.LensModel.Profiles.nfw_core_truncated 
import TNFWC as Model - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 0.1, 'r_trunc': 200.0} + + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 0.1, "r_trunc": 200.0} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 4.0, 'Rs': 2.0, 'r_core': 1.1, 'r_trunc': 100.0} + kwargs = {"alpha_Rs": 4.0, "Rs": 2.0, "r_core": 1.1, "r_trunc": 100.0} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 4.0, 'Rs': 0.5, 'r_core': 0.1, 'r_trunc': 3.0} + kwargs = {"alpha_Rs": 4.0, "Rs": 0.5, "r_core": 0.1, "r_trunc": 3.0} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 4.0, 'Rs': 0.5, 'r_core': 0.4, 'r_trunc': 3.0} + kwargs = {"alpha_Rs": 4.0, "Rs": 0.5, "r_core": 0.4, "r_trunc": 3.0} self.assert_lens_integrals(Model, kwargs) - kwargs = {'alpha_Rs': 4.0, 'Rs': 0.5, 'r_core': 0.9, 'r_trunc': 3.0} + kwargs = {"alpha_Rs": 4.0, "Rs": 0.5, "r_core": 0.9, "r_trunc": 3.0} self.assert_lens_integrals(Model, kwargs) """ @@ -417,5 +483,6 @@ def test_sersic(self): """ -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LensModel/test_single_plane.py b/test/test_LensModel/test_single_plane.py index a263aac21..e030c0c37 100644 --- a/test/test_LensModel/test_single_plane.py +++ b/test/test_LensModel/test_single_plane.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -9,52 +9,69 @@ try: import fastell4py + bool_test = True except: bool_test = False class TestLensModel(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" + def setup_method(self): - self.lensModel = SinglePlane(['GAUSSIAN']) - self.kwargs = [{'amp': 1., 'sigma_x': 2., 'sigma_y': 2., 'center_x': 0., 'center_y': 0.}] + self.lensModel = SinglePlane(["GAUSSIAN"]) + self.kwargs = [ + { + "amp": 1.0, + "sigma_x": 2.0, + "sigma_y": 2.0, + "center_x": 0.0, + "center_y": 0.0, + } + ] def test_potential(self): - output = self.lensModel.potential(x=1., y=1., kwargs=self.kwargs) - assert output == 0.77880078307140488/(8*np.pi) + output = self.lensModel.potential(x=1.0, y=1.0, kwargs=self.kwargs) + assert output == 0.77880078307140488 / (8 * np.pi) def test_alpha(self): - output1, output2 = self.lensModel.alpha(x=1., y=1., kwargs=self.kwargs) - assert output1 == -0.19470019576785122/(8*np.pi) - assert output2 == -0.19470019576785122/(8*np.pi) + output1, output2 = self.lensModel.alpha(x=1.0, y=1.0, kwargs=self.kwargs) + assert output1 == -0.19470019576785122 / (8 * np.pi) + assert output2 == -0.19470019576785122 / (8 * np.pi) def test_ray_shooting(self): - delta_x, delta_y = self.lensModel.ray_shooting(x=1., y=1., kwargs=self.kwargs) - assert delta_x == 1 + 0.19470019576785122/(8*np.pi) - assert delta_y == 1 + 0.19470019576785122/(8*np.pi) + delta_x, delta_y = self.lensModel.ray_shooting(x=1.0, y=1.0, kwargs=self.kwargs) + assert delta_x == 1 + 0.19470019576785122 / (8 * np.pi) + assert delta_y == 1 + 0.19470019576785122 / (8 * np.pi) def test_mass_2d(self): - lensModel = SinglePlane(['GAUSSIAN_KAPPA']) - kwargs = [{'amp': 1., 'sigma': 2., 'center_x': 0., 'center_y': 0.}] + lensModel = SinglePlane(["GAUSSIAN_KAPPA"]) + kwargs = [{"amp": 1.0, "sigma": 2.0, "center_x": 0.0, "center_y": 0.0}] output = lensModel.mass_2d(r=1, kwargs=kwargs) npt.assert_almost_equal(output, 0.11750309741540453, decimal=9) def test_density(self): theta_E = 1 r = 1 - lensModel = SinglePlane(lens_model_list=['SIS']) - density = lensModel.density(r=r, kwargs=[{'theta_E': 
theta_E}]) + lensModel = SinglePlane(lens_model_list=["SIS"]) + density = lensModel.density(r=r, kwargs=[{"theta_E": theta_E}]) sis = SIS() density_model = sis.density_lens(r=r, theta_E=theta_E) npt.assert_almost_equal(density, density_model, decimal=8) def test_bool_list(self): - lensModel = SinglePlane(['SPEP', 'SHEAR']) - kwargs = [{'theta_E': 1, 'gamma': 2, 'e1': 0.1, 'e2': -0.1, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.01, 'gamma2': -0.02}] + lensModel = SinglePlane(["SPEP", "SHEAR"]) + kwargs = [ + { + "theta_E": 1, + "gamma": 2, + "e1": 0.1, + "e2": -0.1, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.01, "gamma2": -0.02}, + ] alphax_1, alphay_1 = lensModel.alpha(1, 1, kwargs, k=0) alphax_1_list, alphay_1_list = lensModel.alpha(1, 1, kwargs, k=[0]) npt.assert_almost_equal(alphax_1, alphax_1_list, decimal=5) @@ -67,28 +84,34 @@ def test_bool_list(self): npt.assert_almost_equal(alphay_1_1 + alphay_1_2, alphay_full, decimal=5) def test_init(self): - lens_model_list = ['TNFW', 'TRIPLE_CHAMELEON', 'SHEAR_GAMMA_PSI', 'CURVED_ARC_CONST', 'NFW_MC', - 'ARC_PERT','MULTIPOLE', 'CURVED_ARC_SPP'] + lens_model_list = [ + "TNFW", + "TRIPLE_CHAMELEON", + "SHEAR_GAMMA_PSI", + "CURVED_ARC_CONST", + "NFW_MC", + "ARC_PERT", + "MULTIPOLE", + "CURVED_ARC_SPP", + ] lensModel = SinglePlane(lens_model_list=lens_model_list) - assert lensModel.func_list[0].param_names[0] == 'Rs' + assert lensModel.func_list[0].param_names[0] == "Rs" class TestRaise(unittest.TestCase): - def test_raise(self): - """ - check whether raises occurs if fastell4py is not installed + """Check whether raises occurs if fastell4py is not installed. :return: """ if bool_test is False: with self.assertRaises(ImportError): - SinglePlane(lens_model_list=['PEMD']) + SinglePlane(lens_model_list=["PEMD"]) with self.assertRaises(ImportError): - SinglePlane(lens_model_list=['SPEMD']) + SinglePlane(lens_model_list=["SPEMD"]) else: - SinglePlane(lens_model_list=['PEMD', 'SPEMD']) + SinglePlane(lens_model_list=["PEMD", "SPEMD"]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LightModel/test_Profiles/test_chameleon.py b/test/test_LightModel/test_Profiles/test_chameleon.py index d0157d9f1..a7d6004fc 100644 --- a/test/test_LightModel/test_Profiles/test_chameleon.py +++ b/test/test_LightModel/test_Profiles/test_chameleon.py @@ -1,25 +1,27 @@ - import pytest import numpy as np import numpy.testing as npt from lenstronomy.LightModel.Profiles.nie import NIE -from lenstronomy.LightModel.Profiles.chameleon import Chameleon, DoubleChameleon, TripleChameleon +from lenstronomy.LightModel.Profiles.chameleon import ( + Chameleon, + DoubleChameleon, + TripleChameleon, +) from lenstronomy.LensModel.Profiles.chameleon import Chameleon as ChameleonLens import lenstronomy.Util.param_util as param_util from lenstronomy.Util import util class TestChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = Chameleon() names = chameleon.param_names - assert names[0] == 'amp' + assert names[0] == "amp" def test_function(self): """ @@ -30,34 +32,38 @@ def test_function(self): nie = NIE() x = np.linspace(0.1, 10, 10) - w_c, w_t = 0.5, 1. 
+ w_c, w_t = 0.5, 1.0 phi_G, q = 0.3, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'amp': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - amp_new, w_c, w_t, s_scale_1, s_scale_2 = chameleon._chameleonLens.param_convert(1, w_c, w_t, e1, e2) - kwargs_1 = {'amp': amp_new, 's_scale': s_scale_1, 'e1': e1, 'e2': e2} - kwargs_2 = {'amp': amp_new, 's_scale': s_scale_2, 'e1': e1, 'e2': e2} - flux = chameleon.function(x=x, y=1., **kwargs_light) - flux1 = nie.function(x=x, y=1., **kwargs_1) - flux2 = nie.function(x=x, y=1., **kwargs_2) + kwargs_light = {"amp": 1.0, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + ( + amp_new, + w_c, + w_t, + s_scale_1, + s_scale_2, + ) = chameleon._chameleonLens.param_convert(1, w_c, w_t, e1, e2) + kwargs_1 = {"amp": amp_new, "s_scale": s_scale_1, "e1": e1, "e2": e2} + kwargs_2 = {"amp": amp_new, "s_scale": s_scale_2, "e1": e1, "e2": e2} + flux = chameleon.function(x=x, y=1.0, **kwargs_light) + flux1 = nie.function(x=x, y=1.0, **kwargs_1) + flux2 = nie.function(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(flux, flux1 - flux2, decimal=5) def test_lens_model_correspondence(self): - """ - here we test the proportionality of the convergence of the lens model with the surface brightness of the light - model - """ + """Here we test the proportionality of the convergence of the lens model with + the surface brightness of the light model.""" chameleon_lens = ChameleonLens() chameleon = Chameleon() x, y = util.make_grid(numPix=100, deltapix=0.1) - e1, e2 = 0., 0 - w_c, w_t = 0.5, 1. - kwargs_light = {'amp': 1., 'w_c': w_c, 'w_t': w_t, 'e1': e1, 'e2': e2} - kwargs_lens = {'alpha_1': 1., 'w_c': w_c, 'w_t': w_t, 'e1': e1, 'e2': e2} + e1, e2 = 0.0, 0 + w_c, w_t = 0.5, 1.0 + kwargs_light = {"amp": 1.0, "w_c": w_c, "w_t": w_t, "e1": e1, "e2": e2} + kwargs_lens = {"alpha_1": 1.0, "w_c": w_c, "w_t": w_t, "e1": e1, "e2": e2} flux = chameleon.function(x=x, y=y, **kwargs_light) f_xx, f_xy, f_yx, f_yy = chameleon_lens.hessian(x=x, y=y, **kwargs_lens) - kappa = 1 / 2. * (f_xx + f_yy) + kappa = 1 / 2.0 * (f_xx + f_yy) # flux2d = util.array2image(flux) # kappa2d = util.array2image(kappa) @@ -66,16 +72,15 @@ def test_lens_model_correspondence(self): class TestDoubleChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = DoubleChameleon() names = chameleon.param_names - assert names[0] == 'amp' + assert names[0] == "amp" def test_function(self): """ @@ -87,29 +92,51 @@ def test_function(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - ratio = 2. + ratio = 2.0 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'amp': 1., 'ratio': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2} - - kwargs_1 = {'amp': 1. / (1 + 1. / ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'amp': 1. 
/ (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - flux = doublechameleon.function(x=x, y=1., **kwargs_light) - flux1 = chameleon.function(x=x, y=1., **kwargs_1) - flux2 = chameleon.function(x=x, y=1., **kwargs_2) + kwargs_light = { + "amp": 1.0, + "ratio": 2, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + } + + kwargs_1 = { + "amp": 1.0 / (1 + 1.0 / ratio), + "w_c": 0.5, + "w_t": 1.0, + "e1": e1, + "e2": e2, + } + kwargs_2 = { + "amp": 1.0 / (1 + ratio), + "w_c": 0.1, + "w_t": 0.5, + "e1": e1, + "e2": e2, + } + flux = doublechameleon.function(x=x, y=1.0, **kwargs_light) + flux1 = chameleon.function(x=x, y=1.0, **kwargs_1) + flux2 = chameleon.function(x=x, y=1.0, **kwargs_2) npt.assert_almost_equal(flux, flux1 + flux2, decimal=8) class TestTripleChameleon(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass def test_param_name(self): chameleon = DoubleChameleon() names = chameleon.param_names - assert names[0] == 'amp' + assert names[0] == "amp" def test_function(self): """ @@ -121,26 +148,39 @@ def test_function(self): x = np.linspace(0.1, 10, 10) phi_G, q = 0.3, 0.8 - ratio12 = 2. + ratio12 = 2.0 ratio13 = 3 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_light = {'amp': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, - 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2, - 'w_c3': .4, 'w_t3': .8, 'e13': e1, 'e23': e2 - } - - amp1 = 1. / (1. + 1. / ratio12 + 1. / ratio13) + kwargs_light = { + "amp": 1.0, + "ratio12": ratio12, + "ratio13": ratio13, + "w_c1": 0.5, + "w_t1": 1.0, + "e11": e1, + "e21": e2, + "w_c2": 0.1, + "w_t2": 0.5, + "e12": e1, + "e22": e2, + "w_c3": 0.4, + "w_t3": 0.8, + "e13": e1, + "e23": e2, + } + + amp1 = 1.0 / (1.0 + 1.0 / ratio12 + 1.0 / ratio13) amp2 = amp1 / ratio12 amp3 = amp1 / ratio13 - kwargs_1 = {'amp': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2} - kwargs_2 = {'amp': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2} - kwargs_3 = {'amp': amp3, 'w_c': .4, 'w_t': .8, 'e1': e1, 'e2': e2} - flux = triplechameleon.function(x=x, y=1., **kwargs_light) - flux1 = chameleon.function(x=x, y=1., **kwargs_1) - flux2 = chameleon.function(x=x, y=1., **kwargs_2) - flux3 = chameleon.function(x=x, y=1., **kwargs_3) + kwargs_1 = {"amp": amp1, "w_c": 0.5, "w_t": 1.0, "e1": e1, "e2": e2} + kwargs_2 = {"amp": amp2, "w_c": 0.1, "w_t": 0.5, "e1": e1, "e2": e2} + kwargs_3 = {"amp": amp3, "w_c": 0.4, "w_t": 0.8, "e1": e1, "e2": e2} + flux = triplechameleon.function(x=x, y=1.0, **kwargs_light) + flux1 = chameleon.function(x=x, y=1.0, **kwargs_1) + flux2 = chameleon.function(x=x, y=1.0, **kwargs_2) + flux3 = chameleon.function(x=x, y=1.0, **kwargs_3) npt.assert_almost_equal(flux, flux1 + flux2 + flux3, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_ellipsoid.py b/test/test_LightModel/test_Profiles/test_ellipsoid.py index 4dbb5822f..89f93dabe 100644 --- a/test/test_LightModel/test_Profiles/test_ellipsoid.py +++ b/test/test_LightModel/test_Profiles/test_ellipsoid.py @@ -1,4 +1,3 @@ - import pytest import numpy as np import lenstronomy.LightModel.Profiles.ellipsoid as torus @@ -6,9 +5,8 @@ class TestTorus(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" + def setup_method(self): pass @@ -17,12 +15,11 @@ def test_function(self): :return: """ - output = torus.function(x=1, y=1, 
amp=1., sigma=2, center_x=0, center_y=0) + output = torus.function(x=1, y=1, amp=1.0, sigma=2, center_x=0, center_y=0) assert output == 0.079577471545947673 class TestEllipsoid(object): - def setup_method(self): pass @@ -32,11 +29,15 @@ def test_function(self): :return: """ ellipsoid = Ellipsoid() - output = ellipsoid.function(x=1, y=1, amp=1., radius=1, e1=0, e2=0, center_x=0, center_y=0) + output = ellipsoid.function( + x=1, y=1, amp=1.0, radius=1, e1=0, e2=0, center_x=0, center_y=0 + ) assert output == 0 - output = ellipsoid.function(x=0.99, y=0, amp=1., radius=1, e1=0, e2=0, center_x=0, center_y=0) - assert output == 1./np.pi + output = ellipsoid.function( + x=0.99, y=0, amp=1.0, radius=1, e1=0, e2=0, center_x=0, center_y=0 + ) + assert output == 1.0 / np.pi -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_gaussian.py b/test/test_LightModel/test_Profiles/test_gaussian.py index 9a00f5948..5fa44d0d5 100644 --- a/test/test_LightModel/test_Profiles/test_gaussian.py +++ b/test/test_LightModel/test_Profiles/test_gaussian.py @@ -1,15 +1,18 @@ - import pytest import numpy as np import numpy.testing as npt from lenstronomy.Util import util -from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian, MultiGaussianEllipse, GaussianEllipse, Gaussian +from lenstronomy.LightModel.Profiles.gaussian import ( + MultiGaussian, + MultiGaussianEllipse, + GaussianEllipse, + Gaussian, +) class TestMultiGaussian(object): - """ - class to test the Gaussian profile - """ + """Class to test the Gaussian profile.""" + def setup_method(self): pass @@ -19,13 +22,14 @@ def test_function_split(self): :return: """ profile = MultiGaussian() - output = profile.function_split(x=1., y=1., amp=[1., 2], sigma=[1, 2], center_x=0, center_y=0) + output = profile.function_split( + x=1.0, y=1.0, amp=[1.0, 2], sigma=[1, 2], center_x=0, center_y=0 + ) npt.assert_almost_equal(output[0], 0.058549831524319168, decimal=8) npt.assert_almost_equal(output[1], 0.061974997154826489, decimal=8) class TestGaussian(object): - def setup_method(self): pass @@ -43,7 +47,6 @@ def test_total_flux(self): class TestGaussianEllipse(object): - def setup_method(self): pass @@ -54,8 +57,12 @@ def test_function_split(self): """ multiGaussian = MultiGaussian() multiGaussianEllipse = MultiGaussianEllipse() - output = multiGaussian.function_split(x=1., y=1., amp=[1., 2], sigma=[1, 2], center_x=0, center_y=0) - output_2 = multiGaussianEllipse.function_split(x=1., y=1., amp=[1., 2], sigma=[1, 2], e1=0, e2=0, center_x=0, center_y=0) + output = multiGaussian.function_split( + x=1.0, y=1.0, amp=[1.0, 2], sigma=[1, 2], center_x=0, center_y=0 + ) + output_2 = multiGaussianEllipse.function_split( + x=1.0, y=1.0, amp=[1.0, 2], sigma=[1, 2], e1=0, e2=0, center_x=0, center_y=0 + ) npt.assert_almost_equal(output[0], output_2[0], decimal=8) npt.assert_almost_equal(output[1], output_2[1], decimal=8) @@ -72,8 +79,8 @@ def test_light_3d(self): gaussian = Gaussian() sigma = 1 - r = 1. - amp = 1. 
+ r = 1.0 + amp = 1.0 flux_spherical = gaussian.light_3d(r, amp, sigma) flux = gaussianEllipse.light_3d(r, amp, sigma) npt.assert_almost_equal(flux, flux_spherical, decimal=8) @@ -81,11 +88,11 @@ def test_light_3d(self): multiGaussian = MultiGaussian() multiGaussianEllipse = MultiGaussianEllipse() amp = [1, 2] - sigma = [1., 2] + sigma = [1.0, 2] flux_spherical = multiGaussian.light_3d(r, amp, sigma) flux = multiGaussianEllipse.light_3d(r, amp, sigma) npt.assert_almost_equal(flux, flux_spherical, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_interpolation.py b/test/test_LightModel/test_Profiles/test_interpolation.py index 7214630a8..008e864ce 100644 --- a/test/test_LightModel/test_Profiles/test_interpolation.py +++ b/test/test_LightModel/test_Profiles/test_interpolation.py @@ -7,9 +7,8 @@ class TestInterpol(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" + def setup_method(self): pass @@ -19,24 +18,46 @@ def test_function(self): :return: """ for len_x, len_y in [(20, 20), (14, 20)]: - x, y = util.make_grid(numPix=(len_x, len_y), deltapix=1.) + x, y = util.make_grid(numPix=(len_x, len_y), deltapix=1.0) gauss = Gaussian() - flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=1.) + flux = gauss.function(x, y, amp=1.0, center_x=0.0, center_y=0.0, sigma=1.0) image = util.array2image(flux, nx=len_y, ny=len_x) interp = Interpol() - kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 0., 'center_y': 0.} + kwargs_interp = { + "image": image, + "scale": 1.0, + "phi_G": 0.0, + "center_x": 0.0, + "center_y": 0.0, + } output = interp.function(x, y, **kwargs_interp) npt.assert_equal(output, flux) - flux = gauss.function(x-1., y, amp=1., center_x=0., center_y=0., sigma=1.) - kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 1., 'center_y': 0.} + flux = gauss.function( + x - 1.0, y, amp=1.0, center_x=0.0, center_y=0.0, sigma=1.0 + ) + kwargs_interp = { + "image": image, + "scale": 1.0, + "phi_G": 0.0, + "center_x": 1.0, + "center_y": 0.0, + } output = interp.function(x, y, **kwargs_interp) npt.assert_almost_equal(output, flux, decimal=0) - flux = gauss.function(x - 1., y - 1., amp=1, center_x=0., center_y=0., sigma=1.) 
- kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 1., 'center_y': 1.} + flux = gauss.function( + x - 1.0, y - 1.0, amp=1, center_x=0.0, center_y=0.0, sigma=1.0 + ) + kwargs_interp = { + "image": image, + "scale": 1.0, + "phi_G": 0.0, + "center_x": 1.0, + "center_y": 1.0, + } output = interp.function(x, y, **kwargs_interp) npt.assert_almost_equal(output, flux, decimal=0) @@ -44,16 +65,24 @@ def test_function(self): assert out == 0 # test change of center without re-doing interpolation - out = interp.function(x=0, y=0, image=image, scale=1., phi_G=0, center_x=0, center_y=0) - out_shift = interp.function(x=1, y=0, image=image, scale=1., phi_G=0, center_x=1, center_y=0) + out = interp.function( + x=0, y=0, image=image, scale=1.0, phi_G=0, center_x=0, center_y=0 + ) + out_shift = interp.function( + x=1, y=0, image=image, scale=1.0, phi_G=0, center_x=1, center_y=0 + ) assert out_shift == out # function must give a single value when evaluated at a single point assert isinstance(out, float) # test change of scale without re-doing interpolation - out = interp.function(x=1., y=0, image=image, scale=1., phi_G=0, center_x=0, center_y=0) - out_scaled = interp.function(x=2., y=0, image=image, scale=2, phi_G=0, center_x=0, center_y=0) + out = interp.function( + x=1.0, y=0, image=image, scale=1.0, phi_G=0, center_x=0, center_y=0 + ) + out_scaled = interp.function( + x=2.0, y=0, image=image, scale=2, phi_G=0, center_x=0, center_y=0 + ) assert out_scaled == out def test_flux_normalization(self): @@ -62,11 +91,17 @@ def test_flux_normalization(self): len_x, len_y = 21, 21 x, y = util.make_grid(numPix=(len_x, len_y), deltapix=delta_pix) gauss = Gaussian() - flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=0.3) + flux = gauss.function(x, y, amp=1.0, center_x=0.0, center_y=0.0, sigma=0.3) image = util.array2image(flux, nx=len_y, ny=len_x) flux_total = np.sum(image) - kwargs_interp = {'image': image, 'scale': delta_pix, 'phi_G': 0., 'center_x': 0., 'center_y': 0.} + kwargs_interp = { + "image": image, + "scale": delta_pix, + "phi_G": 0.0, + "center_x": 0.0, + "center_y": 0.0, + } image_interp = interp.function(x, y, **kwargs_interp) flux_interp = np.sum(image_interp) npt.assert_almost_equal(flux_interp, flux_total, decimal=3) @@ -75,23 +110,35 @@ def test_flux_normalization(self): # demands same surface brightness values. We rescale the pixel grid by the same amount as the image scale = 0.5 x, y = util.make_grid(numPix=(len_x, len_y), deltapix=delta_pix * scale) - kwargs_interp = {'image': image, 'scale': delta_pix * scale, 'phi_G': 0., 'center_x': 0., 'center_y': 0.} + kwargs_interp = { + "image": image, + "scale": delta_pix * scale, + "phi_G": 0.0, + "center_x": 0.0, + "center_y": 0.0, + } output = interp.function(x, y, **kwargs_interp) npt.assert_almost_equal(output / image_interp, 1, decimal=5) def test_delete_cache(self): - x, y = util.make_grid(numPix=20, deltapix=1.) + x, y = util.make_grid(numPix=20, deltapix=1.0) gauss = Gaussian() - flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=1.) 
+ flux = gauss.function(x, y, amp=1.0, center_x=0.0, center_y=0.0, sigma=1.0) image = util.array2image(flux) interp = Interpol() - kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 0., 'center_y': 0.} + kwargs_interp = { + "image": image, + "scale": 1.0, + "phi_G": 0.0, + "center_x": 0.0, + "center_y": 0.0, + } output = interp.function(x, y, **kwargs_interp) - assert hasattr(interp, '_image_interp') + assert hasattr(interp, "_image_interp") interp.delete_cache() - assert not hasattr(interp, '_image_interp') + assert not hasattr(interp, "_image_interp") -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_moffat.py b/test/test_LightModel/test_Profiles/test_moffat.py index 5c3dcbb87..e29b1b22c 100644 --- a/test/test_LightModel/test_Profiles/test_moffat.py +++ b/test/test_LightModel/test_Profiles/test_moffat.py @@ -1,12 +1,10 @@ - import pytest from lenstronomy.LightModel.Profiles.moffat import Moffat class TestMoffat(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass @@ -16,9 +14,11 @@ def test_function(self): :return: """ profile = Moffat() - output = profile.function(x=1., y=1., amp=1., alpha=2., beta=1., center_x=0, center_y=0) + output = profile.function( + x=1.0, y=1.0, amp=1.0, alpha=2.0, beta=1.0, center_x=0, center_y=0 + ) assert output == 0.6666666666666666 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_nie.py b/test/test_LightModel/test_Profiles/test_nie.py index fc1c5f76c..8d9104116 100644 --- a/test/test_LightModel/test_Profiles/test_nie.py +++ b/test/test_LightModel/test_Profiles/test_nie.py @@ -1,4 +1,3 @@ - import pytest import numpy.testing as npt import numpy as np @@ -8,9 +7,8 @@ class TestNIE(object): - """ - class to test the NIE profile - """ + """Class to test the NIE profile.""" + def setup_method(self): pass @@ -24,17 +22,17 @@ def test_function(self): x, y = util.make_grid(numPix=100, deltapix=0.1) e1, e2 = 0.2, 0 - s = 1. - kwargs_light = {'amp': 1., 'e1': e1, 'e2': e2, 's_scale': s} - kwargs_lens = {'theta_E': 1., 'e1': e1, 'e2': e2, 's_scale': s} + s = 1.0 + kwargs_light = {"amp": 1.0, "e1": e1, "e2": e2, "s_scale": s} + kwargs_lens = {"theta_E": 1.0, "e1": e1, "e2": e2, "s_scale": s} flux = light.function(x=x, y=y, **kwargs_light) f_xx, f_xy, f_yx, f_yy = lens.hessian(x=x, y=y, **kwargs_lens) - kappa = 1/2. 
* (f_xx + f_yy) + kappa = 1 / 2.0 * (f_xx + f_yy) - npt.assert_almost_equal(flux/flux[-1], kappa/kappa[-1], decimal=3) + npt.assert_almost_equal(flux / flux[-1], kappa / kappa[-1], decimal=3) # test whether ellipticity changes overall flux normalization - kwargs_light_round = {'amp': 1., 'e1': 0, 'e2': 0, 's_scale': s} + kwargs_light_round = {"amp": 1.0, "e1": 0, "e2": 0, "s_scale": s} x_, y_ = util.points_on_circle(radius=1, num_points=20) f_r = light.function(x=x_, y=y_, **kwargs_light) @@ -44,5 +42,5 @@ def test_function(self): npt.assert_almost_equal(f_r / f_r_round, 1, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_power_law.py b/test/test_LightModel/test_Profiles/test_power_law.py index a9f0593aa..870a7242c 100644 --- a/test/test_LightModel/test_Profiles/test_power_law.py +++ b/test/test_LightModel/test_Profiles/test_power_law.py @@ -1,4 +1,3 @@ - import pytest import numpy as np import numpy.testing as npt @@ -10,9 +9,8 @@ class TestPowerLaw(object): - """ - class to test the Moffat profile - """ + """Class to test the Moffat profile.""" + def setup_method(self): pass @@ -25,37 +23,37 @@ def test_function(self): spp = SPP() sis = SIS() x = np.linspace(0.1, 10, 10) - kwargs_light = {'amp': 1., 'gamma': 2, 'e1': 0, 'e2': 0} - kwargs_spp = {'theta_E': 1., 'gamma': 2} - kwargs_sis = {'theta_E': 1.} - flux = profile.function(x=x, y=1., **kwargs_light) - f_xx, f_xy, f_yx, f_yy = spp.hessian(x=x, y=1., **kwargs_spp) - kappa_spp = 1/2. * (f_xx + f_yy) - f_xx, f_xy, f_yx, f_yy = sis.hessian(x=x, y=1., **kwargs_sis) - kappa_sis = 1 / 2. * (f_xx + f_yy) + kwargs_light = {"amp": 1.0, "gamma": 2, "e1": 0, "e2": 0} + kwargs_spp = {"theta_E": 1.0, "gamma": 2} + kwargs_sis = {"theta_E": 1.0} + flux = profile.function(x=x, y=1.0, **kwargs_light) + f_xx, f_xy, f_yx, f_yy = spp.hessian(x=x, y=1.0, **kwargs_spp) + kappa_spp = 1 / 2.0 * (f_xx + f_yy) + f_xx, f_xy, f_yx, f_yy = sis.hessian(x=x, y=1.0, **kwargs_sis) + kappa_sis = 1 / 2.0 * (f_xx + f_yy) npt.assert_almost_equal(kappa_sis, kappa_spp, decimal=5) - npt.assert_almost_equal(flux/flux[0], kappa_sis/kappa_sis[0], decimal=5) + npt.assert_almost_equal(flux / flux[0], kappa_sis / kappa_sis[0], decimal=5) # test against NIE nie = NIE() e1, e2 = 0.2, -0.1 - kwargs_light = {'amp': 1., 'gamma': 2, 'e1': e1, 'e2': e2} - kwargs_nie = {'theta_E': 1., 'e1': e1, 'e2': e2, 's_scale': 0.00001} - flux = profile.function(x=x, y=1., **kwargs_light) - f_xx, f_xy, f_yx, f_yy = nie.hessian(x=x, y=1., **kwargs_nie) - kappa_nie = 1/2. * (f_xx + f_yy) - npt.assert_almost_equal(flux/flux[0], kappa_nie/kappa_nie[0], decimal=5) + kwargs_light = {"amp": 1.0, "gamma": 2, "e1": e1, "e2": e2} + kwargs_nie = {"theta_E": 1.0, "e1": e1, "e2": e2, "s_scale": 0.00001} + flux = profile.function(x=x, y=1.0, **kwargs_light) + f_xx, f_xy, f_yx, f_yy = nie.hessian(x=x, y=1.0, **kwargs_nie) + kappa_nie = 1 / 2.0 * (f_xx + f_yy) + npt.assert_almost_equal(flux / flux[0], kappa_nie / kappa_nie[0], decimal=5) # test against EPL epl = EPL() e1, e2 = 0.2, -0.1 - kwargs_light = {'amp': 1., 'gamma': 2, 'e1': e1, 'e2': e2} - kwargs_epl = {'theta_E': 1., 'e1': e1, 'e2': e2, 'gamma': 2} - flux = profile.function(x=x, y=1., **kwargs_light) - f_xx, f_xy, f_yx, f_yy = epl.hessian(x=x, y=1., **kwargs_epl) - kappa_epl = 1 / 2. 
* (f_xx + f_yy) + kwargs_light = {"amp": 1.0, "gamma": 2, "e1": e1, "e2": e2} + kwargs_epl = {"theta_E": 1.0, "e1": e1, "e2": e2, "gamma": 2} + flux = profile.function(x=x, y=1.0, **kwargs_light) + f_xx, f_xy, f_yx, f_yy = epl.hessian(x=x, y=1.0, **kwargs_epl) + kappa_epl = 1 / 2.0 * (f_xx + f_yy) npt.assert_almost_equal(flux / flux[0], kappa_epl / kappa_epl[0], decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_profile_base.py b/test/test_LightModel/test_Profiles/test_profile_base.py index 8e5b60056..d2f2f20d0 100644 --- a/test/test_LightModel/test_Profiles/test_profile_base.py +++ b/test/test_LightModel/test_Profiles/test_profile_base.py @@ -4,7 +4,6 @@ class TestRaise(unittest.TestCase): - def test_raise(self): lighModel = LightProfileBase() with self.assertRaises(ValueError): @@ -13,5 +12,5 @@ def test_raise(self): lighModel.light_3d() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_sersic.py b/test/test_LightModel/test_Profiles/test_sersic.py index 5102564ba..e51817517 100644 --- a/test/test_LightModel/test_Profiles/test_sersic.py +++ b/test/test_LightModel/test_Profiles/test_sersic.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" from lenstronomy.LightModel.Profiles.sersic import Sersic, SersicElliptic, CoreSersic @@ -10,9 +10,8 @@ class TestSersic(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.sersic = Sersic(smoothing=0.02) self.sersic_elliptic = SersicElliptic(smoothing=0.02, sersic_major_axis=True) @@ -26,47 +25,73 @@ def test_sersic(self): n_sersic = 1 center_x = 0 center_y = 0 - values = self.sersic.function(x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y) + values = self.sersic.function( + x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.12658651833626802, decimal=6) x = np.array([0]) y = np.array([0]) - values = self.sersic.function( x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y) - npt.assert_almost_equal(values[0], 5.1482559148107292, decimal=2) - - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.sersic.function( x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y) + values = self.sersic.function( + x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y + ) + npt.assert_almost_equal(values[0], 5.1482559148107292, decimal=2) + + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.sersic.function( + x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.12658651833626802, decimal=6) npt.assert_almost_equal(values[1], 0.026902273598180083, decimal=6) npt.assert_almost_equal(values[2], 0.0053957432862338055, decimal=6) - value = self.sersic.function(1000, 0, I0_sersic, R_sersic, n_sersic, center_x, center_y) + value = self.sersic.function( + 1000, 0, I0_sersic, R_sersic, n_sersic, center_x, center_y + ) npt.assert_almost_equal(value, 0, decimal=8) def test_symmetry_r_sersic(self): - x = np.array([2,3,4]) - y = np.array([1,1,1]) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) I0_sersic = 1 R_sersic1 = 1 R_sersic2 = 0.1 n_sersic = 1 center_x = 0 center_y = 0 - values1 = self.sersic.function(x*R_sersic1, y*R_sersic1, I0_sersic, R_sersic1, n_sersic, center_x, center_y) - values2 = self.sersic.function(x*R_sersic2, y*R_sersic2, I0_sersic, R_sersic2, n_sersic, center_x, center_y) + values1 = 
self.sersic.function( + x * R_sersic1, + y * R_sersic1, + I0_sersic, + R_sersic1, + n_sersic, + center_x, + center_y, + ) + values2 = self.sersic.function( + x * R_sersic2, + y * R_sersic2, + I0_sersic, + R_sersic2, + n_sersic, + center_x, + center_y, + ) npt.assert_almost_equal(values1[0], values2[0], decimal=6) npt.assert_almost_equal(values1[1], values2[1], decimal=6) npt.assert_almost_equal(values1[2], values2[2], decimal=6) def test_sersic_center(self): x = 0.01 - y = 0. + y = 0.0 I0_sersic = 1 R_sersic = 0.1 - n_sersic = 4. + n_sersic = 4.0 center_x = 0 center_y = 0 - values = self.sersic.function(x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y) + values = self.sersic.function( + x, y, I0_sersic, R_sersic, n_sersic, center_x, center_y + ) npt.assert_almost_equal(values, 12.688073819377406, decimal=6) def test_sersic_elliptic(self): @@ -80,16 +105,22 @@ def test_sersic_elliptic(self): e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) center_x = 0 center_y = 0 - values = self.sersic_elliptic.function(x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y) + values = self.sersic_elliptic.function( + x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.12595366113005077, decimal=6) x = np.array([0]) y = np.array([0]) - values = self.sersic_elliptic.function(x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y) + values = self.sersic_elliptic.function( + x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 5.1482553482055664, decimal=2) - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.sersic_elliptic.function(x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.sersic_elliptic.function( + x, y, I0_sersic, R_sersic, n_sersic, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.11308277793465012, decimal=6) npt.assert_almost_equal(values[1], 0.021188620675507107, decimal=6) npt.assert_almost_equal(values[2], 0.0037276744362724477, decimal=6) @@ -107,16 +138,22 @@ def test_core_sersic(self): e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) center_x = 0 center_y = 0 - values = self.core_sersic.function(x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y) + values = self.core_sersic.function( + x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.10338957116342086, decimal=8) x = np.array([0]) y = np.array([0]) - values = self.core_sersic.function(x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y) + values = self.core_sersic.function( + x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 187852.14004235074, decimal=0) - x = np.array([2,3,4]) - y = np.array([1,1,1]) - values = self.core_sersic.function(x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y) + x = np.array([2, 3, 4]) + y = np.array([1, 1, 1]) + values = self.core_sersic.function( + x, y, I0, Rb, Re, n, gamma, e1, e2, center_x, center_y + ) npt.assert_almost_equal(values[0], 0.09255079955772508, decimal=6) npt.assert_almost_equal(values[1], 0.01767817014938002, decimal=6) npt.assert_almost_equal(values[2], 0.0032541063777438853, decimal=6) @@ -125,29 +162,42 @@ def test_total_flux(self): deltapix = 0.1 x_grid, y_grid = util.make_grid(numPix=400, deltapix=deltapix) r_eff = 1 - I_eff = 1. 
+ I_eff = 1.0 n_sersic = 2 - flux_analytic = self.sersic.total_flux(amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=0, e2=0) - flux_grid = self.sersic.function(x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff) + flux_analytic = self.sersic.total_flux( + amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=0, e2=0 + ) + flux_grid = self.sersic.function( + x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff + ) flux_numeric = np.sum(flux_grid) * deltapix**2 - npt.assert_almost_equal(flux_numeric/flux_analytic, 1, decimal=2) + npt.assert_almost_equal(flux_numeric / flux_analytic, 1, decimal=2) # and here we check with ellipticity e1, e2 = 0.1, 0 sersic_elliptic_major = SersicElliptic(smoothing=0.02, sersic_major_axis=True) - flux_analytic_ell = sersic_elliptic_major.total_flux(amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=e1, e2=e2) - flux_grid = sersic_elliptic_major.function(x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff, e1=e1, e2=e2) - flux_numeric_ell = np.sum(flux_grid) * deltapix ** 2 + flux_analytic_ell = sersic_elliptic_major.total_flux( + amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=e1, e2=e2 + ) + flux_grid = sersic_elliptic_major.function( + x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff, e1=e1, e2=e2 + ) + flux_numeric_ell = np.sum(flux_grid) * deltapix**2 npt.assert_almost_equal(flux_numeric_ell / flux_analytic_ell, 1, decimal=2) e1, e2 = 0.1, 0 - sersic_elliptic_product = SersicElliptic(smoothing=0.02, sersic_major_axis=False) - flux_analytic_ell = sersic_elliptic_product.total_flux(amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=e1, e2=e2) - flux_grid = sersic_elliptic_product.function(x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff, e1=e1, - e2=e2) - flux_numeric_ell = np.sum(flux_grid) * deltapix ** 2 + sersic_elliptic_product = SersicElliptic( + smoothing=0.02, sersic_major_axis=False + ) + flux_analytic_ell = sersic_elliptic_product.total_flux( + amp=I_eff, R_sersic=r_eff, n_sersic=n_sersic, e1=e1, e2=e2 + ) + flux_grid = sersic_elliptic_product.function( + x_grid, y_grid, R_sersic=r_eff, n_sersic=n_sersic, amp=I_eff, e1=e1, e2=e2 + ) + flux_numeric_ell = np.sum(flux_grid) * deltapix**2 npt.assert_almost_equal(flux_numeric_ell / flux_analytic_ell, 1, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_shapelets.py b/test/test_LightModel/test_Profiles/test_shapelets.py index 4afadd23f..e93828622 100644 --- a/test/test_LightModel/test_Profiles/test_shapelets.py +++ b/test/test_LightModel/test_Profiles/test_shapelets.py @@ -7,9 +7,8 @@ class TestShapeletSet(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" + def setup_method(self): self.shapeletSet = ShapeletSet() self.shapelets = Shapelets(precalc=False) @@ -21,29 +20,39 @@ def test_shapelet_set(self): :return: """ n_max = 2 - beta = 1. - amp = [1,0,0,0,0,0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + beta = 1.0 + amp = [1, 0, 0, 0, 0, 0] + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) assert output == 0.20755374871029739 - input = np.array(0.) 
+ input = np.array(0.0) input += output - output = self.shapeletSet.function(self.x, self.y, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + self.x, self.y, amp, n_max, beta, center_x=0, center_y=0 + ) assert output[10] == 0.47957022395315946 - output = self.shapeletSet.function(1, 1, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + 1, 1, amp, n_max, beta, center_x=0, center_y=0 + ) assert output == 0.20755374871029739 n_max = -1 - beta = 1. + beta = 1.0 amp = [1, 0, 0, 0, 0, 0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) assert output == 0 - beta = 1. + beta = 1.0 amp = 1 shapelets = Shapelets(precalc=False, stable_cut=False) - output = shapelets.function(np.array(1), np.array(1), amp, beta, 0, 0, center_x=0, center_y=0) - npt.assert_almost_equal(0.2075537487102974 , output, decimal=8) + output = shapelets.function( + np.array(1), np.array(1), amp, beta, 0, 0, center_x=0, center_y=0 + ) + npt.assert_almost_equal(0.2075537487102974, output, decimal=8) def test_shapelet_basis(self): num_order = 5 @@ -58,23 +67,31 @@ def test_decomposition(self): :return: """ n_max = 2 - beta = 10. + beta = 10.0 deltaPix = 1 - amp = np.array([1,1,1,1,1,1]) + amp = np.array([1, 1, 1, 1, 1, 1]) x, y = util.make_grid(100, deltaPix, 1) - input = self.shapeletSet.function(x, y, amp, n_max, beta, center_x=0, center_y=0) - amp_out = self.shapeletSet.decomposition(input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0) + input = self.shapeletSet.function( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + amp_out = self.shapeletSet.decomposition( + input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0 + ) for i in range(len(amp)): npt.assert_almost_equal(amp_out[i], amp[i], decimal=4) def test_function_split(self): n_max = 2 - beta = 10. + beta = 10.0 deltaPix = 0.1 - amp = np.array([1,1,1,1,1,1]) + amp = np.array([1, 1, 1, 1, 1, 1]) x, y = util.make_grid(10, deltaPix, 1) - function_set = self.shapeletSet.function_split(x, y, amp, n_max, beta, center_x=0, center_y=0) - test_flux = self.shapelets.function(x, y, amp=1., n1=0, n2=0, beta=beta, center_x=0, center_y=0) + function_set = self.shapeletSet.function_split( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + test_flux = self.shapelets.function( + x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0 + ) print(np.shape(function_set)) print(np.shape(test_flux)) assert function_set[0][10] == test_flux[10] @@ -83,18 +100,23 @@ def test_interpolate(self): shapeletsInterp = Shapelets(interpolation=True) x, y = 0.99, 0 beta = 0.5 - flux_full = self.shapelets.function(x, y, amp=1., n1=0, n2=0, beta=beta, center_x=0, center_y=0) - flux_interp = shapeletsInterp.function(x, y, amp=1., n1=0, n2=0, beta=beta, center_x=0, center_y=0) + flux_full = self.shapelets.function( + x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0 + ) + flux_interp = shapeletsInterp.function( + x, y, amp=1.0, n1=0, n2=0, beta=beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(flux_interp, flux_full, decimal=10) def test_hermval(self): x = np.linspace(0, 2000, 2001) n_array = [1, 2, 3, 0, 1] import numpy.polynomial.hermite as hermite + out_true = hermite.hermval(x, n_array) out_approx = self.shapelets.hermval(x, n_array) - shape_true = out_true * np.exp(-x ** 2 / 2.) - shape_approx = out_approx * np.exp(-x ** 2 / 2.) 
+ shape_true = out_true * np.exp(-(x**2) / 2.0) + shape_approx = out_approx * np.exp(-(x**2) / 2.0) npt.assert_almost_equal(shape_approx, shape_true, decimal=6) x = 2 @@ -107,18 +129,17 @@ def test_hermval(self): n_array = [1, 2, 3, 0, 1] out_true = hermite.hermval(x, n_array) out_approx = self.shapelets.hermval(x, n_array) - shape_true = out_true * np.exp(-x**2/2.) - shape_approx = out_approx * np.exp(-x ** 2 / 2.) + shape_true = out_true * np.exp(-(x**2) / 2.0) + shape_approx = out_approx * np.exp(-(x**2) / 2.0) npt.assert_almost_equal(shape_approx, shape_true, decimal=6) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): shapelets = Shapelets() shapelets.pre_calc(1, 1, beta=1, n_order=200, center_x=0, center_y=0) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_shapelets_ellipse.py b/test/test_LightModel/test_Profiles/test_shapelets_ellipse.py index fa17ab452..054f52a39 100644 --- a/test/test_LightModel/test_Profiles/test_shapelets_ellipse.py +++ b/test/test_LightModel/test_Profiles/test_shapelets_ellipse.py @@ -7,7 +7,6 @@ class TestShapeletSetEllipse(object): - def setup(self): self.ellipse = ShapeletSetEllipse() self.spherical = ShapeletSet() @@ -19,11 +18,19 @@ def test_function(self): n_max = 3 num_param = int((n_max + 1) * (n_max + 2) / 2) amp_list = np.ones(num_param) - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=0, center_y=0) - kwargs_spherical = {'amp': amp_list, 'beta': 1, 'n_max': 3, 'center_x': 1, 'center_y': -1} + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=0, center_y=0 + ) + kwargs_spherical = { + "amp": amp_list, + "beta": 1, + "n_max": 3, + "center_x": 1, + "center_y": -1, + } kwargs_ellipse = copy.deepcopy(kwargs_spherical) - kwargs_ellipse['e1'] = e1 - kwargs_ellipse['e2'] = e2 + kwargs_ellipse["e1"] = e1 + kwargs_ellipse["e2"] = e2 flux_ellipse = self.ellipse.function(x, y, **kwargs_ellipse) flux_spherical = self.spherical.function(x_, y_, **kwargs_spherical) @@ -36,11 +43,19 @@ def test_function_split(self): n_max = 3 num_param = int((n_max + 1) * (n_max + 2) / 2) amp_list = np.ones(num_param) - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=0, center_y=0) - kwargs_spherical = {'amp': amp_list, 'beta': 1, 'n_max': 3, 'center_x': 1, 'center_y': -1} + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=0, center_y=0 + ) + kwargs_spherical = { + "amp": amp_list, + "beta": 1, + "n_max": 3, + "center_x": 1, + "center_y": -1, + } kwargs_ellipse = copy.deepcopy(kwargs_spherical) - kwargs_ellipse['e1'] = e1 - kwargs_ellipse['e2'] = e2 + kwargs_ellipse["e1"] = e1 + kwargs_ellipse["e2"] = e2 flux_ellipse = self.ellipse.function_split(x, y, **kwargs_ellipse) flux_spherical = self.spherical.function_split(x_, y_, **kwargs_spherical) diff --git a/test/test_LightModel/test_Profiles/test_shapelets_polar.py b/test/test_LightModel/test_Profiles/test_shapelets_polar.py index 1a8e7a163..9b6a24b56 100644 --- a/test/test_LightModel/test_Profiles/test_shapelets_polar.py +++ b/test/test_LightModel/test_Profiles/test_shapelets_polar.py @@ -3,40 +3,50 @@ import numpy.testing as npt import pytest import unittest -from lenstronomy.LightModel.Profiles.shapelets_polar import ShapeletsPolar, ShapeletSetPolar, ShapeletsPolarExp +from lenstronomy.LightModel.Profiles.shapelets_polar import ( + ShapeletsPolar, + ShapeletSetPolar, + ShapeletsPolarExp, +) class 
TestShapeletsPolar(object): - def setup_method(self): self.shapelets = ShapeletsPolar() def test_function(self): x, y = util.make_grid(10, 0.1, 1) - amp = 1. - beta = 1. + amp = 1.0 + beta = 1.0 n = 5 m = 0 complex_bool = False - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 36.3296290765419, decimal=6) complex_bool = True - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) n = 5 m = 3 complex_bool = False - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) complex_bool = True - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) def test_index2_poly(self): - index = 0 n, m, complex_bool = self.shapelets.index2poly(index) assert n == 0 @@ -91,36 +101,42 @@ def test__index2n(self): class TestShapeletsPolarExp(object): - def setup_method(self): self.shapelets = ShapeletsPolarExp() def test_function(self): x, y = util.make_grid(10, 0.1, 1) - amp = 1. - beta = 1. + amp = 1.0 + beta = 1.0 n = 2 m = 0 complex_bool = False - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 4.704663416542942, decimal=6) complex_bool = True - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) n = 5 m = 3 complex_bool = False - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) complex_bool = True - flux = self.shapelets.function(x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0) + flux = self.shapelets.function( + x, y, amp, beta, n, m, complex_bool, center_x=0, center_y=0 + ) npt.assert_almost_equal(np.sum(flux), 0, decimal=6) def test_index2_poly(self): - index = 0 n, m, complex_bool = self.shapelets.index2poly(index) assert n == 0 @@ -156,7 +172,7 @@ def test_index2_poly(self): for index in range(0, 20): n, m, complex_bool = self.shapelets.index2poly(index) - print(n, m, complex_bool, 'test') + print(n, m, complex_bool, "test") index_new = self.shapelets.poly2index(n, m, complex_bool) assert index == index_new @@ -176,9 +192,7 @@ def test__index2n(self): class TestShapeletSetPolar(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" def setup_method(self): self.shapeletSet = ShapeletSetPolar() @@ -191,22 +205,30 @@ def test_shapelet_set(self): :return: """ n_max = 2 - beta = 1. 
+ beta = 1.0 amp = [1, 0, 0, 0, 0, 0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output, 0.20755374871029739, decimal=8) - input = np.array(0.) + input = np.array(0.0) input += output - output = self.shapeletSet.function(self.x, self.y, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + self.x, self.y, amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output[10], 0.47957022395315946, decimal=8) - output = self.shapeletSet.function(1, 1, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + 1, 1, amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output, 0.20755374871029739, decimal=8) n_max = -1 - beta = 1. + beta = 1.0 amp = [1, 0, 0, 0, 0, 0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) assert output == 0 def test_decomposition(self): @@ -215,34 +237,48 @@ def test_decomposition(self): :return: """ n_max = 2 - beta = 10. + beta = 10.0 deltaPix = 2 amp = np.array([1, 1, -1, 1, 1, 1]) x, y = util.make_grid(100, deltaPix, 1) - input = self.shapeletSet.function(x, y, amp, n_max, beta, center_x=0, center_y=0) - amp_out = self.shapeletSet.decomposition(input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0) - print(amp_out, 'amp_out') + input = self.shapeletSet.function( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + amp_out = self.shapeletSet.decomposition( + input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0 + ) + print(amp_out, "amp_out") for i in range(len(amp)): - print(i, 'i test') + print(i, "i test") npt.assert_almost_equal(amp_out[i], amp[i], decimal=4) def test_function_split(self): n_max = 2 - beta = 10. + beta = 10.0 deltaPix = 0.1 amp = np.array([1, 1, 1, 1, 1, 1]) x, y = util.make_grid(10, deltaPix, 1) - function_set = self.shapeletSet.function_split(x, y, amp, n_max, beta, center_x=0, center_y=0) - test_flux = self.shapelets.function(x, y, amp=1., n=0, m=0, complex_bool=False, beta=beta, center_x=0, center_y=0) + function_set = self.shapeletSet.function_split( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + test_flux = self.shapelets.function( + x, + y, + amp=1.0, + n=0, + m=0, + complex_bool=False, + beta=beta, + center_x=0, + center_y=0, + ) print(np.shape(function_set)) print(np.shape(test_flux)) assert function_set[0][10] == test_flux[10] class TestShapeletSetPolarExp(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" def setup_method(self): self.shapeletSet = ShapeletSetPolar(exponential=True) @@ -250,57 +286,75 @@ def setup_method(self): self.x, self.y = util.make_grid(10, 0.1, 1) def test_shapelet_set(self): - """ - - #:return: - """ + """#:return:""" n_max = 2 - beta = 1. + beta = 1.0 amp = [1, 0, 0, 0, 0, 0, 0, 0, 0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output, 0.19397908887786985, decimal=8) - input = np.array(0.) 
+ input = np.array(0.0) input += output - output = self.shapeletSet.function(self.x, self.y, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + self.x, self.y, amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output[10], 0.4511844400064266, decimal=8) - output = self.shapeletSet.function(1, 1, amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + 1, 1, amp, n_max, beta, center_x=0, center_y=0 + ) npt.assert_almost_equal(output, 0.19397908887786985, decimal=8) n_max = -1 - beta = 1. + beta = 1.0 amp = [1, 0, 0] - output = self.shapeletSet.function(np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0) + output = self.shapeletSet.function( + np.array(1), np.array(1), amp, n_max, beta, center_x=0, center_y=0 + ) assert output == 0 def test_decomposition(self): - """ - - #:return: - """ + """#:return:""" scale = 10 n_max = 2 - beta = 1. * scale + beta = 1.0 * scale deltaPix = 0.5 * scale amp = np.array([1, 1, -1, 1, 1, 1, 1, 1, 1]) x, y = util.make_grid(1000, deltaPix, 1) - input = self.shapeletSet.function(x, y, amp, n_max, beta, center_x=0, center_y=0) - amp_out = self.shapeletSet.decomposition(input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0) - print(amp_out, 'amp_out') + input = self.shapeletSet.function( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + amp_out = self.shapeletSet.decomposition( + input, x, y, n_max, beta, deltaPix, center_x=0, center_y=0 + ) + print(amp_out, "amp_out") for i in range(len(amp)): print(self.shapeletSet.shapelets.index2poly(i)) for i in range(len(amp)): - print(i, 'i test') + print(i, "i test") npt.assert_almost_equal(amp_out[i], amp[i], decimal=2) def test_function_split(self): n_max = 2 - beta = 10. + beta = 10.0 deltaPix = 0.1 amp = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1]) x, y = util.make_grid(10, deltaPix, 1) - function_set = self.shapeletSet.function_split(x, y, amp, n_max, beta, center_x=0, center_y=0) - test_flux = self.shapelets.function(x, y, amp=1., n=0, m=0, complex_bool=False, beta=beta, center_x=0, center_y=0) + function_set = self.shapeletSet.function_split( + x, y, amp, n_max, beta, center_x=0, center_y=0 + ) + test_flux = self.shapelets.function( + x, + y, + amp=1.0, + n=0, + m=0, + complex_bool=False, + beta=beta, + center_x=0, + center_y=0, + ) print(np.shape(function_set)) print(np.shape(test_flux)) assert function_set[0][10] == test_flux[10] @@ -314,7 +368,6 @@ def test_index2poly(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): shapelets = ShapeletsPolar() @@ -324,5 +377,5 @@ def test_raise(self): shapelets.poly2index(n=2, m=0, complex_bool=True) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_starlets.py b/test/test_LightModel/test_Profiles/test_starlets.py index 53e7a05f2..fbe36fe03 100644 --- a/test/test_LightModel/test_Profiles/test_starlets.py +++ b/test/test_LightModel/test_Profiles/test_starlets.py @@ -9,18 +9,25 @@ from lenstronomy.Util import util -_force_no_pysap = True # if issues on Travis-CI to install pysap, force use python-only functions +_force_no_pysap = ( + True # if issues on Travis-CI to install pysap, force use python-only functions +) class TestSLITStarlets(object): - """ - class to test SLIT_Starlets light profile - """ + """Class to test SLIT_Starlets light profile.""" + def setup_method(self): # different versions of Starlet transforms - self.starlets = 
SLIT_Starlets(fast_inverse=False, second_gen=False, force_no_pysap=_force_no_pysap) - self.starlets_fast = SLIT_Starlets(fast_inverse=True, second_gen=False, force_no_pysap=_force_no_pysap) - self.starlets_2nd = SLIT_Starlets(second_gen=True, force_no_pysap=_force_no_pysap) + self.starlets = SLIT_Starlets( + fast_inverse=False, second_gen=False, force_no_pysap=_force_no_pysap + ) + self.starlets_fast = SLIT_Starlets( + fast_inverse=True, second_gen=False, force_no_pysap=_force_no_pysap + ) + self.starlets_2nd = SLIT_Starlets( + second_gen=True, force_no_pysap=_force_no_pysap + ) # define a test image with gaussian components self.num_pix = 50 @@ -30,9 +37,15 @@ def setup_method(self): # build a non-trivial positive image from sum of gaussians gaussian = Gaussian() - gaussian1 = gaussian.function(self.x, self.y, amp=100, sigma=1, center_x=-7, center_y=-7) - gaussian2 = gaussian.function(self.x, self.y, amp=500, sigma=3, center_x=-3, center_y=-3) - gaussian3 = gaussian.function(self.x, self.y, amp=2000, sigma=5, center_x=+5, center_y=+5) + gaussian1 = gaussian.function( + self.x, self.y, amp=100, sigma=1, center_x=-7, center_y=-7 + ) + gaussian2 = gaussian.function( + self.x, self.y, amp=500, sigma=3, center_x=-3, center_y=-3 + ) + gaussian3 = gaussian.function( + self.x, self.y, amp=2000, sigma=5, center_x=+5, center_y=+5 + ) self.test_image = util.array2image(gaussian1 + gaussian2 + gaussian3) self.test_coeffs = np.zeros((self.n_scales, self.num_pix, self.num_pix)) @@ -50,12 +63,18 @@ def test_reconstructions_2d(self): self.starlets_fast.decomposition_2d(self.test_image, self.n_scales) self.starlets_2nd.decomposition_2d(self.test_image, self.n_scales) - image = self.starlets.function_2d(coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels) - image_fast = self.starlets_fast.function_2d(coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels) + image = self.starlets.function_2d( + coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels + ) + image_fast = self.starlets_fast.function_2d( + coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels + ) assert image.shape == (self.num_pix, self.num_pix) assert image_fast.shape == (self.num_pix, self.num_pix) - image_2nd = self.starlets_2nd.function_2d(coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels) + image_2nd = self.starlets_2nd.function_2d( + coeffs=self.test_coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels + ) assert image_2nd.shape == (self.num_pix, self.num_pix) assert np.all(image_2nd >= 0) @@ -66,7 +85,9 @@ def test_decompositions_2d(self): """ # test equality between fast and std transform (which are identical) coeffs = self.starlets.decomposition_2d(self.test_image, self.n_scales) - coeffs_fast = self.starlets_fast.decomposition_2d(self.test_image, self.n_scales) + coeffs_fast = self.starlets_fast.decomposition_2d( + self.test_image, self.n_scales + ) assert coeffs.shape == (self.n_scales, self.num_pix, self.num_pix) assert coeffs_fast.shape == (self.n_scales, self.num_pix, self.num_pix) npt.assert_almost_equal(coeffs, coeffs_fast, decimal=3) @@ -88,45 +109,69 @@ def test_function(self): # test with a 1D input self.starlets.decomposition(util.image2array(self.test_image), self.n_scales) - coeffs_1d = self.test_coeffs.reshape(self.n_scales*self.num_pix**2) - - image_1d = self.starlets.function(self.x, self.y, amp=coeffs_1d, - n_scales=self.n_scales, n_pixels=self.n_pixels) + coeffs_1d = self.test_coeffs.reshape(self.n_scales * self.num_pix**2) 
+ + image_1d = self.starlets.function( + self.x, + self.y, + amp=coeffs_1d, + n_scales=self.n_scales, + n_pixels=self.n_pixels, + ) assert image_1d.shape == (self.num_pix**2,) - image_1d_fast = self.starlets_fast.function(self.x, self.y, amp=coeffs_1d, - n_scales=self.n_scales, n_pixels=self.n_pixels) + image_1d_fast = self.starlets_fast.function( + self.x, + self.y, + amp=coeffs_1d, + n_scales=self.n_scales, + n_pixels=self.n_pixels, + ) assert image_1d_fast.shape == (self.num_pix**2,) - image_1d_2nd = self.starlets_2nd.function(self.x, self.y, amp=coeffs_1d, - n_scales=self.n_scales, n_pixels=self.n_pixels) + image_1d_2nd = self.starlets_2nd.function( + self.x, + self.y, + amp=coeffs_1d, + n_scales=self.n_scales, + n_pixels=self.n_pixels, + ) assert image_1d_2nd.shape == (self.num_pix**2,) def test_identity_operations_fast(self): - """ - test the decomposition/reconstruction + """Test the decomposition/reconstruction. :return: """ coeffs = self.starlets_fast.decomposition_2d(self.test_image, self.n_scales) - test_image_recon = self.starlets_fast.function_2d(coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels) + test_image_recon = self.starlets_fast.function_2d( + coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels + ) npt.assert_almost_equal(self.test_image, test_image_recon, decimal=5) def test_identity_operations_2nd(self): - """ - test the decomposition/reconstruction + """Test the decomposition/reconstruction. :return: """ coeffs = self.starlets_2nd.decomposition_2d(self.test_image, self.n_scales) - test_image_recon = self.starlets_2nd.function_2d(coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels) + test_image_recon = self.starlets_2nd.function_2d( + coeffs=coeffs, n_scales=self.n_scales, n_pixels=self.n_pixels + ) npt.assert_almost_equal(self.test_image, test_image_recon, decimal=5) def test_delete_cache(self): - amp = self.test_coeffs.reshape(self.n_scales*self.num_pix**2) - kwargs_starlets = dict(amp=amp, n_scales=self.n_scales, n_pixels=self.n_pixels, center_x=0, center_y=0, scale=1) + amp = self.test_coeffs.reshape(self.n_scales * self.num_pix**2) + kwargs_starlets = dict( + amp=amp, + n_scales=self.n_scales, + n_pixels=self.n_pixels, + center_x=0, + center_y=0, + scale=1, + ) output = self.starlets_fast.function(self.x, self.y, **kwargs_starlets) - assert hasattr(self.starlets_fast.interpol, '_image_interp') + assert hasattr(self.starlets_fast.interpol, "_image_interp") self.starlets_fast.delete_cache() - assert not hasattr(self.starlets_fast.interpol, '_image_interp') + assert not hasattr(self.starlets_fast.interpol, "_image_interp") def test_coeffs2pysap(self): n_scales = 3 @@ -146,6 +191,7 @@ def test_pysap2coeffs(self): for i in range(n_scales): assert pysap_list[i].shape == coeffs[i].shape + class TestRaise(unittest.TestCase): def test_raise(self): with self.assertRaises(ValueError): @@ -156,9 +202,15 @@ def test_raise(self): x, y = util.make_grid(num_pix, 1) # build a non-trivial positive image from sum of gaussians gaussian = Gaussian() - gaussian1 = gaussian.function(x, y, amp=100, sigma=1, center_x=-7, center_y=-7) - gaussian2 = gaussian.function(x, y, amp=500, sigma=3, center_x=-3, center_y=-3) - gaussian3 = gaussian.function(x, y, amp=2000, sigma=5, center_x=+5, center_y=+5) + gaussian1 = gaussian.function( + x, y, amp=100, sigma=1, center_x=-7, center_y=-7 + ) + gaussian2 = gaussian.function( + x, y, amp=500, sigma=3, center_x=-3, center_y=-3 + ) + gaussian3 = gaussian.function( + x, y, amp=2000, sigma=5, center_x=+5, center_y=+5 + ) 
test_image = util.array2image(gaussian1 + gaussian2 + gaussian3) n_scales = 100 _ = starlets.decomposition_2d(test_image, n_scales) @@ -170,18 +222,33 @@ def test_raise(self): x, y = util.make_grid(num_pix, 1) # build a non-trivial positive image from sum of gaussians gaussian = Gaussian() - gaussian1 = gaussian.function(x, y, amp=100, sigma=1, center_x=-7, center_y=-7) - gaussian2 = gaussian.function(x, y, amp=500, sigma=3, center_x=-3, center_y=-3) - gaussian3 = gaussian.function(x, y, amp=2000, sigma=5, center_x=+5, center_y=+5) + gaussian1 = gaussian.function( + x, y, amp=100, sigma=1, center_x=-7, center_y=-7 + ) + gaussian2 = gaussian.function( + x, y, amp=500, sigma=3, center_x=-3, center_y=-3 + ) + gaussian3 = gaussian.function( + x, y, amp=2000, sigma=5, center_x=+5, center_y=+5 + ) test_image = util.array2image(gaussian1 + gaussian2 + gaussian3) n_scales = -1 _ = starlets.decomposition_2d(test_image, n_scales) with self.assertRaises(ValueError): # function_split is not supported/defined for pixel-based profiles - light_model = LightModel(['SLIT_STARLETS']) + light_model = LightModel(["SLIT_STARLETS"]) num_pix = 20 x, y = util.make_grid(num_pix, 1) - kwargs_list = [{'amp': np.ones((3, num_pix, num_pix)), 'n_scales': 3, 'n_pixels': 20**2, 'center_x': 0, 'center_y': 0, 'scale': 1}] + kwargs_list = [ + { + "amp": np.ones((3, num_pix, num_pix)), + "n_scales": 3, + "n_pixels": 20**2, + "center_x": 0, + "center_y": 0, + "scale": 1, + } + ] _ = light_model.functions_split(x, y, kwargs_list) with self.assertRaises(ValueError): # provided a wrong shape for starlet coefficients @@ -189,7 +256,14 @@ def test_raise(self): num_pix = 20 x, y = util.make_grid(num_pix, 1) coeffs_wrong = np.ones((3, num_pix**2)) - kwargs_list = {'amp': coeffs_wrong, 'n_scales': 3, 'n_pixels': 20**2, 'center_x': 0, 'center_y': 0, 'scale': 1} + kwargs_list = { + "amp": coeffs_wrong, + "n_scales": 3, + "n_pixels": 20**2, + "center_x": 0, + "center_y": 0, + "scale": 1, + } _ = starlet_class.function(x, y, **kwargs_list) image_wrong = np.ones((1, num_pix, num_pix)) _ = starlet_class.decomposition(image_wrong, 3) @@ -200,5 +274,6 @@ def test_raise(self): image_wrong = np.ones((2, num_pix, num_pix)) _ = starlet_class.decomposition(image_wrong, 3) -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_Profiles/test_uniform.py b/test/test_LightModel/test_Profiles/test_uniform.py index ca56c05ad..434e513a9 100644 --- a/test/test_LightModel/test_Profiles/test_uniform.py +++ b/test/test_LightModel/test_Profiles/test_uniform.py @@ -1,12 +1,10 @@ - import pytest from lenstronomy.LightModel.Profiles.uniform import Uniform class TestShapelet(object): - """ - class to test Shapelets - """ + """Class to test Shapelets.""" + def setup_method(self): pass @@ -20,5 +18,5 @@ def test_function(self): assert output == 0.1 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_light3d.py b/test/test_LightModel/test_light3d.py index 624ce352a..98c5e365f 100644 --- a/test/test_LightModel/test_light3d.py +++ b/test/test_LightModel/test_light3d.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy.testing as npt @@ -7,63 +7,98 @@ class TestNumerics(object): - """ - tests the second derivatives of various lens models - """ + """Tests the second derivatives of various lens models.""" + def setup_method(self): pass def assert_integrals(self, Model, kwargs): lightModel = Model() - r = 2. 
- out = integrate.quad(lambda x: 2 * lightModel.light_3d(np.sqrt(x ** 2 + r ** 2), **kwargs), 0, 100) + r = 2.0 + out = integrate.quad( + lambda x: 2 * lightModel.light_3d(np.sqrt(x**2 + r**2), **kwargs), + 0, + 100, + ) light_2d_num = out[0] light_2d = lightModel.function(r, 0, **kwargs) - npt.assert_almost_equal(light_2d_num/light_2d, 1., decimal=1) + npt.assert_almost_equal(light_2d_num / light_2d, 1.0, decimal=1) def test_PJaffe(self): - kwargs = {'amp': 1., 'Ra': 0.2, 'Rs': 2.} + kwargs = {"amp": 1.0, "Ra": 0.2, "Rs": 2.0} from lenstronomy.LightModel.Profiles.p_jaffe import PJaffe as Model + self.assert_integrals(Model, kwargs) def test_hernquist(self): - kwargs = {'amp': 1., 'Rs': 5.} + kwargs = {"amp": 1.0, "Rs": 5.0} from lenstronomy.LightModel.Profiles.hernquist import Hernquist as Model + self.assert_integrals(Model, kwargs) def test_gaussian(self): from lenstronomy.LightModel.Profiles.gaussian import Gaussian as Model - kwargs = {'amp': 1. / 4., 'sigma': 2.} + + kwargs = {"amp": 1.0 / 4.0, "sigma": 2.0} self.assert_integrals(Model, kwargs) def test_power_law(self): from lenstronomy.LightModel.Profiles.power_law import PowerLaw as Model - kwargs = {'amp': 2, 'gamma': 2, 'e1': 0, 'e2': 0} + + kwargs = {"amp": 2, "gamma": 2, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) def test_nie(self): from lenstronomy.LightModel.Profiles.nie import NIE as Model - kwargs = {'amp': 2, 's_scale': 0.001, 'e1': 0, 'e2': 0} + + kwargs = {"amp": 2, "s_scale": 0.001, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) - kwargs = {'amp': 2, 's_scale': 1., 'e1': 0, 'e2': 0} + kwargs = {"amp": 2, "s_scale": 1.0, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) def test_chameleon(self): from lenstronomy.LightModel.Profiles.chameleon import Chameleon as Model - kwargs = {'amp': 2, 'w_c': 1, 'w_t': 2, 'e1': 0, 'e2': 0} + + kwargs = {"amp": 2, "w_c": 1, "w_t": 2, "e1": 0, "e2": 0} self.assert_integrals(Model, kwargs) from lenstronomy.LightModel.Profiles.chameleon import DoubleChameleon as Model - kwargs = {'amp': 2, 'ratio': 0.4, 'w_c1': 1, 'w_t1': 2, 'e11': 0, 'e21': 0, - 'w_c2': 2, 'w_t2': 3, 'e12': 0, 'e22': 0} + + kwargs = { + "amp": 2, + "ratio": 0.4, + "w_c1": 1, + "w_t1": 2, + "e11": 0, + "e21": 0, + "w_c2": 2, + "w_t2": 3, + "e12": 0, + "e22": 0, + } self.assert_integrals(Model, kwargs) from lenstronomy.LightModel.Profiles.chameleon import TripleChameleon as Model - kwargs = {'amp': 2, 'ratio12': 0.4, 'ratio13': 2, 'w_c1': 1, 'w_t1': 2, 'e11': 0, 'e21': 0, - 'w_c2': 2, 'w_t2': 3, 'e12': 0, 'e22': 0, - 'w_c3': 0.2, 'w_t3': 1, 'e13': 0, 'e23': 0} + + kwargs = { + "amp": 2, + "ratio12": 0.4, + "ratio13": 2, + "w_c1": 1, + "w_t1": 2, + "e11": 0, + "e21": 0, + "w_c2": 2, + "w_t2": 3, + "e12": 0, + "e22": 0, + "w_c3": 0.2, + "w_t3": 1, + "e13": 0, + "e23": 0, + } self.assert_integrals(Model, kwargs) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main("-k TestLensModel") diff --git a/test/test_LightModel/test_light_model.py b/test/test_LightModel/test_light_model.py index 9b3de3ff0..6eb4454ba 100644 --- a/test/test_LightModel/test_light_model.py +++ b/test/test_LightModel/test_light_model.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -11,57 +11,159 @@ class TestLightModel(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" def setup_method(self): - self.light_model_list = ['GAUSSIAN', 'MULTI_GAUSSIAN', 'SERSIC', 'SERSIC_ELLIPSE', - 'CORE_SERSIC', 'SHAPELETS', 
'HERNQUIST', - 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE', 'UNIFORM', 'POWER_LAW', 'NIE', - 'INTERPOL', 'SHAPELETS_POLAR_EXP', 'ELLIPSOID' - ] + self.light_model_list = [ + "GAUSSIAN", + "MULTI_GAUSSIAN", + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "SHAPELETS", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "UNIFORM", + "POWER_LAW", + "NIE", + "INTERPOL", + "SHAPELETS_POLAR_EXP", + "ELLIPSOID", + ] phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) self.kwargs = [ - {'amp': 1., 'sigma': 1., 'center_x': 0, 'center_y': 0}, # 'GAUSSIAN' - {'amp': [1., 2], 'sigma': [1, 3], 'center_x': 0, 'center_y': 0}, # 'MULTI_GAUSSIAN' - {'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'center_x': 0, 'center_y': 0}, # 'SERSIC' - {'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}, # 'SERSIC_ELLIPSE' - {'amp': 1, 'R_sersic': 0.5, 'Rb': 0.1, 'gamma': 2., 'n_sersic': 1, 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}, + {"amp": 1.0, "sigma": 1.0, "center_x": 0, "center_y": 0}, # 'GAUSSIAN' + { + "amp": [1.0, 2], + "sigma": [1, 3], + "center_x": 0, + "center_y": 0, + }, # 'MULTI_GAUSSIAN' + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC' + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC_ELLIPSE' + { + "amp": 1, + "R_sersic": 0.5, + "Rb": 0.1, + "gamma": 2.0, + "n_sersic": 1, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + }, # 'CORE_SERSIC' - {'amp': [1, 1, 1], 'beta': 0.5, 'n_max': 1, 'center_x': 0, 'center_y': 0}, # 'SHAPELETS' - {'amp': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}, # 'HERNQUIST' - {'amp': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2}, # 'HERNQUIST_ELLIPSE' - {'amp': 1, 'Ra': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}, # 'PJAFFE' - {'amp': 1, 'Ra': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2}, # 'PJAFFE_ELLIPSE' - {'amp': 1}, # 'UNIFORM' - {'amp': 1., 'gamma': 2., 'e1': e1, 'e2': e2, 'center_x': 0, 'center_y': 0}, # 'POWER_LAW' - {'amp': .001, 'e1': 0, 'e2': 1., 'center_x': 0, 'center_y': 0, 's_scale': 1.}, # 'NIE' - {'image': np.zeros((20, 5)), 'scale': 1, 'phi_G': 0, 'center_x': 0, 'center_y': 0}, - {'amp': [1], 'n_max': 0, 'beta': 1, 'center_x': 0, 'center_y': 0}, - {'amp': 1, 'radius': 1., 'e1': 0, 'e2': 0.1, 'center_x': 0, 'center_y': 0} # 'ELLIPSOID' - ] - - self.LightModel = LightModel(light_model_list=self.light_model_list, sersic_major_axis=False) + { + "amp": [1, 1, 1], + "beta": 0.5, + "n_max": 1, + "center_x": 0, + "center_y": 0, + }, # 'SHAPELETS' + {"amp": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}, # 'HERNQUIST' + { + "amp": 1, + "Rs": 0.5, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + }, # 'HERNQUIST_ELLIPSE' + {"amp": 1, "Ra": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}, # 'PJAFFE' + { + "amp": 1, + "Ra": 1, + "Rs": 0.5, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + }, # 'PJAFFE_ELLIPSE' + {"amp": 1}, # 'UNIFORM' + { + "amp": 1.0, + "gamma": 2.0, + "e1": e1, + "e2": e2, + "center_x": 0, + "center_y": 0, + }, # 'POWER_LAW' + { + "amp": 0.001, + "e1": 0, + "e2": 1.0, + "center_x": 0, + "center_y": 0, + "s_scale": 1.0, + }, # 'NIE' + { + "image": np.zeros((20, 5)), + "scale": 1, + "phi_G": 0, + "center_x": 0, + "center_y": 0, + }, + {"amp": [1], "n_max": 0, "beta": 1, "center_x": 0, "center_y": 0}, + { + "amp": 1, + "radius": 1.0, + "e1": 0, + "e2": 0.1, + "center_x": 0, + "center_y": 0, 
+ }, # 'ELLIPSOID' + ] + + self.LightModel = LightModel( + light_model_list=self.light_model_list, sersic_major_axis=False + ) def test_init(self): - model_list = ['CORE_SERSIC', 'SHAPELETS', 'SHAPELETS_ELLIPSE', 'SHAPELETS_POLAR', 'SHAPELETS_POLAR_EXP', - 'UNIFORM', 'CHAMELEON', - 'DOUBLE_CHAMELEON', 'TRIPLE_CHAMELEON'] + model_list = [ + "CORE_SERSIC", + "SHAPELETS", + "SHAPELETS_ELLIPSE", + "SHAPELETS_POLAR", + "SHAPELETS_POLAR_EXP", + "UNIFORM", + "CHAMELEON", + "DOUBLE_CHAMELEON", + "TRIPLE_CHAMELEON", + ] lightModel = LightModel(light_model_list=model_list) assert len(lightModel.profile_type_list) == len(model_list) def test_surface_brightness(self): - output = self.LightModel.surface_brightness(x=1., y=1., kwargs_list=self.kwargs) + output = self.LightModel.surface_brightness( + x=1.0, y=1.0, kwargs_list=self.kwargs + ) npt.assert_almost_equal(output, 2.647127, decimal=6) def test_surface_brightness_array(self): - output = self.LightModel.surface_brightness(x=[1], y=[1], kwargs_list=self.kwargs) + output = self.LightModel.surface_brightness( + x=[1], y=[1], kwargs_list=self.kwargs + ) npt.assert_almost_equal(output[0], 2.647127113888489, decimal=6) def test_functions_split(self): - output = self.LightModel.functions_split(x=1., y=1., kwargs_list=self.kwargs) + output = self.LightModel.functions_split(x=1.0, y=1.0, kwargs_list=self.kwargs) npt.assert_almost_equal(output[0][0], 0.058549831524319168, decimal=6) def test_param_name_list(self): @@ -72,7 +174,6 @@ def test_param_name_list_latex(self): param_name_list = self.LightModel.param_name_list_latex assert len(self.light_model_list) == len(param_name_list) - def test_num_param_linear(self): num = self.LightModel.num_param_linear(self.kwargs, list_return=False) assert num == 19 @@ -83,21 +184,70 @@ def test_num_param_linear(self): def test_update_linear(self): response, n = self.LightModel.functions_split(1, 1, self.kwargs) param = np.ones(n) * 2 - kwargs_out, i = self.LightModel.update_linear(param, i=0, kwargs_list=self.kwargs) + kwargs_out, i = self.LightModel.update_linear( + param, i=0, kwargs_list=self.kwargs + ) assert i == n - assert kwargs_out[0]['amp'] == 2 + assert kwargs_out[0]["amp"] == 2 def test_total_flux(self): - light_model_list = ['SERSIC', 'SERSIC_ELLIPSE', 'INTERPOL', 'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'MULTI_GAUSSIAN', - 'MULTI_GAUSSIAN_ELLIPSE'] - kwargs_list = [{'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'center_x': 0, 'center_y': 0}, # 'SERSIC' - {'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'e1': 0.1, 'e2': 0, 'center_x': 0, 'center_y': 0}, # 'SERSIC_ELLIPSE' - {'image': np.ones((20, 5)), 'scale': 1, 'phi_G': 0, 'center_x': 0, 'center_y': 0}, # 'INTERPOL' - {'amp': 2, 'sigma': 2, 'center_x': 0, 'center_y': 0}, # 'GAUSSIAN' - {'amp': 2, 'sigma': 2, 'e1': 0.1, 'e2': 0, 'center_x': 0, 'center_y': 0}, # 'GAUSSIAN_ELLIPSE' - {'amp': [1,1], 'sigma': [2, 1], 'center_x': 0, 'center_y': 0}, # 'MULTI_GAUSSIAN' - {'amp': [1, 1], 'sigma': [2, 1], 'e1': 0.1, 'e2': 0, 'center_x': 0, 'center_y': 0} # 'MULTI_GAUSSIAN_ELLIPSE' - ] + light_model_list = [ + "SERSIC", + "SERSIC_ELLIPSE", + "INTERPOL", + "GAUSSIAN", + "GAUSSIAN_ELLIPSE", + "MULTI_GAUSSIAN", + "MULTI_GAUSSIAN_ELLIPSE", + ] + kwargs_list = [ + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC' + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "e1": 0.1, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC_ELLIPSE' + { + "image": np.ones((20, 5)), + "scale": 1, + "phi_G": 0, + "center_x": 0, + "center_y": 0, + 
}, # 'INTERPOL' + {"amp": 2, "sigma": 2, "center_x": 0, "center_y": 0}, # 'GAUSSIAN' + { + "amp": 2, + "sigma": 2, + "e1": 0.1, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, # 'GAUSSIAN_ELLIPSE' + { + "amp": [1, 1], + "sigma": [2, 1], + "center_x": 0, + "center_y": 0, + }, # 'MULTI_GAUSSIAN' + { + "amp": [1, 1], + "sigma": [2, 1], + "e1": 0.1, + "e2": 0, + "center_x": 0, + "center_y": 0, + }, # 'MULTI_GAUSSIAN_ELLIPSE' + ] lightModel = LightModel(light_model_list=light_model_list) total_flux_list = lightModel.total_flux(kwargs_list) assert total_flux_list[2] == 100 @@ -114,60 +264,59 @@ def test_total_flux(self): assert total_flux_list[6] == 2 def test_delete_interpol_caches(self): - x, y = util.make_grid(numPix=20, deltapix=1.) + x, y = util.make_grid(numPix=20, deltapix=1.0) gauss = Gaussian() - flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=1.) + flux = gauss.function(x, y, amp=1.0, center_x=0.0, center_y=0.0, sigma=1.0) image = util.array2image(flux) - light_model_list = ['INTERPOL', 'INTERPOL'] + light_model_list = ["INTERPOL", "INTERPOL"] kwargs_list = [ - {'image': image, 'scale': 1, 'phi_G': 0, 'center_x': 0, 'center_y': 0}, - {'image': image, 'scale': 1, 'phi_G': 0, 'center_x': 0, 'center_y': 0} + {"image": image, "scale": 1, "phi_G": 0, "center_x": 0, "center_y": 0}, + {"image": image, "scale": 1, "phi_G": 0, "center_x": 0, "center_y": 0}, ] lightModel = LightModel(light_model_list=light_model_list) output = lightModel.surface_brightness(x, y, kwargs_list) for func in lightModel.func_list: - assert hasattr(func, '_image_interp') + assert hasattr(func, "_image_interp") lightModel.delete_interpol_caches() for func in lightModel.func_list: - assert not hasattr(func, '_image_interp') + assert not hasattr(func, "_image_interp") def test_check_positive_flux_profile(self): - ligthModel = LightModel(light_model_list=['GAUSSIAN']) - kwargs_list = [{'amp': 0, 'sigma': 1}] + ligthModel = LightModel(light_model_list=["GAUSSIAN"]) + kwargs_list = [{"amp": 0, "sigma": 1}] bool = ligthModel.check_positive_flux_profile(kwargs_list) assert bool - kwargs_list = [{'amp': -1, 'sigma': 1}] + kwargs_list = [{"amp": -1, "sigma": 1}] bool = ligthModel.check_positive_flux_profile(kwargs_list) assert not bool class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['WRONG']) + lighModel = LightModel(light_model_list=["WRONG"]) with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['UNIFORM']) - lighModel.light_3d(r=1, kwargs_list=[{'amp': 1}]) + lighModel = LightModel(light_model_list=["UNIFORM"]) + lighModel.light_3d(r=1, kwargs_list=[{"amp": 1}]) with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['UNIFORM']) - lighModel.profile_type_list = ['WRONG'] + lighModel = LightModel(light_model_list=["UNIFORM"]) + lighModel.profile_type_list = ["WRONG"] lighModel.functions_split(x=0, y=0, kwargs_list=[{}]) with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['UNIFORM']) - lighModel.profile_type_list = ['WRONG'] + lighModel = LightModel(light_model_list=["UNIFORM"]) + lighModel.profile_type_list = ["WRONG"] lighModel.num_param_linear(kwargs_list=[{}]) with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['UNIFORM']) - lighModel.profile_type_list = ['WRONG'] + lighModel = LightModel(light_model_list=["UNIFORM"]) + lighModel.profile_type_list = ["WRONG"] lighModel.update_linear(param=[1], i=0, 
kwargs_list=[{}]) with self.assertRaises(ValueError): - lighModel = LightModel(light_model_list=['UNIFORM']) - lighModel.profile_type_list = ['WRONG'] + lighModel = LightModel(light_model_list=["UNIFORM"]) + lighModel.profile_type_list = ["WRONG"] lighModel.total_flux(kwargs_list=[{}]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_light_param.py b/test/test_LightModel/test_light_param.py index 710e581aa..08c60f693 100644 --- a/test/test_LightModel/test_light_param.py +++ b/test/test_LightModel/test_light_param.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -8,30 +8,108 @@ class TestParam(object): - def setup_method(self): - self.light_model_list = ['GAUSSIAN', 'MULTI_GAUSSIAN', 'SERSIC', 'SERSIC_ELLIPSE', - 'CORE_SERSIC', 'SHAPELETS', 'HERNQUIST', - 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE', 'UNIFORM', - 'SHAPELETS', 'SHAPELETS_POLAR_EXP', - 'SLIT_STARLETS', - ] + self.light_model_list = [ + "GAUSSIAN", + "MULTI_GAUSSIAN", + "SERSIC", + "SERSIC_ELLIPSE", + "CORE_SERSIC", + "SHAPELETS", + "HERNQUIST", + "HERNQUIST_ELLIPSE", + "PJAFFE", + "PJAFFE_ELLIPSE", + "UNIFORM", + "SHAPELETS", + "SHAPELETS_POLAR_EXP", + "SLIT_STARLETS", + ] self.kwargs = [ - {'amp': 1., 'sigma': 1, 'center_x': 0, 'center_y': 0}, # 'GAUSSIAN' - {'amp': [1., 2], 'sigma': [1, 3], 'center_x': 0, 'center_y': 0}, # 'MULTI_GAUSSIAN' - {'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'center_x': 0, 'center_y': 0}, # 'SERSIC' - {'amp': 1, 'R_sersic': 0.5, 'n_sersic': 1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0, 'center_y': 0}, # 'SERSIC_ELLIPSE' - {'amp': 1, 'R_sersic': 0.5, 'Rb': 0.1, 'gamma': 2., 'n_sersic': 1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0, 'center_y': 0}, + {"amp": 1.0, "sigma": 1, "center_x": 0, "center_y": 0}, # 'GAUSSIAN' + { + "amp": [1.0, 2], + "sigma": [1, 3], + "center_x": 0, + "center_y": 0, + }, # 'MULTI_GAUSSIAN' + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC' + { + "amp": 1, + "R_sersic": 0.5, + "n_sersic": 1, + "e1": 0.1, + "e2": 0.1, + "center_x": 0, + "center_y": 0, + }, # 'SERSIC_ELLIPSE' + { + "amp": 1, + "R_sersic": 0.5, + "Rb": 0.1, + "gamma": 2.0, + "n_sersic": 1, + "e1": 0.1, + "e2": 0.1, + "center_x": 0, + "center_y": 0, + }, # 'CORE_SERSIC' - {'amp': [1, 1, 1], 'beta': 0.5, 'n_max': 1, 'center_x': 0, 'center_y': 0}, # 'SHAPELETS' - {'amp': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}, # 'HERNQUIST' - {'amp': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1}, # 'HERNQUIST_ELLIPSE' - {'amp': 1, 'Ra': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0}, # 'PJAFFE' - {'amp': 1, 'Ra': 1, 'Rs': 0.5, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1}, # 'PJAFFE_ELLIPSE' - {'amp': 1}, # 'UNIFORM' - {'amp': [1], 'beta': 1, 'n_max': 0, 'center_x': 0, 'center_y': 0}, # 'SHAPELETS' - {'amp': [1], 'beta': 1, 'n_max': 0, 'center_x': 0, 'center_y': 0}, # 'SHAPELETS_POLAR_EXP' - {'amp': np.ones((3*20**2,)), 'n_scales': 3, 'n_pixels': 20**2, 'scale': 0.05, 'center_x': 0, 'center_y': 0}, # 'SLIT_STARLETS' + { + "amp": [1, 1, 1], + "beta": 0.5, + "n_max": 1, + "center_x": 0, + "center_y": 0, + }, # 'SHAPELETS' + {"amp": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}, # 'HERNQUIST' + { + "amp": 1, + "Rs": 0.5, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + }, # 'HERNQUIST_ELLIPSE' + {"amp": 1, "Ra": 1, "Rs": 0.5, "center_x": 0, "center_y": 0}, # 'PJAFFE' + { + "amp": 1, + "Ra": 1, + "Rs": 0.5, + "center_x": 0, + "center_y": 0, + 
"e1": 0.1, + "e2": 0.1, + }, # 'PJAFFE_ELLIPSE' + {"amp": 1}, # 'UNIFORM' + { + "amp": [1], + "beta": 1, + "n_max": 0, + "center_x": 0, + "center_y": 0, + }, # 'SHAPELETS' + { + "amp": [1], + "beta": 1, + "n_max": 0, + "center_x": 0, + "center_y": 0, + }, # 'SHAPELETS_POLAR_EXP' + { + "amp": np.ones((3 * 20**2,)), + "n_scales": 3, + "n_pixels": 20**2, + "scale": 0.05, + "center_x": 0, + "center_y": 0, + }, # 'SLIT_STARLETS' ] # self.kwargs_sigma = [ # {'amp_sigma': 1., 'sigma_sigma': 1, 'center_x_sigma': 0, 'center_y_sigma': 0}, @@ -51,24 +129,72 @@ def setup_method(self): # {'amp_sigma': 0.1}, # 'UNIFORM' # ] self.kwargs_fixed = [ - {}, {'sigma': [1, 3]}, {}, {}, {}, {'n_max': 1}, {}, {}, {}, {}, {}, {'n_max': 0}, {'n_max': 0}, - {'n_scales': 3, 'n_pixels': 20**2, 'scale': 0.05, 'center_x': 0, 'center_y': 0}, + {}, + {"sigma": [1, 3]}, + {}, + {}, + {}, + {"n_max": 1}, + {}, + {}, + {}, + {}, + {}, + {"n_max": 0}, + {"n_max": 0}, + { + "n_scales": 3, + "n_pixels": 20**2, + "scale": 0.05, + "center_x": 0, + "center_y": 0, + }, ] self.kwargs_fixed_linear = [ - {}, {'sigma': [1, 3]}, {}, {}, {}, {'n_max': 1}, {}, {}, {}, {}, {}, {}, {}, - {'n_scales': 3, 'n_pixels': 20**2, 'scale': 0.05, 'center_x': 0, 'center_y': 0}, + {}, + {"sigma": [1, 3]}, + {}, + {}, + {}, + {"n_max": 1}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + { + "n_scales": 3, + "n_pixels": 20**2, + "scale": 0.05, + "center_x": 0, + "center_y": 0, + }, ] # self.kwargs_mean = [] # for i in range(len(self.light_model_list)): # kwargs_mean_k = self.kwargs[i].copy() # #kwargs_mean_k.update(self.kwargs_sigma[i]) # self.kwargs_mean.append(kwargs_mean_k) - self.param = LightParam(light_model_list=self.light_model_list, - kwargs_fixed=self.kwargs_fixed, param_type='source_light', linear_solver=False) - self.param_linear = LightParam(light_model_list=self.light_model_list, - kwargs_fixed=self.kwargs_fixed_linear, param_type='source_light', linear_solver=True) - self.param_fixed = LightParam(light_model_list=self.light_model_list, - kwargs_fixed=self.kwargs, param_type='source_light', linear_solver=False) + self.param = LightParam( + light_model_list=self.light_model_list, + kwargs_fixed=self.kwargs_fixed, + param_type="source_light", + linear_solver=False, + ) + self.param_linear = LightParam( + light_model_list=self.light_model_list, + kwargs_fixed=self.kwargs_fixed_linear, + param_type="source_light", + linear_solver=True, + ) + self.param_fixed = LightParam( + light_model_list=self.light_model_list, + kwargs_fixed=self.kwargs, + param_type="source_light", + linear_solver=False, + ) def test_get_setParams(self): args = self.param.set_params(self.kwargs) @@ -91,78 +217,137 @@ def test_get_setParams(self): def test_num_params(self): num, list = self.param.num_param() - assert num == (66+1200) + assert num == (66 + 1200) def test_param_name_list(self): param_name_list = self.param.param_name_list - assert param_name_list[0][0] == 'amp' + assert param_name_list[0][0] == "amp" def test_num_param_linear(self): - kwargs_fixed = [ - {}, {'sigma': [1, 3]}, {}, {}, {}, {'n_max': 1}, {}, {}, - {}, {}, {}, {'n_max': 0}, {'n_max': 0}, - {'n_scales': 3, 'n_pixels': 20**2, 'scale': 0.05, 'center_x': 0, 'center_y': 0}, + {}, + {"sigma": [1, 3]}, + {}, + {}, + {}, + {"n_max": 1}, + {}, + {}, + {}, + {}, + {}, + {"n_max": 0}, + {"n_max": 0}, + { + "n_scales": 3, + "n_pixels": 20**2, + "scale": 0.05, + "center_x": 0, + "center_y": 0, + }, ] - param = LightParam(light_model_list=self.light_model_list, - kwargs_fixed=kwargs_fixed, 
param_type='source_light', linear_solver=True) + param = LightParam( + light_model_list=self.light_model_list, + kwargs_fixed=kwargs_fixed, + param_type="source_light", + linear_solver=True, + ) num = param.num_param_linear() - assert num == (16+1200) + assert num == (16 + 1200) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['WRONG'], kwargs_fixed=[{}]) + lighModel = LightParam(light_model_list=["WRONG"], kwargs_fixed=[{}]) with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['MULTI_GAUSSIAN'], kwargs_fixed=[{}]) - lighModel.set_params(kwargs_list=[{'amp': 1, 'sigma': 1}]) + lighModel = LightParam( + light_model_list=["MULTI_GAUSSIAN"], kwargs_fixed=[{}] + ) + lighModel.set_params(kwargs_list=[{"amp": 1, "sigma": 1}]) with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['SHAPELETS'], kwargs_fixed=[{}], linear_solver=False) + lighModel = LightParam( + light_model_list=["SHAPELETS"], kwargs_fixed=[{}], linear_solver=False + ) lighModel.num_param() with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['SHAPELETS'], kwargs_fixed=[{}], linear_solver=False) + lighModel = LightParam( + light_model_list=["SHAPELETS"], kwargs_fixed=[{}], linear_solver=False + ) lighModel.get_params(args=[], i=0) with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['MULTI_GAUSSIAN'], kwargs_fixed=[{}], linear_solver=False) + lighModel = LightParam( + light_model_list=["MULTI_GAUSSIAN"], + kwargs_fixed=[{}], + linear_solver=False, + ) lighModel.get_params(args=[1, 1, 1, 1], i=0) with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{}], linear_solver=False) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{}], + linear_solver=False, + ) lighModel.get_params(args=[1], i=0) with self.assertRaises(ValueError): - # no fixed params provided - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{}], linear_solver=False) - lighModel.set_params(kwargs_list=[{'amp': np.ones((3 * 20 ** 2))}]) + # no fixed params provided + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{}], + linear_solver=False, + ) + lighModel.set_params(kwargs_list=[{"amp": np.ones((3 * 20**2))}]) with self.assertRaises(ValueError): # missing fixed params - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{'n_scales': 3}], linear_solver=False) - lighModel.set_params(kwargs_list=[{'amp': np.ones((3 * 20 ** 2))}]) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{"n_scales": 3}], + linear_solver=False, + ) + lighModel.set_params(kwargs_list=[{"amp": np.ones((3 * 20**2))}]) with self.assertRaises(ValueError): # missing fixed params - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{'n_scales': 3}], linear_solver=False) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{"n_scales": 3}], + linear_solver=False, + ) lighModel.num_param() with self.assertRaises(ValueError): # missing fixed params - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{'amp': np.ones((3*20**2))}], - linear_solver=False) - lighModel.set_params([{'n_scales': 3}]) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{"amp": np.ones((3 * 20**2))}], + linear_solver=False, + ) + 
lighModel.set_params([{"n_scales": 3}]) with self.assertRaises(ValueError): # missing fixed params 'n_pixels' - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{}], linear_solver=False) - lighModel.set_params([{'n_scales': 3}]) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{}], + linear_solver=False, + ) + lighModel.set_params([{"n_scales": 3}]) # missing fixed params 'n_scales' - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{}], linear_solver=False) - lighModel.set_params([{'n_pixels': 3}]) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{}], + linear_solver=False, + ) + lighModel.set_params([{"n_pixels": 3}]) with self.assertRaises(ValueError): - lighModel = LightParam(light_model_list=['SLIT_STARLETS'], kwargs_fixed=[{'amp': np.ones((3 * 20 ** 2))}], - linear_solver=False) - lighModel.set_params([{'n_scales': 3}]) + lighModel = LightParam( + light_model_list=["SLIT_STARLETS"], + kwargs_fixed=[{"amp": np.ones((3 * 20**2))}], + linear_solver=False, + ) + lighModel.set_params([{"n_scales": 3}]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_LightModel/test_linear_basis.py b/test/test_LightModel/test_linear_basis.py index 654d15298..06697b90e 100644 --- a/test/test_LightModel/test_linear_basis.py +++ b/test/test_LightModel/test_linear_basis.py @@ -1,15 +1,13 @@ - from lenstronomy.LightModel.linear_basis import LinearBasis class TestLinearBasis(object): - def setup_method(self): pass def test_linear_param_from_kwargs(self): - linear_basis = LinearBasis(light_model_list=['UNIFORM', 'UNIFORM']) - kwargs_list = [{'amp': 0.5}, {'amp': -1}] + linear_basis = LinearBasis(light_model_list=["UNIFORM", "UNIFORM"]) + kwargs_list = [{"amp": 0.5}, {"amp": -1}] param = linear_basis.linear_param_from_kwargs(kwargs_list) - assert param[0] == kwargs_list[0]['amp'] - assert param[1] == kwargs_list[1]['amp'] \ No newline at end of file + assert param[0] == kwargs_list[0]["amp"] + assert param[1] == kwargs_list[1]["amp"] diff --git a/test/test_Plots/test_chain_plot.py b/test/test_Plots/test_chain_plot.py index a078fe3b1..b4e4f4a16 100644 --- a/test/test_Plots/test_chain_plot.py +++ b/test/test_Plots/test_chain_plot.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import lenstronomy.Util.simulation_util as sim_util @@ -16,27 +16,35 @@ import unittest import matplotlib -matplotlib.use('agg') + +matplotlib.use("agg") import matplotlib.pyplot as plt import numpy as np class TestChainPlots(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" + def setup_method(self): # data specifics deltaPix = 0.5 # pixel size in arcsec (area per pixel = deltaPix**2) fwhm = 0.5 # full width half max of PSF - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_gaussian = PSF(**kwargs_psf_gaussian) - self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf_gaussian.kernel_point_source} + self.kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": psf_gaussian.kernel_point_source, + } def test_psf_iteration_compare(self): kwargs_psf = self.kwargs_psf - kwargs_psf['kernel_point_source_init'] = kwargs_psf['kernel_point_source'] - kwargs_psf['psf_error_map'] = np.ones_like(kwargs_psf['kernel_point_source']) + 
kwargs_psf["kernel_point_source_init"] = kwargs_psf["kernel_point_source"] + kwargs_psf["psf_error_map"] = np.ones_like(kwargs_psf["kernel_point_source"]) f, ax = chain_plot.psf_iteration_compare(kwargs_psf=kwargs_psf, vmin=-1, vmax=1) plt.close() f, ax = chain_plot.psf_iteration_compare(kwargs_psf=kwargs_psf) @@ -46,21 +54,23 @@ def test_plot_chain(self): X2_list = [1, 1, 2] pos_list = [[1, 0], [2, 0], [3, 0]] vel_list = [[-1, 0], [0, 0], [1, 0]] - param_list = ['test1', 'test2'] + param_list = ["test1", "test2"] chain = X2_list, pos_list, vel_list chain_plot.plot_chain(chain=chain, param_list=param_list) plt.close() def test_plot_mcmc_behaviour(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - param_mcmc = ['a', 'b'] + param_mcmc = ["a", "b"] samples_mcmc = np.random.random((10, 1000)) dist_mcmc = np.random.random(1000) - chain_plot.plot_mcmc_behaviour(ax, samples_mcmc, param_mcmc, dist_mcmc, num_average=10) + chain_plot.plot_mcmc_behaviour( + ax, samples_mcmc, param_mcmc, dist_mcmc, num_average=10 + ) plt.close() def test_chain_list(self): - param = ['a', 'b'] + param = ["a", "b"] X2_list = [1, 1, 2] pos_list = [[1, 0], [2, 0], [3, 0]] @@ -70,10 +80,11 @@ def test_chain_list(self): samples_mcmc = np.random.random((10, 1000)) dist_mcmc = np.random.random(1000) - chain_list = [['PSO', chain, param], - ['EMCEE', samples_mcmc, param, dist_mcmc], - ['MULTINEST', samples_mcmc, param, dist_mcmc] - ] + chain_list = [ + ["PSO", chain, param], + ["EMCEE", samples_mcmc, param, dist_mcmc], + ["MULTINEST", samples_mcmc, param, dist_mcmc], + ] chain_plot.plot_chain_list(chain_list, index=0) plt.close() @@ -84,11 +95,10 @@ def test_chain_list(self): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - chain_plot.plot_chain_list(chain_list=[['WRONG']], index=0) + chain_plot.plot_chain_list(chain_list=[["WRONG"]], index=0) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Plots/test_lens_plot.py b/test/test_Plots/test_lens_plot.py index 8137487f1..346d96e50 100644 --- a/test/test_Plots/test_lens_plot.py +++ b/test/test_Plots/test_lens_plot.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest from lenstronomy.LensModel.lens_model import LensModel @@ -6,83 +6,170 @@ import lenstronomy.Plots.model_plot as output_plots import matplotlib -matplotlib.use('agg') + +matplotlib.use("agg") import matplotlib.pyplot as plt class TestLensPlot(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" + def setup_method(self): pass def test_lens_model_plot(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['SIS']) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - lens_plot.lens_model_plot(ax, lensModel, kwargs_lens, numPix=10, deltaPix=0.5, sourcePos_x=0, sourcePos_y=0, - point_source=True, with_caustics=True, fast_caustic=False) + lensModel = LensModel(lens_model_list=["SIS"]) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + lens_plot.lens_model_plot( + ax, + lensModel, + kwargs_lens, + numPix=10, + deltaPix=0.5, + sourcePos_x=0, + sourcePos_y=0, + point_source=True, + with_caustics=True, + fast_caustic=False, + ) plt.close() - lens_plot.lens_model_plot(ax, lensModel, kwargs_lens, numPix=10, deltaPix=0.5, sourcePos_x=0, sourcePos_y=0, - point_source=True, with_caustics=True, fast_caustic=True) + lens_plot.lens_model_plot( + ax, + lensModel, + kwargs_lens, + numPix=10, + deltaPix=0.5, + 
sourcePos_x=0, + sourcePos_y=0, + point_source=True, + with_caustics=True, + fast_caustic=True, + ) plt.close() - lens_plot.lens_model_plot(ax, lensModel, kwargs_lens, numPix=10, deltaPix=0.5, sourcePos_x=0, sourcePos_y=0, - point_source=True, with_caustics=True, fast_caustic=True, coord_inverse=True) + lens_plot.lens_model_plot( + ax, + lensModel, + kwargs_lens, + numPix=10, + deltaPix=0.5, + sourcePos_x=0, + sourcePos_y=0, + point_source=True, + with_caustics=True, + fast_caustic=True, + coord_inverse=True, + ) plt.close() def test_arrival_time_surface(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['SIS']) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - lens_plot.arrival_time_surface(ax, lensModel, kwargs_lens, numPix=100, deltaPix=0.05, sourcePos_x=0.02, - sourcePos_y=0, point_source=True, with_caustics=True, - image_color_list=['k', 'k', 'k', 'r']) + lensModel = LensModel(lens_model_list=["SIS"]) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + lens_plot.arrival_time_surface( + ax, + lensModel, + kwargs_lens, + numPix=100, + deltaPix=0.05, + sourcePos_x=0.02, + sourcePos_y=0, + point_source=True, + with_caustics=True, + image_color_list=["k", "k", "k", "r"], + ) plt.close() - lens_plot.arrival_time_surface(ax, lensModel, kwargs_lens, numPix=100, deltaPix=0.05, sourcePos_x=0.02, - sourcePos_y=0, point_source=True, with_caustics=False, - image_color_list=None) + lens_plot.arrival_time_surface( + ax, + lensModel, + kwargs_lens, + numPix=100, + deltaPix=0.05, + sourcePos_x=0.02, + sourcePos_y=0, + point_source=True, + with_caustics=False, + image_color_list=None, + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['SIS']) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - lens_plot.arrival_time_surface(ax, lensModel, kwargs_lens, numPix=100, deltaPix=0.05, sourcePos_x=0.02, - sourcePos_y=0, - point_source=False, with_caustics=False) + lensModel = LensModel(lens_model_list=["SIS"]) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + lens_plot.arrival_time_surface( + ax, + lensModel, + kwargs_lens, + numPix=100, + deltaPix=0.05, + sourcePos_x=0.02, + sourcePos_y=0, + point_source=False, + with_caustics=False, + ) plt.close() def test_distortions(self): - lensModel = LensModel(lens_model_list=['SIS']) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - lens_plot.distortions(lensModel, kwargs_lens, num_pix=10, delta_pix=0.2, center_ra=0, center_dec=0, differential_scale=0.0001) + lensModel = LensModel(lens_model_list=["SIS"]) + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + lens_plot.distortions( + lensModel, + kwargs_lens, + num_pix=10, + delta_pix=0.2, + center_ra=0, + center_dec=0, + differential_scale=0.0001, + ) plt.close() - lens_plot.distortions(lensModel, kwargs_lens, num_pix=10, delta_pix=0.2, center_ra=0, center_dec=0, - differential_scale=0.0001, smoothing_scale=0.1) + lens_plot.distortions( + lensModel, + kwargs_lens, + num_pix=10, + delta_pix=0.2, + center_ra=0, + center_dec=0, + differential_scale=0.0001, + smoothing_scale=0.1, + ) plt.close() def test_curved_arc_illustration(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['CURVED_ARC_SIS_MST']) - kwargs_lens = [{'radial_stretch': 1.0466690706465702, 'tangential_stretch': 4.598552192305616, 'curvature': 0.8116297351731543, 'direction': 2.6288852083221323, 'center_x': -1.200866007937402, 'center_y': 
0.6829881436542166}] + lensModel = LensModel(lens_model_list=["CURVED_ARC_SIS_MST"]) + kwargs_lens = [ + { + "radial_stretch": 1.0466690706465702, + "tangential_stretch": 4.598552192305616, + "curvature": 0.8116297351731543, + "direction": 2.6288852083221323, + "center_x": -1.200866007937402, + "center_y": 0.6829881436542166, + } + ] lens_plot.curved_arc_illustration(ax, lensModel, kwargs_lens) plt.close() def test_stretch_plot(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['SIE']) - kwargs_lens = [{'theta_E': 1, 'e1': 0.2, 'e2': 0., 'center_x': 0, 'center_y': 0}] + lensModel = LensModel(lens_model_list=["SIE"]) + kwargs_lens = [ + {"theta_E": 1, "e1": 0.2, "e2": 0.0, "center_x": 0, "center_y": 0} + ] lens_plot.stretch_plot(ax, lensModel, kwargs_lens) plt.close() def test_shear_plot(self): f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensModel = LensModel(lens_model_list=['SIE']) - kwargs_lens = [{'theta_E': 1, 'e1': 0.2, 'e2': 0., 'center_x': 0, 'center_y': 0}] + lensModel = LensModel(lens_model_list=["SIE"]) + kwargs_lens = [ + {"theta_E": 1, "e1": 0.2, "e2": 0.0, "center_x": 0, "center_y": 0} + ] lens_plot.shear_plot(ax, lensModel, kwargs_lens) plt.close() -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Plots/test_model_plot.py b/test/test_Plots/test_model_plot.py index d77eb8a32..ab303fcfd 100644 --- a/test/test_Plots/test_model_plot.py +++ b/test/test_Plots/test_model_plot.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -14,17 +14,16 @@ from lenstronomy.Data.psf import PSF import matplotlib -matplotlib.use('agg') + +matplotlib.use("agg") import matplotlib.pyplot as plt import unittest class TestOutputPlots(object): - """ - test the fitting sequences - """ - def setup_method(self): + """Test the fitting sequences.""" + def setup_method(self): # data specifics sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -34,87 +33,163 @@ def setup_method(self): # PSF specification - self.kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + self.kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**self.kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'truncation': 5, 'pixel_size': deltaPix} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "truncation": 5, + "pixel_size": deltaPix, + } psf_gaussian = PSF(**kwargs_psf_gaussian) - self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf_gaussian.kernel_point_source} + self.kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": psf_gaussian.kernel_point_source, + } psf_class = PSF(**self.kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) e1, e2 = param_util.phi_q2_ellipticity(0.2, 0.8) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} - - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] - 
lens_model_class = LensModel(lens_model_list=lens_model_list, multi_plane=True, lens_redshift_list=[0.5, 0.5], - z_source=2.0) + lens_model_class = LensModel( + lens_model_list=lens_model_list, + multi_plane=True, + lens_redshift_list=[0.5, 0.5], + z_source=2.0, + ) self.LensModel = lens_model_class # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_list = ['SOURCE_POSITION'] - point_source_class = PointSource(point_source_type_list=point_source_list, fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 1} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_list = ["SOURCE_POSITION"] + point_source_class = PointSource( + point_source_type_list=point_source_list, fixed_magnification_list=[True] + ) + kwargs_numerics = {"supersampling_factor": 1} + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - self.kwargs_data['image_data'] = image_sim - self.kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': point_source_list, - 'fixed_magnification_list': [False], - } - self.kwargs_model_multiplane = {'lens_model_list': lens_model_list, - 'lens_redshift_list': [0.5, 0.5], - 'multi_plane': True, - 'z_source': 2.0, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': point_source_list, - 'fixed_magnification_list': [False], - } + self.kwargs_data["image_data"] = image_sim + self.kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": 
lens_light_model_list, + "point_source_model_list": point_source_list, + "fixed_magnification_list": [False], + } + self.kwargs_model_multiplane = { + "lens_model_list": lens_model_list, + "lens_redshift_list": [0.5, 0.5], + "multi_plane": True, + "z_source": 2.0, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "point_source_model_list": point_source_list, + "fixed_magnification_list": [False], + } self.kwargs_numerics = kwargs_numerics self.data_class = ImageData(**self.kwargs_data) - self.kwargs_params = {'kwargs_lens': self.kwargs_lens, 'kwargs_source': self.kwargs_source, 'kwargs_lens_light': self.kwargs_lens_light, - 'kwargs_ps': self.kwargs_ps} + self.kwargs_params = { + "kwargs_lens": self.kwargs_lens, + "kwargs_source": self.kwargs_source, + "kwargs_lens_light": self.kwargs_lens_light, + "kwargs_ps": self.kwargs_ps, + } def test_lensModelPlot(self): multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, cmap_string="gist_heat", - multi_band_type='single-band') - - multi_band_list_multiplane = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot_multiplane = ModelPlot(multi_band_list_multiplane, self.kwargs_model_multiplane, self.kwargs_params, arrow_size=0.02, - cmap_string="gist_heat", - multi_band_type='single-band') + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + multi_band_type="single-band", + ) + + multi_band_list_multiplane = [ + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics] + ] + lensPlot_multiplane = ModelPlot( + multi_band_list_multiplane, + self.kwargs_model_multiplane, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + multi_band_type="single-band", + ) lensPlot.plot_main(with_caustics=True) plt.close() - cmap = plt.get_cmap('gist_heat') + cmap = plt.get_cmap("gist_heat") - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, cmap_string=cmap) + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string=cmap, + ) lensPlot.plot_separate() plt.close() @@ -135,7 +210,9 @@ def test_lensModelPlot(self): numPix = 100 deltaPix_source = 0.01 f, ax = plt.subplots(1, 1, figsize=(4, 4)) - lensPlot.error_map_source_plot(ax=ax, numPix=numPix, deltaPix_source=deltaPix_source, with_caustics=True) + lensPlot.error_map_source_plot( + ax=ax, numPix=numPix, deltaPix_source=deltaPix_source, with_caustics=True + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) @@ -147,64 +224,124 @@ def test_lensModelPlot(self): plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - kwargs_plot = {'index_macromodel': [0]} + kwargs_plot = {"index_macromodel": [0]} lensPlot.substructure_plot(ax=ax, **kwargs_plot) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - kwargs_plot = {'index_macromodel': [0], 'with_critical_curves': True} + kwargs_plot = {"index_macromodel": [0], "with_critical_curves": True} lensPlot.substructure_plot(ax=ax, **kwargs_plot) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - kwargs_plot = {'index_macromodel': [0], 'with_critical_curves': True} + kwargs_plot = {"index_macromodel": [0], "with_critical_curves": True} lensPlot_multiplane.substructure_plot(ax=ax, **kwargs_plot) plt.close() def test_source_plot(self): multi_band_list = [[self.kwargs_data, 
self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, - cmap_string="gist_heat", fast_caustic=False) + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + fast_caustic=False, + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.source_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, with_caustics=True, - caustic_color='yellow', - fsize=15, plot_scale='linear') + ax = lensPlot.source_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=True, + caustic_color="yellow", + fsize=15, + plot_scale="linear", + ) plt.close() def test_source(self): multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, cmap_string="gist_heat") - source, coords_source = lensPlot.source(band_index=0, numPix=10, deltaPix=0.1, image_orientation=True) + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + ) + source, coords_source = lensPlot.source( + band_index=0, numPix=10, deltaPix=0.1, image_orientation=True + ) assert len(source) == 10 - source, coords_source = lensPlot.source(band_index=0, numPix=10, deltaPix=0.1, image_orientation=False) + source, coords_source = lensPlot.source( + band_index=0, numPix=10, deltaPix=0.1, image_orientation=False + ) assert len(source) == 10 - source, coords_source = lensPlot.source(band_index=0, numPix=10, deltaPix=0.1, center=[0, 0]) + source, coords_source = lensPlot.source( + band_index=0, numPix=10, deltaPix=0.1, center=[0, 0] + ) assert len(source) == 10 def test_joint_linear(self): - multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, cmap_string="gist_heat", - multi_band_type='joint-linear', bands_compute=[True, False]) + multi_band_list = [ + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], + ] + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + multi_band_type="joint-linear", + bands_compute=[True, False], + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.data_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, with_caustics=False, - caustic_color='yellow', - fsize=15, plot_scale='linear') + ax = lensPlot.data_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + fsize=15, + plot_scale="linear", + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.model_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, with_caustics=False, - caustic_color='yellow', - fsize=15, plot_scale='linear') + ax = lensPlot.model_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + fsize=15, + plot_scale="linear", + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.convergence_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, with_caustics=False, - caustic_color='yellow', - fsize=15, 
plot_scale='linear') + ax = lensPlot.convergence_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + fsize=15, + plot_scale="linear", + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) ax = lensPlot.normalized_residual_plot(ax=ax) @@ -217,27 +354,40 @@ def test_joint_linear(self): plt.close() def test_reconstruction_all_bands(self): - multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], - [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, - cmap_string="gist_heat", - multi_band_type='joint-linear', bands_compute=[True, True]) + multi_band_list = [ + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], + [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics], + ] + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + multi_band_type="joint-linear", + bands_compute=[True, True], + ) f, axes = lensPlot.reconstruction_all_bands() assert len(axes) == 2 assert len(axes[0]) == 3 plt.close() multi_band_list = [[self.kwargs_data, self.kwargs_psf, self.kwargs_numerics]] - lensPlot = ModelPlot(multi_band_list, self.kwargs_model, self.kwargs_params, arrow_size=0.02, - cmap_string="gist_heat", - multi_band_type='joint-linear', bands_compute=[True]) + lensPlot = ModelPlot( + multi_band_list, + self.kwargs_model, + self.kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + multi_band_type="joint-linear", + bands_compute=[True], + ) f, axes = lensPlot.reconstruction_all_bands() assert len(axes) == 1 assert len(axes[0]) == 3 plt.close() def test_check_solver_error(self): - bool = check_solver_error(image=np.array([0, 0])) assert bool @@ -245,103 +395,195 @@ def test_check_solver_error(self): assert bool == 0 def test_no_linear_solver(self): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1, exposure_time=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1, exposure_time=1 + ) # kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = {'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], - 'kwargs_source': [{'amp': 2, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, bands_compute=[True], - arrow_size=0.02, cmap_string="gist_heat", linear_solver=False) + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 2, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[[kwargs_data, {"psf_type": "NONE"}, {}]], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + bands_compute=[True], + arrow_size=0.02, + cmap_string="gist_heat", + linear_solver=False, + ) lensPlot.plot_main(with_caustics=True) plt.close() - assert kwargs_params['kwargs_source'][0]['amp'] == 2 + assert kwargs_params["kwargs_source"][0]["amp"] == 2 class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1) - #kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = 
{'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], 'kwargs_source': [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, - arrow_size=0.02, cmap_string="gist_heat") + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1 + ) + # kwargs_data['image_data'] = np.zeros((10, 10)) + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[[kwargs_data, {"psf_type": "NONE"}, {}]], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + ) with self.assertRaises(ValueError): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1 + ) # kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = {'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], 'kwargs_source': [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, - arrow_size=0.02, cmap_string="gist_heat") + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[[kwargs_data, {}, {}]], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + arrow_size=0.02, + cmap_string="gist_heat", + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.source_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, with_caustics=False, - caustic_color='yellow', - fsize=15, plot_scale='bad') + ax = lensPlot.source_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + fsize=15, + plot_scale="bad", + ) plt.close() with self.assertRaises(ValueError): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1 + ) # kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = {'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], - 'kwargs_source': [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, bands_compute=[False], - arrow_size=0.02, cmap_string="gist_heat") + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[[kwargs_data, {"psf_type": "NONE"}, {}]], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + bands_compute=[False], + arrow_size=0.02, + cmap_string="gist_heat", + ) 
lensPlot._select_band(band_index=0) with self.assertRaises(ValueError): - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1, exposure_time=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1, exposure_time=1 + ) # kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = {'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], - 'kwargs_source': [{'amp': 1, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, bands_compute=[True], - arrow_size=0.02, cmap_string="gist_heat") + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 1, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[[kwargs_data, {"psf_type": "NONE"}, {}]], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + bands_compute=[True], + arrow_size=0.02, + cmap_string="gist_heat", + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = lensPlot.source_plot(ax=ax, numPix=10, deltaPix_source=0.1, v_min=None, v_max=None, - with_caustics=False, - caustic_color='yellow', - fsize=15, plot_scale='wrong') + ax = lensPlot.source_plot( + ax=ax, + numPix=10, + deltaPix_source=0.1, + v_min=None, + v_max=None, + with_caustics=False, + caustic_color="yellow", + fsize=15, + plot_scale="wrong", + ) plt.close() with self.assertRaises(ValueError): # test whether linear_solver=False returns raise when having two bands - kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1, exposure_time=1) + kwargs_data = sim_util.data_configure_simple( + numPix=10, deltaPix=1, background_rms=1, exposure_time=1 + ) # kwargs_data['image_data'] = np.zeros((10, 10)) - kwargs_model = {'source_light_model_list': ['GAUSSIAN']} - kwargs_params = {'kwargs_lens': [], - 'kwargs_source': [{'amp': 2, 'sigma': 1, 'center_x': 0, 'center_y': 0}], - 'kwargs_ps': [], 'kwargs_lens_light': []} - lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}], - [kwargs_data, {'psf_type': 'NONE'}, {}]], - kwargs_model=kwargs_model, kwargs_params=kwargs_params, bands_compute=[True], - arrow_size=0.02, cmap_string="gist_heat", linear_solver=False) + kwargs_model = {"source_light_model_list": ["GAUSSIAN"]} + kwargs_params = { + "kwargs_lens": [], + "kwargs_source": [{"amp": 2, "sigma": 1, "center_x": 0, "center_y": 0}], + "kwargs_ps": [], + "kwargs_lens_light": [], + } + lensPlot = ModelPlot( + multi_band_list=[ + [kwargs_data, {"psf_type": "NONE"}, {}], + [kwargs_data, {"psf_type": "NONE"}, {}], + ], + kwargs_model=kwargs_model, + kwargs_params=kwargs_params, + bands_compute=[True], + arrow_size=0.02, + cmap_string="gist_heat", + linear_solver=False, + ) + def test_interferometry_natwt_Model_Plot_linear_solver(): # Test no errors are raised in the Model Plot linear solver for 'interferometry_natwt' likelihood function. 
     try:
-        kwargs_data = sim_util.data_configure_simple(numPix=10, deltaPix=1, background_rms=1, exposure_time=1)
-        kwargs_data['likelihood_method'] = 'interferometry_natwt'
-        kwargs_model = {'source_light_model_list': ['GAUSSIAN']}
-        kwargs_params = {'kwargs_lens': [],
-                         'kwargs_source': [{'amp': 2, 'sigma': 1, 'center_x': 0, 'center_y': 0}],
-                         'kwargs_ps': [], 'kwargs_lens_light': []}
-        lensPlot = ModelPlot(multi_band_list=[[kwargs_data, {'psf_type': 'NONE'}, {}]],
-                             kwargs_model=kwargs_model, kwargs_params=kwargs_params, bands_compute=[True],
-                             arrow_size=0.02, cmap_string="gist_heat", linear_solver=True)
+        kwargs_data = sim_util.data_configure_simple(
+            numPix=10, deltaPix=1, background_rms=1, exposure_time=1
+        )
+        kwargs_data["likelihood_method"] = "interferometry_natwt"
+        kwargs_model = {"source_light_model_list": ["GAUSSIAN"]}
+        kwargs_params = {
+            "kwargs_lens": [],
+            "kwargs_source": [{"amp": 2, "sigma": 1, "center_x": 0, "center_y": 0}],
+            "kwargs_ps": [],
+            "kwargs_lens_light": [],
+        }
+        lensPlot = ModelPlot(
+            multi_band_list=[[kwargs_data, {"psf_type": "NONE"}, {}]],
+            kwargs_model=kwargs_model,
+            kwargs_params=kwargs_params,
+            bands_compute=[True],
+            arrow_size=0.02,
+            cmap_string="gist_heat",
+            linear_solver=True,
+        )
     except:
-        pytest.fail("Errors are raised in the Model Plot linear solver for the 'interferometric_natwt' likelihood method, which is not expected.")
+        pytest.fail(
+            "Errors are raised in the Model Plot linear solver for the 'interferometric_natwt' likelihood method, which is not expected."
+        )
-if __name__ == '__main__':
+if __name__ == "__main__":
     pytest.main()
diff --git a/test/test_Plots/test_multi_patch_plot.py b/test/test_Plots/test_multi_patch_plot.py
index d5b291743..5e184bd1a 100644
--- a/test/test_Plots/test_multi_patch_plot.py
+++ b/test/test_Plots/test_multi_patch_plot.py
@@ -16,61 +16,91 @@
 class TestMultiPatchPlot(object):
-
     def setup_method(self):
         # data specifics
-        sigma_bkg = .05  # background noise per pixel (Gaussian)
-        exp_time = 100.  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
+        sigma_bkg = 0.05  # background noise per pixel (Gaussian)
+        exp_time = 100.0  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
         numPix = 100  # cutout pixel size
         deltaPix = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
         fwhm = 0.1  # full width half max of PSF (only valid when psf_type='gaussian')
-        psf_type = 'GAUSSIAN'  # 'GAUSSIAN', 'PIXEL', 'NONE'
+        psf_type = "GAUSSIAN"  # 'GAUSSIAN', 'PIXEL', 'NONE'
         # generate the coordinate grid and image properties
-        kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg)
-        kwargs_data['exposure_time'] = exp_time * np.ones_like(kwargs_data['image_data'])
+        kwargs_data = sim_util.data_configure_simple(
+            numPix, deltaPix, exp_time, sigma_bkg
+        )
+        kwargs_data["exposure_time"] = exp_time * np.ones_like(
+            kwargs_data["image_data"]
+        )
         data_class = ImageData(**kwargs_data)
         # generate the psf variables
-        kwargs_psf = {'psf_type': psf_type, 'pixel_size': deltaPix, 'fwhm': fwhm}
+        kwargs_psf = {"psf_type": psf_type, "pixel_size": deltaPix, "fwhm": fwhm}
         # kwargs_psf = sim_util.psf_configure_simple(psf_type=psf_type, fwhm=fwhm, kernelsize=kernel_size, deltaPix=deltaPix, kernel=kernel)
         psf_class = PSF(**kwargs_psf)
         # lensing quantities
-        kwargs_shear = {'gamma1': 0.02, 'gamma2': -0.04}  # shear values to the source plane
-        kwargs_spemd = {'theta_E': 1.26, 'gamma': 2., 'center_x': 0.0, 'center_y': 0.0, 'e1': -0.1,
-                        'e2': 0.05}  # parameters of the deflector lens model
+        kwargs_shear = {
+            "gamma1": 0.02,
+            "gamma2": -0.04,
+        }  # shear values to the source plane
+        kwargs_spemd = {
+            "theta_E": 1.26,
+            "gamma": 2.0,
+            "center_x": 0.0,
+            "center_y": 0.0,
+            "e1": -0.1,
+            "e2": 0.05,
+        }  # parameters of the deflector lens model
         # the lens model is a supperposition of an elliptical lens model with external shear
-        lens_model_list = ['EPL', 'SHEAR']
+        lens_model_list = ["EPL", "SHEAR"]
         kwargs_lens_true = [kwargs_spemd, kwargs_shear]
         lens_model_class = LensModel(lens_model_list=lens_model_list)
         # choice of source type
-        source_type = 'SERSIC'  # 'SERSIC' or 'SHAPELETS'
+        source_type = "SERSIC"  # 'SERSIC' or 'SHAPELETS'
-        source_x = 0.
+ source_x = 0.0 source_y = 0.05 # Sersic parameters in the initial simulation phi_G, q = 0.5, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi_G, q) - kwargs_sersic_source = {'amp': 1000, 'R_sersic': 0.05, 'n_sersic': 1, 'e1': e1, 'e2': e2, 'center_x': source_x, - 'center_y': source_y} + kwargs_sersic_source = { + "amp": 1000, + "R_sersic": 0.05, + "n_sersic": 1, + "e1": e1, + "e2": e2, + "center_x": source_x, + "center_y": source_y, + } # kwargs_else = {'sourcePos_x': source_x, 'sourcePos_y': source_y, 'quasar_amp': 400., 'gamma1_foreground': 0.0, 'gamma2_foreground':-0.0} - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source_true = [kwargs_sersic_source] source_model_class = LightModel(light_model_list=source_model_list) lensEquationSolver = LensEquationSolver(lens_model_class) - x_image, y_image = lensEquationSolver.findBrightImage(source_x, source_y, kwargs_lens_true, numImages=4, - min_distance=deltaPix, search_window=numPix * deltaPix) + x_image, y_image = lensEquationSolver.findBrightImage( + source_x, + source_y, + kwargs_lens_true, + numImages=4, + min_distance=deltaPix, + search_window=numPix * deltaPix, + ) mag = lens_model_class.magnification(x_image, y_image, kwargs=kwargs_lens_true) - kwargs_numerics = {'supersampling_factor': 1} + kwargs_numerics = {"supersampling_factor": 1} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - kwargs_numerics=kwargs_numerics) + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + kwargs_numerics=kwargs_numerics, + ) # generate image model = imageModel.image(kwargs_lens_true, kwargs_source_true) @@ -79,11 +109,12 @@ def setup_method(self): image_sim = model + bkg + poisson data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - } + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + } # make cutous and data instances of them x_pos, y_pos = data_class.map_coord2pix(x_image, y_image) @@ -94,21 +125,35 @@ def setup_method(self): n_cut = 12 x_c = int(x_pos[i]) y_c = int(y_pos[i]) - image_cut = image_sim[int(y_c - n_cut):int(y_c + n_cut), int(x_c - n_cut):int(x_c + n_cut)] - exposure_map_cut = data_class.exposure_map[int(y_c - n_cut):int(y_c + n_cut), - int(x_c - n_cut):int(x_c + n_cut)] + image_cut = image_sim[ + int(y_c - n_cut) : int(y_c + n_cut), int(x_c - n_cut) : int(x_c + n_cut) + ] + exposure_map_cut = data_class.exposure_map[ + int(y_c - n_cut) : int(y_c + n_cut), int(x_c - n_cut) : int(x_c + n_cut) + ] kwargs_data_i = { - 'background_rms': data_class.background_rms, - 'exposure_time': exposure_map_cut, - 'ra_at_xy_0': ra_grid[y_c - n_cut, x_c - n_cut], 'dec_at_xy_0': dec_grid[y_c - n_cut, x_c - n_cut], - 'transform_pix2angle': data_class.transform_pix2angle - , 'image_data': image_cut + "background_rms": data_class.background_rms, + "exposure_time": exposure_map_cut, + "ra_at_xy_0": ra_grid[y_c - n_cut, x_c - n_cut], + "dec_at_xy_0": dec_grid[y_c - n_cut, x_c - n_cut], + "transform_pix2angle": data_class.transform_pix2angle, + "image_data": image_cut, } multi_band_list.append([kwargs_data_i, kwargs_psf, kwargs_numerics]) - kwargs_params = {'kwargs_lens': kwargs_lens_true, 'kwargs_source': kwargs_source_true} - self.multiPatch = MultiPatchPlot(multi_band_list, kwargs_model, kwargs_params, 
multi_band_type='joint-linear', - kwargs_likelihood=None, verbose=True, cmap_string="gist_heat") + kwargs_params = { + "kwargs_lens": kwargs_lens_true, + "kwargs_source": kwargs_source_true, + } + self.multiPatch = MultiPatchPlot( + multi_band_list, + kwargs_model, + kwargs_params, + multi_band_type="joint-linear", + kwargs_likelihood=None, + verbose=True, + cmap_string="gist_heat", + ) self.data_class = data_class self.model = model self.lens_model_class = lens_model_class @@ -153,5 +198,5 @@ def test_main_plot(self): plt.close() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Plots/test_plot_quasar_images.py b/test/test_Plots/test_plot_quasar_images.py index 6fbef05f5..555dfdcd5 100644 --- a/test/test_Plots/test_plot_quasar_images.py +++ b/test/test_Plots/test_plot_quasar_images.py @@ -6,23 +6,38 @@ class TestPlotQuasarImages(object): - def test_plot_quasar_images(self): - - lens_model_list = ['EPL', 'SHEAR'] + lens_model_list = ["EPL", "SHEAR"] z_source = 1.5 - kwargs_lens = [{'theta_E': 1., 'gamma': 2., 'e1': 0.02, 'e2': -0.09, 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.01, 'gamma2': 0.03}] + kwargs_lens = [ + { + "theta_E": 1.0, + "gamma": 2.0, + "e1": 0.02, + "e2": -0.09, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.01, "gamma2": 0.03}, + ] lensmodel = LensModel(lens_model_list) solver = LensEquationSolver(lensmodel) source_x, source_y = 0.07, 0.03 x_image, y_image = solver.findBrightImage(source_x, source_y, kwargs_lens) - source_fwhm_parsec = 40. + source_fwhm_parsec = 40.0 - plot_quasar_images(lensmodel, x_image, y_image, source_x, source_y, kwargs_lens, - source_fwhm_parsec, z_source) + plot_quasar_images( + lensmodel, + x_image, + y_image, + source_x, + source_y, + kwargs_lens, + source_fwhm_parsec, + z_source, + ) plt.close() -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Plots/test_plot_util.py b/test/test_Plots/test_plot_util.py index 39647b26b..d2e434546 100644 --- a/test/test_Plots/test_plot_util.py +++ b/test/test_Plots/test_plot_util.py @@ -8,7 +8,6 @@ class TestPlotUtil(object): - def setup_method(self): pass @@ -22,23 +21,34 @@ def test_scale_bar(self): plot_util.scale_bar(ax, 3, dist=1, text='1"', flipped=True) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - plot_util.text_description(ax, d=3, text='test', color='w', backgroundcolor='k', flipped=True) + plot_util.text_description( + ax, d=3, text="test", color="w", backgroundcolor="k", flipped=True + ) plt.close() def test_source_position_plot(self): from lenstronomy.PointSource.point_source import PointSource from lenstronomy.LensModel.lens_model import LensModel - lensModel = LensModel(lens_model_list=['SIS']) - ps = PointSource(point_source_type_list=['UNLENSED', 'LENSED_POSITION', 'SOURCE_POSITION'], lensModel=lensModel) - kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0}] - kwargs_ps = [{'ra_image': [1., 1.], 'dec_image': [0, 1], 'point_amp': [1, 1]}, - {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]}, - {'ra_source': 0.1, 'dec_source': 0, 'point_amp': 1.}] + + lensModel = LensModel(lens_model_list=["SIS"]) + ps = PointSource( + point_source_type_list=["UNLENSED", "LENSED_POSITION", "SOURCE_POSITION"], + lensModel=lensModel, + ) + kwargs_lens = [{"theta_E": 1.0, "center_x": 0, "center_y": 0}] + kwargs_ps = [ + {"ra_image": [1.0, 1.0], "dec_image": [0, 1], "point_amp": [1, 1]}, + {"ra_image": [1.0], "dec_image": [1.0], "point_amp": [10]}, + {"ra_source": 0.1, "dec_source": 0, 
"point_amp": 1.0}, + ] ra_source, dec_source = ps.source_position(kwargs_ps, kwargs_lens) from lenstronomy.Data.coord_transforms import Coordinates - coords_source = Coordinates(transform_pix2angle=np.array([[1, 0], [0, 1]])* 0.1, - ra_at_xy_0=-2, - dec_at_xy_0=-2) + + coords_source = Coordinates( + transform_pix2angle=np.array([[1, 0], [0, 1]]) * 0.1, + ra_at_xy_0=-2, + dec_at_xy_0=-2, + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) plot_util.source_position_plot(ax, coords_source, ra_source, dec_source) @@ -46,60 +56,118 @@ def test_source_position_plot(self): def test_result_string(self): x = np.random.normal(loc=1, scale=0.1, size=10000) - string =plot_util.result_string(x, weights=None, title_fmt=".2f", label='test') + string = plot_util.result_string(x, weights=None, title_fmt=".2f", label="test") print(string) - assert string == str('test = ${1.00}_{-0.10}^{+0.10}$') + assert string == str("test = ${1.00}_{-0.10}^{+0.10}$") def test_cmap_conf(self): - cmap = plot_util.cmap_conf(cmap_string='gist_heat') + cmap = plot_util.cmap_conf(cmap_string="gist_heat") cmap_update = plot_util.cmap_conf(cmap_string=cmap) assert cmap.name == cmap_update.name def test_plot_line_set(self): - - coords = Coordinates(transform_pix2angle=[[1, 0], [0, 1]], ra_at_xy_0=0, dec_at_xy_0=0) + coords = Coordinates( + transform_pix2angle=[[1, 0], [0, 1]], ra_at_xy_0=0, dec_at_xy_0=0 + ) line_set_x = np.linspace(start=0, stop=1, num=10) line_set_y = np.linspace(start=0, stop=1, num=10) f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = plot_util.plot_line_set(ax, coords, line_set_x, line_set_y, origin=None, color='g', flipped_x=True, - pixel_offset=False) + ax = plot_util.plot_line_set( + ax, + coords, + line_set_x, + line_set_y, + origin=None, + color="g", + flipped_x=True, + pixel_offset=False, + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = plot_util.plot_line_set(ax, coords, line_set_x, line_set_y, origin=[1, 1], color='g', flipped_x=False, - pixel_offset=True) + ax = plot_util.plot_line_set( + ax, + coords, + line_set_x, + line_set_y, + origin=[1, 1], + color="g", + flipped_x=False, + pixel_offset=True, + ) plt.close() # and here we input a list of arrays - line_set_list_x = [np.linspace(start=0, stop=1, num=10), np.linspace(start=0, stop=1, num=10)] - line_set_list_y = [np.linspace(start=0, stop=1, num=10), np.linspace(start=0, stop=1, num=10)] + line_set_list_x = [ + np.linspace(start=0, stop=1, num=10), + np.linspace(start=0, stop=1, num=10), + ] + line_set_list_y = [ + np.linspace(start=0, stop=1, num=10), + np.linspace(start=0, stop=1, num=10), + ] f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = plot_util.plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=None, color='g', - flipped_x=True) + ax = plot_util.plot_line_set( + ax, + coords, + line_set_list_x, + line_set_list_y, + origin=None, + color="g", + flipped_x=True, + ) plt.close() f, ax = plt.subplots(1, 1, figsize=(4, 4)) - ax = plot_util.plot_line_set(ax, coords, line_set_list_x, line_set_list_y, origin=[1, 1], color='g', - flipped_x=False) + ax = plot_util.plot_line_set( + ax, + coords, + line_set_list_x, + line_set_list_y, + origin=[1, 1], + color="g", + flipped_x=False, + ) plt.close() def test_image_position_plot(self): - coords = Coordinates(transform_pix2angle=[[1, 0], [0, 1]], ra_at_xy_0=0, dec_at_xy_0=0) + coords = Coordinates( + transform_pix2angle=[[1, 0], [0, 1]], ra_at_xy_0=0, dec_at_xy_0=0 + ) f, ax = plt.subplots(1, 1, figsize=(4, 4)) ra_image, dec_image = np.array([1, 2]), np.array([1, 2]) - ax = 
plot_util.image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_list=None, - origin=None, flipped_x=False, pixel_offset=False) + ax = plot_util.image_position_plot( + ax, + coords, + ra_image, + dec_image, + color="w", + image_name_list=None, + origin=None, + flipped_x=False, + pixel_offset=False, + ) plt.close() - ax = plot_util.image_position_plot(ax, coords, ra_image, dec_image, color='w', image_name_list=['A', 'B'], - origin=[1, 1], flipped_x=True, pixel_offset=True) + ax = plot_util.image_position_plot( + ax, + coords, + ra_image, + dec_image, + color="w", + image_name_list=["A", "B"], + origin=[1, 1], + flipped_x=True, + pixel_offset=True, + ) plt.close() def test_cmap_copy(self): from lenstronomy.Plots.plot_util import cmap_conf + cmap_new = cmap_conf("gist_heat") - - -if __name__ == '__main__': + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_Types/test_lensed_position.py b/test/test_PointSource/test_Types/test_lensed_position.py index 499ac002e..8112d32ca 100644 --- a/test/test_PointSource/test_Types/test_lensed_position.py +++ b/test/test_PointSource/test_Types/test_lensed_position.py @@ -5,20 +5,25 @@ class TestLensedPosition(object): - def setup_method(self): - lens_model = LensModel(lens_model_list=['SIS']) - self.kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - self.ps_mag = LensedPositions(lens_model=lens_model, fixed_magnification=True, point_source_frame_list=[0, 0], - index_lens_model_list=[[0]]) + lens_model = LensModel(lens_model_list=["SIS"]) + self.kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + self.ps_mag = LensedPositions( + lens_model=lens_model, + fixed_magnification=True, + point_source_frame_list=[0, 0], + index_lens_model_list=[[0]], + ) self.ps = LensedPositions(lens_model=lens_model, fixed_magnification=False) - self.ps_add = LensedPositions(lens_model=lens_model, fixed_magnification=[False], additional_images=True) - self.kwargs = {'point_amp': [2, 1], 'ra_image': [0, 1.2], 'dec_image': [0, 0]} - self.kwargs_mag = {'source_amp': 2, 'ra_image': [0, 1.2], 'dec_image': [0, 0]} + self.ps_add = LensedPositions( + lens_model=lens_model, fixed_magnification=[False], additional_images=True + ) + self.kwargs = {"point_amp": [2, 1], "ra_image": [0, 1.2], "dec_image": [0, 0]} + self.kwargs_mag = {"source_amp": 2, "ra_image": [0, 1.2], "dec_image": [0, 0]} def test_image_source_position(self): x_img, y_img = self.ps.image_position(self.kwargs, self.kwargs_lens) - npt.assert_almost_equal(x_img, self.kwargs['ra_image']) + npt.assert_almost_equal(x_img, self.kwargs["ra_image"]) x_img_add, y_img_add = self.ps_add.image_position(self.kwargs, self.kwargs_lens) print(x_img_add, x_img) @@ -26,8 +31,10 @@ def test_image_source_position(self): # check whether the source solution matches x_src, y_src = self.ps.source_position(self.kwargs, self.kwargs_lens) - lens_model = LensModel(lens_model_list=['SIS']) - x_src_true, y_src_true = lens_model.ray_shooting(x_img_add, y_img_add, kwargs=self.kwargs_lens) + lens_model = LensModel(lens_model_list=["SIS"]) + x_src_true, y_src_true = lens_model.ray_shooting( + x_img_add, y_img_add, kwargs=self.kwargs_lens + ) npt.assert_almost_equal(x_src_true[0], x_src_true[1]) npt.assert_almost_equal(y_src_true[0], y_src_true[1]) @@ -35,25 +42,43 @@ def test_image_source_position(self): npt.assert_almost_equal(y_src_true, y_src) def test_image_amplitude(self): - amp = self.ps.image_amplitude(self.kwargs, kwargs_lens=self.kwargs_lens, x_pos=self.kwargs['ra_image'], - 
y_pos=self.kwargs['dec_image'], magnification_limit=None, - kwargs_lens_eqn_solver=None) - npt.assert_almost_equal(self.kwargs['point_amp'], amp) + amp = self.ps.image_amplitude( + self.kwargs, + kwargs_lens=self.kwargs_lens, + x_pos=self.kwargs["ra_image"], + y_pos=self.kwargs["dec_image"], + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) + npt.assert_almost_equal(self.kwargs["point_amp"], amp) - amp = self.ps_mag.image_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens, x_pos=None, - y_pos=None, magnification_limit=None, kwargs_lens_eqn_solver=None) + amp = self.ps_mag.image_amplitude( + self.kwargs_mag, + kwargs_lens=self.kwargs_lens, + x_pos=None, + y_pos=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) - amp_pos = self.ps_mag.image_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens, x_pos=self.kwargs['ra_image'], - y_pos=self.kwargs['dec_image'], magnification_limit=None, - kwargs_lens_eqn_solver=None) + amp_pos = self.ps_mag.image_amplitude( + self.kwargs_mag, + kwargs_lens=self.kwargs_lens, + x_pos=self.kwargs["ra_image"], + y_pos=self.kwargs["dec_image"], + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) npt.assert_almost_equal(amp, amp_pos) def test_source_amplitude(self): amp = self.ps.source_amplitude(self.kwargs, kwargs_lens=self.kwargs_lens) - amp_mag = self.ps_mag.source_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens) - npt.assert_almost_equal(amp_mag, self.kwargs_mag['source_amp']) + amp_mag = self.ps_mag.source_amplitude( + self.kwargs_mag, kwargs_lens=self.kwargs_lens + ) + npt.assert_almost_equal(amp_mag, self.kwargs_mag["source_amp"]) assert amp != amp_mag -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_Types/test_ps_base.py b/test/test_PointSource/test_Types/test_ps_base.py index 6f8875cee..e0d4335e4 100644 --- a/test/test_PointSource/test_Types/test_ps_base.py +++ b/test/test_PointSource/test_Types/test_ps_base.py @@ -6,9 +6,12 @@ class TestPSBase(object): - def setup_method(self): - self.base = PSBase(lens_model=LensModel(lens_model_list=[]), fixed_magnification=False, additional_images=False) + self.base = PSBase( + lens_model=LensModel(lens_model_list=[]), + fixed_magnification=False, + additional_images=False, + ) PSBase(fixed_magnification=True, additional_images=True) def test_update_lens_model(self): @@ -16,18 +19,18 @@ def test_update_lens_model(self): assert self.base._solver is None base = PSBase() - base.update_lens_model(lens_model_class=LensModel(lens_model_list=['SIS'])) + base.update_lens_model(lens_model_class=LensModel(lens_model_list=["SIS"])) assert base._solver is not None PSBase(fixed_magnification=True, additional_images=True) class TestUtil(object): - def setup_method(self): pass def test_expand_to_array(self): from lenstronomy.PointSource.Types.base_ps import _expand_to_array + array = 1 num = 3 array_out = _expand_to_array(array, num) @@ -46,8 +49,8 @@ def test_expand_to_array(self): assert array_out[1] == 1 def test_shrink_array(self): - from lenstronomy.PointSource.Types.base_ps import _shrink_array + array = [1, 2, 3] num = 2 array_out = _shrink_array(array, num) @@ -65,9 +68,7 @@ def test_shrink_array(self): _shrink_array(array, num) - class TestRaise(unittest.TestCase): - def test_raise(self): base = PSBase() with self.assertRaises(ValueError): @@ -80,5 +81,5 @@ def test_raise(self): base.source_amplitude(kwargs_ps=None) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git 
a/test/test_PointSource/test_Types/test_source_position.py b/test/test_PointSource/test_Types/test_source_position.py index b4e6b3e4a..f49fe0a2c 100644 --- a/test/test_PointSource/test_Types/test_source_position.py +++ b/test/test_PointSource/test_Types/test_source_position.py @@ -5,54 +5,79 @@ class TestLensedPosition(object): - def setup_method(self): - lens_model = LensModel(lens_model_list=['SIS']) - self.kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] + lens_model = LensModel(lens_model_list=["SIS"]) + self.kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] self.ps_mag = SourcePositions(lens_model=lens_model, fixed_magnification=True) self.ps = SourcePositions(lens_model=lens_model, fixed_magnification=False) - self.kwargs = {'point_amp': [2, 1], 'ra_source': 0.1, 'dec_source': 0} - self.kwargs_mag = {'source_amp': 1, 'ra_source': 0.1, 'dec_source': 0} + self.kwargs = {"point_amp": [2, 1], "ra_source": 0.1, "dec_source": 0} + self.kwargs_mag = {"source_amp": 1, "ra_source": 0.1, "dec_source": 0} def test_image_position(self): x_img, y_img = self.ps.image_position(self.kwargs, self.kwargs_lens) - lens_model = LensModel(lens_model_list=['SIS']) + lens_model = LensModel(lens_model_list=["SIS"]) x_src, y_src = lens_model.ray_shooting(x_img, y_img, kwargs=self.kwargs_lens) - npt.assert_almost_equal(x_src, self.kwargs['ra_source']) - npt.assert_almost_equal(y_src, self.kwargs['dec_source']) + npt.assert_almost_equal(x_src, self.kwargs["ra_source"]) + npt.assert_almost_equal(y_src, self.kwargs["dec_source"]) def test_source_position(self): - x_src, y_src = self.ps.source_position(self.kwargs, kwargs_lens=self.kwargs_lens) - npt.assert_almost_equal(x_src, self.kwargs['ra_source']) - npt.assert_almost_equal(y_src, self.kwargs['dec_source']) + x_src, y_src = self.ps.source_position( + self.kwargs, kwargs_lens=self.kwargs_lens + ) + npt.assert_almost_equal(x_src, self.kwargs["ra_source"]) + npt.assert_almost_equal(y_src, self.kwargs["dec_source"]) def test_image_amplitude(self): x_img, y_img = self.ps.image_position(self.kwargs, kwargs_lens=self.kwargs_lens) - amp = self.ps_mag.image_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens, x_pos=None, - y_pos=None, magnification_limit=None, kwargs_lens_eqn_solver=None) - amp_pos = self.ps_mag.image_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens, x_pos=x_img, - y_pos=y_img, magnification_limit=None, kwargs_lens_eqn_solver=None) + amp = self.ps_mag.image_amplitude( + self.kwargs_mag, + kwargs_lens=self.kwargs_lens, + x_pos=None, + y_pos=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) + amp_pos = self.ps_mag.image_amplitude( + self.kwargs_mag, + kwargs_lens=self.kwargs_lens, + x_pos=x_img, + y_pos=y_img, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) npt.assert_almost_equal(amp_pos, amp) - amp = self.ps.image_amplitude(self.kwargs, kwargs_lens=self.kwargs_lens, x_pos=x_img, - y_pos=y_img, magnification_limit=None, - kwargs_lens_eqn_solver=None) - npt.assert_almost_equal(amp, self.kwargs['point_amp']) + amp = self.ps.image_amplitude( + self.kwargs, + kwargs_lens=self.kwargs_lens, + x_pos=x_img, + y_pos=y_img, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) + npt.assert_almost_equal(amp, self.kwargs["point_amp"]) - #see if works with mag_pert defined - self.kwargs['mag_pert'] = [0.1,0.1] - amp_pert = self.ps.image_amplitude(self.kwargs, kwargs_lens=self.kwargs_lens, x_pos=x_img, - y_pos=y_img, magnification_limit=None, - kwargs_lens_eqn_solver=None) - 
npt.assert_almost_equal(amp_pert, 0.1*amp) + # see if works with mag_pert defined + self.kwargs["mag_pert"] = [0.1, 0.1] + amp_pert = self.ps.image_amplitude( + self.kwargs, + kwargs_lens=self.kwargs_lens, + x_pos=x_img, + y_pos=y_img, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) + npt.assert_almost_equal(amp_pert, 0.1 * amp) def test_source_amplitude(self): amp = self.ps.source_amplitude(self.kwargs, kwargs_lens=self.kwargs_lens) - amp_mag = self.ps_mag.source_amplitude(self.kwargs_mag, kwargs_lens=self.kwargs_lens) - npt.assert_almost_equal(amp_mag, self.kwargs_mag['source_amp']) + amp_mag = self.ps_mag.source_amplitude( + self.kwargs_mag, kwargs_lens=self.kwargs_lens + ) + npt.assert_almost_equal(amp_mag, self.kwargs_mag["source_amp"]) assert amp != amp_mag -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_Types/test_unlensed.py b/test/test_PointSource/test_Types/test_unlensed.py index a440f5a4f..89e4d6b58 100644 --- a/test/test_PointSource/test_Types/test_unlensed.py +++ b/test/test_PointSource/test_Types/test_unlensed.py @@ -4,31 +4,35 @@ class TestUnlensed(object): - def setup_method(self): - self.ps = Unlensed() - self.kwargs = {'point_amp': [2, 1], 'ra_image': [0, 1], 'dec_image': [1, 0]} + self.kwargs = {"point_amp": [2, 1], "ra_image": [0, 1], "dec_image": [1, 0]} def test_image_position(self): x_img, y_img = self.ps.image_position(self.kwargs) - npt.assert_almost_equal(x_img, self.kwargs['ra_image']) - npt.assert_almost_equal(y_img, self.kwargs['dec_image']) + npt.assert_almost_equal(x_img, self.kwargs["ra_image"]) + npt.assert_almost_equal(y_img, self.kwargs["dec_image"]) def test_source_position(self): x_src, y_src = self.ps.source_position(self.kwargs, kwargs_lens=None) - npt.assert_almost_equal(x_src, self.kwargs['ra_image']) - npt.assert_almost_equal(y_src, self.kwargs['dec_image']) + npt.assert_almost_equal(x_src, self.kwargs["ra_image"]) + npt.assert_almost_equal(y_src, self.kwargs["dec_image"]) def test_image_amplitude(self): - amp = self.ps.image_amplitude(self.kwargs, kwargs_lens=None, x_pos=None, - y_pos=None, magnification_limit=None, kwargs_lens_eqn_solver=None) - npt.assert_almost_equal(amp, self.kwargs['point_amp']) + amp = self.ps.image_amplitude( + self.kwargs, + kwargs_lens=None, + x_pos=None, + y_pos=None, + magnification_limit=None, + kwargs_lens_eqn_solver=None, + ) + npt.assert_almost_equal(amp, self.kwargs["point_amp"]) def test_source_amplitude(self): amp = self.ps.source_amplitude(self.kwargs, kwargs_lens=None) - npt.assert_almost_equal(amp, self.kwargs['point_amp']) + npt.assert_almost_equal(amp, self.kwargs["point_amp"]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_point_source.py b/test/test_PointSource/test_point_source.py index bdf2a86d1..3c3621cc5 100644 --- a/test/test_PointSource/test_point_source.py +++ b/test/test_PointSource/test_point_source.py @@ -10,38 +10,72 @@ class TestPointSource(object): - def setup_method(self): - lensModel = LensModel(lens_model_list=['SPEP']) + lensModel = LensModel(lens_model_list=["SPEP"]) solver = LensEquationSolver(lensModel=lensModel) e1, e2 = param_util.phi_q2_ellipticity(0, 0.7) - self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}] + self.kwargs_lens = [ + { + "theta_E": 1.0, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + "gamma": 2, + } + ] self.sourcePos_x, self.sourcePos_y = 0.01, -0.01 - self.x_pos, 
self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x, - sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens) - self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'], - lensModel=lensModel, fixed_magnification_list=[False]*3, - additional_images_list=[False]*4, flux_from_point_source_list=[True, True, True], - index_lens_model_list=[[0]], point_source_frame_list=[[0] * len(self.x_pos), [0], [0]]) - self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'point_amp': np.ones_like(self.x_pos) * 2}, - {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]}, - {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'point_amp': np.ones_like(self.x_pos)}, {}] + self.x_pos, self.y_pos = solver.image_position_from_source( + sourcePos_x=self.sourcePos_x, + sourcePos_y=self.sourcePos_y, + kwargs_lens=self.kwargs_lens, + ) + self.PointSource = PointSource( + point_source_type_list=["LENSED_POSITION", "UNLENSED", "SOURCE_POSITION"], + lensModel=lensModel, + fixed_magnification_list=[False] * 3, + additional_images_list=[False] * 4, + flux_from_point_source_list=[True, True, True], + index_lens_model_list=[[0]], + point_source_frame_list=[[0] * len(self.x_pos), [0], [0]], + ) + self.kwargs_ps = [ + { + "ra_image": self.x_pos, + "dec_image": self.y_pos, + "point_amp": np.ones_like(self.x_pos) * 2, + }, + {"ra_image": [1.0], "dec_image": [1.0], "point_amp": [10]}, + { + "ra_source": self.sourcePos_x, + "dec_source": self.sourcePos_y, + "point_amp": np.ones_like(self.x_pos), + }, + {}, + ] def test_image_position(self): - x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens) + x_image_list, y_image_list = self.PointSource.image_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens + ) npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8) npt.assert_almost_equal(x_image_list[1], 1, decimal=8) npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8) - x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, - kwargs_lens=self.kwargs_lens, - original_position=True, additional_images=True) + x_image_list, y_image_list = self.PointSource.image_position( + kwargs_ps=self.kwargs_ps, + kwargs_lens=self.kwargs_lens, + original_position=True, + additional_images=True, + ) npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8) npt.assert_almost_equal(x_image_list[1], 1, decimal=8) npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8) def test_source_position(self): - x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens) + x_source_list, y_source_list = self.PointSource.source_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens + ) npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8) npt.assert_almost_equal(x_source_list[1], 1, decimal=8) npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8) @@ -51,14 +85,17 @@ def test_num_basis(self): assert num_basis == 9 def test_linear_response_set(self): - ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False) + ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set( + self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False + ) num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens) assert amp[0][0] == 
1 assert n == num_basis assert ra_pos[0][0] == self.x_pos[0] - ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, - with_amp=True) + ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set( + self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=True + ) num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens) assert amp[0][0] != 1 assert n == num_basis @@ -66,15 +103,19 @@ def test_linear_response_set(self): def test_linear_param_from_kwargs(self): param = self.PointSource.linear_param_from_kwargs(self.kwargs_ps) - assert param[0] == self.kwargs_ps[0]['point_amp'][0] - assert param[1] == self.kwargs_ps[0]['point_amp'][1] + assert param[0] == self.kwargs_ps[0]["point_amp"][0] + assert param[1] == self.kwargs_ps[0]["point_amp"][1] def test_point_source_list(self): - ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens) + ra_list, dec_list, amp_list = self.PointSource.point_source_list( + self.kwargs_ps, self.kwargs_lens + ) assert ra_list[0] == self.x_pos[0] assert len(ra_list) == 9 - ra_list, dec_list, amp_list = self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens, k=0) + ra_list, dec_list, amp_list = self.PointSource.point_source_list( + self.kwargs_ps, self.kwargs_lens, k=0 + ) assert ra_list[0] == self.x_pos[0] assert len(ra_list) == 4 assert len(dec_list) == 4 @@ -92,54 +133,85 @@ def test_set_save_cache(self): assert self.PointSource._point_source_list[0]._save_cache == False def test_update_lens_model(self): - lensModel = LensModel(lens_model_list=['SIS']) + lensModel = LensModel(lens_model_list=["SIS"]) self.PointSource.update_lens_model(lens_model_class=lensModel) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, - kwargs_lens=kwargs_lens) - npt.assert_almost_equal(x_image_list[0][-1], -0.82654997748011705 , decimal=8) + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + x_image_list, y_image_list = self.PointSource.image_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=kwargs_lens + ) + npt.assert_almost_equal(x_image_list[0][-1], -0.82654997748011705, decimal=8) def test_set_amplitudes(self): - amp_list = [np.ones_like(self.x_pos)*20, [100], np.ones_like(self.x_pos)*10] + amp_list = [np.ones_like(self.x_pos) * 20, [100], np.ones_like(self.x_pos) * 10] kwargs_out = self.PointSource.set_amplitudes(amp_list, self.kwargs_ps) - assert kwargs_out[0]['point_amp'][0] == 10 * self.kwargs_ps[0]['point_amp'][0] - assert kwargs_out[1]['point_amp'][0] == 10 * self.kwargs_ps[1]['point_amp'][0] - assert kwargs_out[2]['point_amp'][3] == 10 * self.kwargs_ps[2]['point_amp'][3] + assert kwargs_out[0]["point_amp"][0] == 10 * self.kwargs_ps[0]["point_amp"][0] + assert kwargs_out[1]["point_amp"][0] == 10 * self.kwargs_ps[1]["point_amp"][0] + assert kwargs_out[2]["point_amp"][3] == 10 * self.kwargs_ps[2]["point_amp"][3] def test_update_search_window(self): search_window = 5 x_center, y_center = 1, 1 min_distance = 0.01 - point_source = PointSource(point_source_type_list=['LENSED_POSITION'], - lensModel=None, kwargs_lens_eqn_solver={}) - - point_source.update_search_window(search_window, x_center, y_center, min_distance=min_distance, only_from_unspecified=False) - assert point_source._kwargs_lens_eqn_solver['search_window'] == search_window - assert point_source._kwargs_lens_eqn_solver['x_center'] == x_center - assert 
point_source._kwargs_lens_eqn_solver['x_center'] == y_center - - point_source = PointSource(point_source_type_list=['LENSED_POSITION'], - lensModel=None, kwargs_lens_eqn_solver={}) - - point_source.update_search_window(search_window, x_center, y_center, min_distance=min_distance, - only_from_unspecified=True) - assert point_source._kwargs_lens_eqn_solver['search_window'] == search_window - assert point_source._kwargs_lens_eqn_solver['x_center'] == x_center - assert point_source._kwargs_lens_eqn_solver['x_center'] == y_center - - kwargs_lens_eqn_solver = {'search_window': search_window, - 'min_distance': min_distance, 'x_center': x_center, 'y_center': y_center} - point_source = PointSource(point_source_type_list=['LENSED_POSITION'], - lensModel=None, kwargs_lens_eqn_solver=kwargs_lens_eqn_solver) - point_source.update_search_window(search_window=-10, x_center=-10, y_center=-10, - min_distance=10, only_from_unspecified = True) - assert point_source._kwargs_lens_eqn_solver['search_window'] == search_window - assert point_source._kwargs_lens_eqn_solver['x_center'] == x_center - assert point_source._kwargs_lens_eqn_solver['x_center'] == y_center + point_source = PointSource( + point_source_type_list=["LENSED_POSITION"], + lensModel=None, + kwargs_lens_eqn_solver={}, + ) + + point_source.update_search_window( + search_window, + x_center, + y_center, + min_distance=min_distance, + only_from_unspecified=False, + ) + assert point_source._kwargs_lens_eqn_solver["search_window"] == search_window + assert point_source._kwargs_lens_eqn_solver["x_center"] == x_center + assert point_source._kwargs_lens_eqn_solver["x_center"] == y_center + + point_source = PointSource( + point_source_type_list=["LENSED_POSITION"], + lensModel=None, + kwargs_lens_eqn_solver={}, + ) + + point_source.update_search_window( + search_window, + x_center, + y_center, + min_distance=min_distance, + only_from_unspecified=True, + ) + assert point_source._kwargs_lens_eqn_solver["search_window"] == search_window + assert point_source._kwargs_lens_eqn_solver["x_center"] == x_center + assert point_source._kwargs_lens_eqn_solver["x_center"] == y_center + + kwargs_lens_eqn_solver = { + "search_window": search_window, + "min_distance": min_distance, + "x_center": x_center, + "y_center": y_center, + } + point_source = PointSource( + point_source_type_list=["LENSED_POSITION"], + lensModel=None, + kwargs_lens_eqn_solver=kwargs_lens_eqn_solver, + ) + point_source.update_search_window( + search_window=-10, + x_center=-10, + y_center=-10, + min_distance=10, + only_from_unspecified=True, + ) + assert point_source._kwargs_lens_eqn_solver["search_window"] == search_window + assert point_source._kwargs_lens_eqn_solver["x_center"] == x_center + assert point_source._kwargs_lens_eqn_solver["x_center"] == y_center def test__sort_position_by_original(self): from lenstronomy.PointSource.point_source import _sort_position_by_original + x_o, y_o = np.array([1, 2]), np.array([0, 0]) x_solved, y_solved = np.array([2]), np.array([0]) x_new, y_new = _sort_position_by_original(x_o, y_o, x_solved, y_solved) @@ -153,29 +225,55 @@ def test__sort_position_by_original(self): class TestPointSourceFixedMag(object): - def setup_method(self): - lensModel = LensModel(lens_model_list=['SPEP']) + lensModel = LensModel(lens_model_list=["SPEP"]) solver = LensEquationSolver(lensModel=lensModel) e1, e2 = param_util.phi_q2_ellipticity(0, 0.7) - self.kwargs_lens = [{'theta_E': 1., 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2, 'gamma': 2}] + self.kwargs_lens = [ + { + 
"theta_E": 1.0, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + "gamma": 2, + } + ] self.sourcePos_x, self.sourcePos_y = 0.01, -0.01 - self.x_pos, self.y_pos = solver.image_position_from_source(sourcePos_x=self.sourcePos_x, - sourcePos_y=self.sourcePos_y, kwargs_lens=self.kwargs_lens) - self.PointSource = PointSource(point_source_type_list=['LENSED_POSITION', 'UNLENSED', 'SOURCE_POSITION'], - lensModel=lensModel, fixed_magnification_list=[True]*4, additional_images_list=[False]*4) - self.kwargs_ps = [{'ra_image': self.x_pos, 'dec_image': self.y_pos, 'source_amp': 1}, - {'ra_image': [1.], 'dec_image': [1.], 'point_amp': [10]}, - {'ra_source': self.sourcePos_x, 'dec_source': self.sourcePos_y, 'source_amp': 1.}, {}] + self.x_pos, self.y_pos = solver.image_position_from_source( + sourcePos_x=self.sourcePos_x, + sourcePos_y=self.sourcePos_y, + kwargs_lens=self.kwargs_lens, + ) + self.PointSource = PointSource( + point_source_type_list=["LENSED_POSITION", "UNLENSED", "SOURCE_POSITION"], + lensModel=lensModel, + fixed_magnification_list=[True] * 4, + additional_images_list=[False] * 4, + ) + self.kwargs_ps = [ + {"ra_image": self.x_pos, "dec_image": self.y_pos, "source_amp": 1}, + {"ra_image": [1.0], "dec_image": [1.0], "point_amp": [10]}, + { + "ra_source": self.sourcePos_x, + "dec_source": self.sourcePos_y, + "source_amp": 1.0, + }, + {}, + ] def test_image_position(self): - x_image_list, y_image_list = self.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens) + x_image_list, y_image_list = self.PointSource.image_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens + ) npt.assert_almost_equal(x_image_list[0][0], self.x_pos[0], decimal=8) npt.assert_almost_equal(x_image_list[1], 1, decimal=8) npt.assert_almost_equal(x_image_list[2][0], self.x_pos[0], decimal=8) def test_source_position(self): - x_source_list, y_source_list = self.PointSource.source_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens) + x_source_list, y_source_list = self.PointSource.source_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens + ) npt.assert_almost_equal(x_source_list[0], self.sourcePos_x, decimal=8) npt.assert_almost_equal(x_source_list[1], 1, decimal=8) npt.assert_almost_equal(x_source_list[2], self.sourcePos_x, decimal=8) @@ -185,8 +283,9 @@ def test_num_basis(self): assert num_basis == 3 def test_linear_response_set(self): - ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, - with_amp=False) + ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set( + self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=False + ) num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens) assert n == num_basis assert ra_pos[0][0] == self.x_pos[0] @@ -194,8 +293,9 @@ def test_linear_response_set(self): assert np.all(amp != 1) npt.assert_almost_equal(ra_pos[2][0], self.x_pos[0], decimal=8) - ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set(self.kwargs_ps, kwargs_lens=self.kwargs_lens, - with_amp=True) + ra_pos, dec_pos, amp, n = self.PointSource.linear_response_set( + self.kwargs_ps, kwargs_lens=self.kwargs_lens, with_amp=True + ) num_basis = self.PointSource.num_basis(self.kwargs_ps, self.kwargs_lens) assert n == num_basis assert ra_pos[0][0] == self.x_pos[0] @@ -204,49 +304,64 @@ def test_linear_response_set(self): npt.assert_almost_equal(ra_pos[2][0], self.x_pos[0], decimal=8) def test_point_source_list(self): - ra_list, dec_list, amp_list = 
self.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens) + ra_list, dec_list, amp_list = self.PointSource.point_source_list( + self.kwargs_ps, self.kwargs_lens + ) assert ra_list[0] == self.x_pos[0] assert len(ra_list) == 9 def test_check_image_positions(self): - bool = self.PointSource.check_image_positions(self.kwargs_ps, self.kwargs_lens, tolerance=0.001) + bool = self.PointSource.check_image_positions( + self.kwargs_ps, self.kwargs_lens, tolerance=0.001 + ) assert bool is True # now we change the lens model to make the test fail - kwargs_lens = [{'theta_E': 2., 'center_x': 0, 'center_y': 0, 'e1': 0, 'e2': 0, 'gamma': 2}] - bool = self.PointSource.check_image_positions(self.kwargs_ps, kwargs_lens, tolerance=0.001) + kwargs_lens = [ + {"theta_E": 2.0, "center_x": 0, "center_y": 0, "e1": 0, "e2": 0, "gamma": 2} + ] + bool = self.PointSource.check_image_positions( + self.kwargs_ps, kwargs_lens, tolerance=0.001 + ) assert bool is False def test_set_amplitudes(self): amp_list = [10, [100], 10] kwargs_out = self.PointSource.set_amplitudes(amp_list, self.kwargs_ps) - assert kwargs_out[0]['source_amp'] == 10 * self.kwargs_ps[0]['source_amp'] - assert kwargs_out[1]['point_amp'][0] == 10 * self.kwargs_ps[1]['point_amp'][0] - assert kwargs_out[2]['source_amp'] == 10 * self.kwargs_ps[2]['source_amp'] + assert kwargs_out[0]["source_amp"] == 10 * self.kwargs_ps[0]["source_amp"] + assert kwargs_out[1]["point_amp"][0] == 10 * self.kwargs_ps[1]["point_amp"][0] + assert kwargs_out[2]["source_amp"] == 10 * self.kwargs_ps[2]["source_amp"] def test_positive_flux(self): - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': np.array([1, -1])}]) + bool = PointSource.check_positive_flux( + kwargs_ps=[{"point_amp": np.array([1, -1])}] + ) assert bool is False - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': -1}]) + bool = PointSource.check_positive_flux(kwargs_ps=[{"point_amp": -1}]) assert bool is False - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': np.array([0, 1])}]) + bool = PointSource.check_positive_flux( + kwargs_ps=[{"point_amp": np.array([0, 1])}] + ) assert bool is True - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': 1}]) + bool = PointSource.check_positive_flux(kwargs_ps=[{"point_amp": 1}]) assert bool is True - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': np.array([0, 1]), 'source_amp': 1}]) + bool = PointSource.check_positive_flux( + kwargs_ps=[{"point_amp": np.array([0, 1]), "source_amp": 1}] + ) assert bool is True - bool = PointSource.check_positive_flux(kwargs_ps=[{'point_amp': 1, 'source_amp': -1}]) + bool = PointSource.check_positive_flux( + kwargs_ps=[{"point_amp": 1, "source_amp": -1}] + ) assert bool is False class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - PointSource(point_source_type_list=['BAD']) + PointSource(point_source_type_list=["BAD"]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_point_source_cached.py b/test/test_PointSource/test_point_source_cached.py index ef563808a..2a5221c33 100644 --- a/test/test_PointSource/test_point_source_cached.py +++ b/test/test_PointSource/test_point_source_cached.py @@ -5,39 +5,50 @@ class TestPointSourceCached(object): - def setup_method(self): self.ps_cached = PointSourceCached(Unlensed(), save_cache=True) self.ps = Unlensed() - self.kwargs_ps = {'ra_image': [1], 'dec_image': [0], 'point_amp': [1]} - self.kwargs_ps_dummy = {'ra_image': [-1], 
'dec_image': [10], 'point_amp': [-1]} + self.kwargs_ps = {"ra_image": [1], "dec_image": [0], "point_amp": [1]} + self.kwargs_ps_dummy = {"ra_image": [-1], "dec_image": [10], "point_amp": [-1]} def test_image_position(self): x_img, y_img = self.ps.image_position(kwargs_ps=self.kwargs_ps) - x_img_cached, y_img_cached = self.ps_cached.image_position(kwargs_ps=self.kwargs_ps) + x_img_cached, y_img_cached = self.ps_cached.image_position( + kwargs_ps=self.kwargs_ps + ) npt.assert_almost_equal(x_img_cached, x_img) npt.assert_almost_equal(y_img_cached, y_img) - x_img_cached, y_img_cached = self.ps_cached.image_position(kwargs_ps=self.kwargs_ps_dummy) + x_img_cached, y_img_cached = self.ps_cached.image_position( + kwargs_ps=self.kwargs_ps_dummy + ) npt.assert_almost_equal(x_img_cached, x_img) npt.assert_almost_equal(y_img_cached, y_img) self.ps_cached.delete_lens_model_cache() - x_img_cached, y_img_cached = self.ps_cached.image_position(kwargs_ps=self.kwargs_ps_dummy) + x_img_cached, y_img_cached = self.ps_cached.image_position( + kwargs_ps=self.kwargs_ps_dummy + ) assert x_img_cached[0] != x_img[0] def test_source_position(self): x_img, y_img = self.ps.source_position(kwargs_ps=self.kwargs_ps) - x_img_cached, y_img_cached = self.ps_cached.source_position(kwargs_ps=self.kwargs_ps) + x_img_cached, y_img_cached = self.ps_cached.source_position( + kwargs_ps=self.kwargs_ps + ) npt.assert_almost_equal(x_img_cached, x_img) npt.assert_almost_equal(y_img_cached, y_img) - x_img_cached, y_img_cached = self.ps_cached.source_position(kwargs_ps=self.kwargs_ps_dummy) + x_img_cached, y_img_cached = self.ps_cached.source_position( + kwargs_ps=self.kwargs_ps_dummy + ) npt.assert_almost_equal(x_img_cached, x_img) npt.assert_almost_equal(y_img_cached, y_img) self.ps_cached.delete_lens_model_cache() - x_img_cached, y_img_cached = self.ps_cached.source_position(kwargs_ps=self.kwargs_ps_dummy) + x_img_cached, y_img_cached = self.ps_cached.source_position( + kwargs_ps=self.kwargs_ps_dummy + ) assert x_img_cached[0] != x_img[0] def test_image_amplitude(self): @@ -67,5 +78,5 @@ def test_source_amplitude(self): assert amp_cached[0] != amp[0] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_PointSource/test_point_source_param.py b/test/test_PointSource/test_point_source_param.py index 7b590945a..fa6d841e9 100644 --- a/test/test_PointSource/test_point_source_param.py +++ b/test/test_PointSource/test_point_source_param.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import unittest @@ -8,23 +8,34 @@ class TestParam(object): - def setup_method(self): kwargs_fixed = [{}, {}, {}] num_point_sources_list = [4, 1, 1] fixed_magnification_list = [True, False, False] - point_source_model_list = ['LENSED_POSITION', 'SOURCE_POSITION', 'UNLENSED'] - self.param = PointSourceParam(model_list=point_source_model_list, kwargs_fixed=kwargs_fixed, - num_point_source_list=num_point_sources_list, - fixed_magnification_list=fixed_magnification_list) - self.kwargs =[{'ra_image': np.array([0, 0, 0, 0]), 'dec_image': np.array([0, 0, 0, 0]), - 'source_amp': 1}, - {'ra_source': 1, 'dec_source': 1, 'point_amp': np.array([1.])}, - {'ra_image': [1], 'dec_image': [1], 'point_amp': np.array([1.])}] - - self.param_linear = PointSourceParam(model_list=point_source_model_list, kwargs_fixed=[{}, {}, {}], - num_point_source_list=num_point_sources_list, linear_solver=False, - fixed_magnification_list=fixed_magnification_list) + point_source_model_list = ["LENSED_POSITION", 
"SOURCE_POSITION", "UNLENSED"] + self.param = PointSourceParam( + model_list=point_source_model_list, + kwargs_fixed=kwargs_fixed, + num_point_source_list=num_point_sources_list, + fixed_magnification_list=fixed_magnification_list, + ) + self.kwargs = [ + { + "ra_image": np.array([0, 0, 0, 0]), + "dec_image": np.array([0, 0, 0, 0]), + "source_amp": 1, + }, + {"ra_source": 1, "dec_source": 1, "point_amp": np.array([1.0])}, + {"ra_image": [1], "dec_image": [1], "point_amp": np.array([1.0])}, + ] + + self.param_linear = PointSourceParam( + model_list=point_source_model_list, + kwargs_fixed=[{}, {}, {}], + num_point_source_list=num_point_sources_list, + linear_solver=False, + fixed_magnification_list=fixed_magnification_list, + ) def test_get_setParams(self): args = self.param.set_params(self.kwargs) @@ -54,18 +65,29 @@ def test_num_param_linear(self): assert num == 0 def test_init(self): - ps_param = PointSourceParam(model_list=['UNLENSED'], kwargs_fixed=[{}], num_point_source_list=None) + ps_param = PointSourceParam( + model_list=["UNLENSED"], kwargs_fixed=[{}], num_point_source_list=None + ) assert ps_param._num_point_sources_list[0] == 1 class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - PointSourceParam(model_list=['BAD'], kwargs_fixed=[{}], kwargs_lower=None, kwargs_upper=[{'bla': 1}]) + PointSourceParam( + model_list=["BAD"], + kwargs_fixed=[{}], + kwargs_lower=None, + kwargs_upper=[{"bla": 1}], + ) with self.assertRaises(ValueError): - PointSourceParam(model_list=['BAD'], kwargs_fixed=[{}], kwargs_lower=[{'bla': 1}], kwargs_upper=None) + PointSourceParam( + model_list=["BAD"], + kwargs_fixed=[{}], + kwargs_lower=[{"bla": 1}], + kwargs_upper=None, + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Likelihoods/test_flux_ratio_likelihood.py b/test/test_Sampling/test_Likelihoods/test_flux_ratio_likelihood.py index 262040ed3..90f86663e 100644 --- a/test/test_Sampling/test_Likelihoods/test_flux_ratio_likelihood.py +++ b/test/test_Sampling/test_Likelihoods/test_flux_ratio_likelihood.py @@ -8,82 +8,127 @@ class TestFluxRatioLikelihood(object): - def setup_method(self): - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] lensModel = LensModel(lens_model_list=lens_model_list) lensModelExtensions = LensModelExtensions(lensModel=lensModel) lensEquationSolver = LensEquationSolver(lensModel=lensModel) x_source, y_source = 0.02, 0.01 - kwargs_lens = [{'theta_E': 1., 'e1': 0.1, 'e2': 0.1, 'gamma': 2., 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.06, 'gamma2': -0.03}] - - x_img, y_img = lensEquationSolver.image_position_from_source(kwargs_lens=kwargs_lens, sourcePos_x=x_source, - sourcePos_y=y_source) - print('image positions are: ', x_img, y_img) + kwargs_lens = [ + { + "theta_E": 1.0, + "e1": 0.1, + "e2": 0.1, + "gamma": 2.0, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.06, "gamma2": -0.03}, + ] + + x_img, y_img = lensEquationSolver.image_position_from_source( + kwargs_lens=kwargs_lens, sourcePos_x=x_source, sourcePos_y=y_source + ) + print("image positions are: ", x_img, y_img) mag_inf = lensModel.magnification(x_img, y_img, kwargs_lens) - print('point source magnification: ', mag_inf) + print("point source magnification: ", mag_inf) source_size_arcsec = 0.001 window_size = 0.1 grid_number = 100 - print('source size in arcsec: ', source_size_arcsec) - mag_finite = lensModelExtensions.magnification_finite(x_pos=x_img, y_pos=y_img, kwargs_lens=kwargs_lens, - 
source_sigma=source_size_arcsec, window_size=window_size, - grid_number=grid_number) + print("source size in arcsec: ", source_size_arcsec) + mag_finite = lensModelExtensions.magnification_finite( + x_pos=x_img, + y_pos=y_img, + kwargs_lens=kwargs_lens, + source_sigma=source_size_arcsec, + window_size=window_size, + grid_number=grid_number, + ) flux_ratios = mag_finite[1:] / mag_finite[0] flux_ratio_errors = [0.1, 0.1, 0.1] - flux_ratio_cov = np.diag([0.1, 0.1, 0.1])**2 - self.flux_likelihood = FluxRatioLikelihood(lens_model_class=lensModel, flux_ratios=flux_ratios, flux_ratio_errors=flux_ratio_errors, - source_type='GAUSSIAN', window_size=window_size, grid_number=grid_number) - - self.flux_likelihood_inf = FluxRatioLikelihood(lens_model_class=lensModel, flux_ratios=flux_ratios, - flux_ratio_errors=flux_ratio_errors, - source_type='INF', window_size=window_size, - grid_number=grid_number) - self.flux_likelihood_inf_cov = FluxRatioLikelihood(lens_model_class=lensModel, flux_ratios=flux_ratios, - flux_ratio_errors=flux_ratio_cov, - source_type='INF', window_size=window_size, - grid_number=grid_number) - self.kwargs_cosmo = {'source_size': source_size_arcsec} + flux_ratio_cov = np.diag([0.1, 0.1, 0.1]) ** 2 + self.flux_likelihood = FluxRatioLikelihood( + lens_model_class=lensModel, + flux_ratios=flux_ratios, + flux_ratio_errors=flux_ratio_errors, + source_type="GAUSSIAN", + window_size=window_size, + grid_number=grid_number, + ) + + self.flux_likelihood_inf = FluxRatioLikelihood( + lens_model_class=lensModel, + flux_ratios=flux_ratios, + flux_ratio_errors=flux_ratio_errors, + source_type="INF", + window_size=window_size, + grid_number=grid_number, + ) + self.flux_likelihood_inf_cov = FluxRatioLikelihood( + lens_model_class=lensModel, + flux_ratios=flux_ratios, + flux_ratio_errors=flux_ratio_cov, + source_type="INF", + window_size=window_size, + grid_number=grid_number, + ) + self.kwargs_cosmo = {"source_size": source_size_arcsec} self.x_img, self.y_img = x_img, y_img self.kwargs_lens = kwargs_lens def test_logL(self): - logL = self.flux_likelihood.logL(self.x_img, self.y_img, self.kwargs_lens, kwargs_special=self.kwargs_cosmo) + logL = self.flux_likelihood.logL( + self.x_img, self.y_img, self.kwargs_lens, kwargs_special=self.kwargs_cosmo + ) assert logL == 0 - logL_inf = self.flux_likelihood_inf.logL(self.x_img, self.y_img, self.kwargs_lens, {}) - npt.assert_almost_equal(logL_inf, 0 , decimal=4) + logL_inf = self.flux_likelihood_inf.logL( + self.x_img, self.y_img, self.kwargs_lens, {} + ) + npt.assert_almost_equal(logL_inf, 0, decimal=4) def test__logL(self): lensModel = LensModel(lens_model_list=[]) - flux_ratios_init = np.array([1., 1., 1.]) - flux_ratio_errors = np.array([1., 1., 1.]) - flux_likelihood = FluxRatioLikelihood(lens_model_class=lensModel, flux_ratios=flux_ratios_init, - flux_ratio_errors=flux_ratio_errors) + flux_ratios_init = np.array([1.0, 1.0, 1.0]) + flux_ratio_errors = np.array([1.0, 1.0, 1.0]) + flux_likelihood = FluxRatioLikelihood( + lens_model_class=lensModel, + flux_ratios=flux_ratios_init, + flux_ratio_errors=flux_ratio_errors, + ) flux_ratios = np.array([0, 1, np.nan]) logL = flux_likelihood._logL(flux_ratios) - assert logL == -10 ** 15 - - flux_likelihood = FluxRatioLikelihood(lens_model_class=lensModel, flux_ratios=flux_ratios_init, - flux_ratio_errors=np.array([0., 1., 1.])) - flux_ratios = np.array([1., 1., 1.]) + assert logL == -(10**15) + + flux_likelihood = FluxRatioLikelihood( + lens_model_class=lensModel, + flux_ratios=flux_ratios_init, + 
flux_ratio_errors=np.array([0.0, 1.0, 1.0]), + ) + flux_ratios = np.array([1.0, 1.0, 1.0]) logL = flux_likelihood._logL(flux_ratios) - assert logL == -10 ** 15 + assert logL == -(10**15) def test_numimgs(self): # Test with a different number of images - logL = self.flux_likelihood.logL(self.x_img[:-1], self.y_img[:-1], self.kwargs_lens, kwargs_special=self.kwargs_cosmo) - assert logL == -10**15 + logL = self.flux_likelihood.logL( + self.x_img[:-1], + self.y_img[:-1], + self.kwargs_lens, + kwargs_special=self.kwargs_cosmo, + ) + assert logL == -(10**15) def test_covmatrix(self): # Test with a different number of images - logL = self.flux_likelihood_inf_cov.logL(self.x_img, self.y_img, self.kwargs_lens, kwargs_special=self.kwargs_cosmo) + logL = self.flux_likelihood_inf_cov.logL( + self.x_img, self.y_img, self.kwargs_lens, kwargs_special=self.kwargs_cosmo + ) npt.assert_almost_equal(logL, 0, decimal=8) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Likelihoods/test_image_likelihood.py b/test/test_Sampling/test_Likelihoods/test_image_likelihood.py index 8285e122e..c822cfb9a 100644 --- a/test/test_Sampling/test_Likelihoods/test_image_likelihood.py +++ b/test/test_Sampling/test_Likelihoods/test_image_likelihood.py @@ -4,7 +4,6 @@ class TestImageLikelihood(object): - def setup_method(self): pass @@ -12,5 +11,5 @@ def test_create_im_sim(self): pass -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Likelihoods/test_position_likelihood.py b/test/test_Sampling/test_Likelihoods/test_position_likelihood.py index 727c427de..c053c3b99 100644 --- a/test/test_Sampling/test_Likelihoods/test_position_likelihood.py +++ b/test/test_Sampling/test_Likelihoods/test_position_likelihood.py @@ -8,53 +8,94 @@ class TestPositionLikelihood(object): - def setup_method(self): - # compute image positions - lensModel = LensModel(lens_model_list=['SIE']) + lensModel = LensModel(lens_model_list=["SIE"]) solver = LensEquationSolver(lensModel=lensModel) - self._kwargs_lens = [{'theta_E': 1, 'e1': 0.1, 'e2': -0.03, 'center_x': 0, 'center_y': 0}] - self.kwargs_lens_eqn_solver = {'min_distance': 0.1, 'search_window': 10} - x_pos, y_pos = solver.image_position_from_source(sourcePos_x=0.01, sourcePos_y=-0.01, kwargs_lens=self._kwargs_lens, **self.kwargs_lens_eqn_solver) - - point_source_class = PointSource(point_source_type_list=['LENSED_POSITION'], lensModel=lensModel, - kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver) - self.likelihood = PositionLikelihood(point_source_class, image_position_uncertainty=0.005, astrometric_likelihood=True, - image_position_likelihood=True, ra_image_list=[x_pos], dec_image_list=[y_pos], - source_position_likelihood=True, check_matched_source_position=False, source_position_tolerance=0.001, force_no_add_image=False, - restrict_image_number=False, max_num_images=None) - - self.likelihood_all = PositionLikelihood(point_source_class, image_position_uncertainty=0.005, - astrometric_likelihood=True, - image_position_likelihood=True, ra_image_list=[x_pos], - dec_image_list=[y_pos], - source_position_likelihood=True, check_matched_source_position=True, - source_position_tolerance=0.001, force_no_add_image=True, - restrict_image_number=True, max_num_images=5) + self._kwargs_lens = [ + {"theta_E": 1, "e1": 0.1, "e2": -0.03, "center_x": 0, "center_y": 0} + ] + self.kwargs_lens_eqn_solver = {"min_distance": 0.1, "search_window": 10} + x_pos, y_pos = solver.image_position_from_source( + 
sourcePos_x=0.01, + sourcePos_y=-0.01, + kwargs_lens=self._kwargs_lens, + **self.kwargs_lens_eqn_solver + ) + + point_source_class = PointSource( + point_source_type_list=["LENSED_POSITION"], + lensModel=lensModel, + kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver, + ) + self.likelihood = PositionLikelihood( + point_source_class, + image_position_uncertainty=0.005, + astrometric_likelihood=True, + image_position_likelihood=True, + ra_image_list=[x_pos], + dec_image_list=[y_pos], + source_position_likelihood=True, + check_matched_source_position=False, + source_position_tolerance=0.001, + force_no_add_image=False, + restrict_image_number=False, + max_num_images=None, + ) + + self.likelihood_all = PositionLikelihood( + point_source_class, + image_position_uncertainty=0.005, + astrometric_likelihood=True, + image_position_likelihood=True, + ra_image_list=[x_pos], + dec_image_list=[y_pos], + source_position_likelihood=True, + check_matched_source_position=True, + source_position_tolerance=0.001, + force_no_add_image=True, + restrict_image_number=True, + max_num_images=5, + ) self._x_pos, self._y_pos = x_pos, y_pos def test_image_position_likelihood(self): - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] - logL = self.likelihood.image_position_likelihood(kwargs_ps, self._kwargs_lens, sigma=0.01) + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] + logL = self.likelihood.image_position_likelihood( + kwargs_ps, self._kwargs_lens, sigma=0.01 + ) npt.assert_almost_equal(logL, 0, decimal=8) - kwargs_ps = [{'ra_image': self._x_pos + 0.01, 'dec_image': self._y_pos}] - logL = self.likelihood.image_position_likelihood(kwargs_ps, self._kwargs_lens, sigma=0.01) + kwargs_ps = [{"ra_image": self._x_pos + 0.01, "dec_image": self._y_pos}] + logL = self.likelihood.image_position_likelihood( + kwargs_ps, self._kwargs_lens, sigma=0.01 + ) npt.assert_almost_equal(logL, -2, decimal=8) - self.likelihood_all.image_position_likelihood(kwargs_ps, self._kwargs_lens, sigma=0.01) + self.likelihood_all.image_position_likelihood( + kwargs_ps, self._kwargs_lens, sigma=0.01 + ) npt.assert_almost_equal(logL, -2, decimal=8) def test_astrometric_likelihood(self): - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] - kwargs_special = {'delta_x_image': [0, 0, 0, 0.], 'delta_y_image': [0, 0, 0, 0.]} - logL = self.likelihood.astrometric_likelihood(kwargs_ps, kwargs_special, sigma=0.01) + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] + kwargs_special = { + "delta_x_image": [0, 0, 0, 0.0], + "delta_y_image": [0, 0, 0, 0.0], + } + logL = self.likelihood.astrometric_likelihood( + kwargs_ps, kwargs_special, sigma=0.01 + ) npt.assert_almost_equal(logL, 0, decimal=8) - kwargs_special = {'delta_x_image': [0, 0, 0, 0.01], 'delta_y_image': [0, 0, 0, 0.01]} - logL = self.likelihood.astrometric_likelihood(kwargs_ps, kwargs_special, sigma=0.01) + kwargs_special = { + "delta_x_image": [0, 0, 0, 0.01], + "delta_y_image": [0, 0, 0, 0.01], + } + logL = self.likelihood.astrometric_likelihood( + kwargs_ps, kwargs_special, sigma=0.01 + ) npt.assert_almost_equal(logL, -1, decimal=8) logL = self.likelihood.astrometric_likelihood([], kwargs_special, sigma=0.01) @@ -64,44 +105,68 @@ def test_astrometric_likelihood(self): npt.assert_almost_equal(logL, 0, decimal=8) def test_check_additional_images(self): - point_source_class = PointSource(point_source_type_list=['LENSED_POSITION'], additional_images_list=[True], - lensModel=LensModel(lens_model_list=['SIE']), - 
kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver) + point_source_class = PointSource( + point_source_type_list=["LENSED_POSITION"], + additional_images_list=[True], + lensModel=LensModel(lens_model_list=["SIE"]), + kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver, + ) likelihood = PositionLikelihood(point_source_class) - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] bool = likelihood.check_additional_images(kwargs_ps, self._kwargs_lens) assert bool is False - kwargs_ps = [{'ra_image': self._x_pos[1:], 'dec_image': self._y_pos[1:]}] + kwargs_ps = [{"ra_image": self._x_pos[1:], "dec_image": self._y_pos[1:]}] bool = likelihood.check_additional_images(kwargs_ps, self._kwargs_lens) assert bool is True def test_solver_penalty(self): - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] - logL = self.likelihood.source_position_likelihood(self._kwargs_lens, kwargs_ps, hard_bound_rms=0.0001, sigma=0.001, verbose=False) + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] + logL = self.likelihood.source_position_likelihood( + self._kwargs_lens, + kwargs_ps, + hard_bound_rms=0.0001, + sigma=0.001, + verbose=False, + ) npt.assert_almost_equal(logL, 0, decimal=9) - kwargs_ps = [{'ra_image': self._x_pos + 0.01, 'dec_image': self._y_pos}] - logL = self.likelihood.source_position_likelihood(self._kwargs_lens, kwargs_ps, hard_bound_rms=0.001, sigma=0.0001, verbose=False) + kwargs_ps = [{"ra_image": self._x_pos + 0.01, "dec_image": self._y_pos}] + logL = self.likelihood.source_position_likelihood( + self._kwargs_lens, + kwargs_ps, + hard_bound_rms=0.001, + sigma=0.0001, + verbose=False, + ) npt.assert_almost_equal(logL, -126467.04331894651, decimal=0) - #assert logL == -np.inf + # assert logL == -np.inf def test_logL(self): - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] - kwargs_special = {'delta_x_image': [0, 0, 0, 0.], 'delta_y_image': [0, 0, 0, 0.]} - logL = self.likelihood.logL(self._kwargs_lens, kwargs_ps, kwargs_special, verbose=True) + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] + kwargs_special = { + "delta_x_image": [0, 0, 0, 0.0], + "delta_y_image": [0, 0, 0, 0.0], + } + logL = self.likelihood.logL( + self._kwargs_lens, kwargs_ps, kwargs_special, verbose=True + ) npt.assert_almost_equal(logL, 0, decimal=9) def test_source_position_likelihood(self): - kwargs_ps = [{'ra_image': self._x_pos, 'dec_image': self._y_pos}] - logL = self.likelihood.source_position_likelihood(self._kwargs_lens, kwargs_ps, sigma=0.01) + kwargs_ps = [{"ra_image": self._x_pos, "dec_image": self._y_pos}] + logL = self.likelihood.source_position_likelihood( + self._kwargs_lens, kwargs_ps, sigma=0.01 + ) npt.assert_almost_equal(logL, 0, decimal=9) x_pos = copy.deepcopy(self._x_pos) x_pos[0] += 0.01 - kwargs_ps = [{'ra_image': x_pos, 'dec_image': self._y_pos}] - logL = self.likelihood.source_position_likelihood(self._kwargs_lens, kwargs_ps, sigma=0.01) + kwargs_ps = [{"ra_image": x_pos, "dec_image": self._y_pos}] + logL = self.likelihood.source_position_likelihood( + self._kwargs_lens, kwargs_ps, sigma=0.01 + ) npt.assert_almost_equal(logL, -0.33011713058631054, decimal=4) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Likelihoods/test_prior_likelihood.py b/test/test_Sampling/test_Likelihoods/test_prior_likelihood.py index 2df7c7ae8..90857f04d 100644 --- 
a/test/test_Sampling/test_Likelihoods/test_prior_likelihood.py +++ b/test/test_Sampling/test_Likelihoods/test_prior_likelihood.py @@ -5,70 +5,109 @@ class TestImageLikelihood(object): - def setup_method(self): - self.prior = PriorLikelihood(prior_lens=[[0, 'gamma', 2, 0.1]], prior_source=None, prior_lens_light=None, - prior_ps=None, - prior_special=[['source_size', 1, 0.1]]) - self.prior_lognormal = PriorLikelihood(prior_lens_lognormal=[[0, 'gamma', np.log(2.), 0.1]], - prior_source_lognormal=None, prior_lens_light_lognormal=None, - prior_ps_lognormal=None, - prior_special_lognormal=[['source_size', 0., 0.1]]) + self.prior = PriorLikelihood( + prior_lens=[[0, "gamma", 2, 0.1]], + prior_source=None, + prior_lens_light=None, + prior_ps=None, + prior_special=[["source_size", 1, 0.1]], + ) + self.prior_lognormal = PriorLikelihood( + prior_lens_lognormal=[[0, "gamma", np.log(2.0), 0.1]], + prior_source_lognormal=None, + prior_lens_light_lognormal=None, + prior_ps_lognormal=None, + prior_special_lognormal=[["source_size", 0.0, 0.1]], + ) def test_logL(self): - kwargs_lens = [{'gamma': 2.}] - kwargs_cosmo = {'source_size': 1.} - logL = self.prior.logL(kwargs_lens=kwargs_lens, kwargs_source=[], kwargs_lens_light=[], kwargs_ps=[], - kwargs_special=kwargs_cosmo) + kwargs_lens = [{"gamma": 2.0}] + kwargs_cosmo = {"source_size": 1.0} + logL = self.prior.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + kwargs_special=kwargs_cosmo, + ) assert logL == 0 - kwargs_lens = [{'gamma': 2.1}] - kwargs_cosmo = {'source_size': 1.1} - logL = self.prior.logL(kwargs_lens=kwargs_lens, kwargs_source=[], kwargs_lens_light=[], kwargs_ps=[], - kwargs_special=kwargs_cosmo) + kwargs_lens = [{"gamma": 2.1}] + kwargs_cosmo = {"source_size": 1.1} + logL = self.prior.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + kwargs_special=kwargs_cosmo, + ) npt.assert_almost_equal(logL, -1, decimal=8) def test_logL_lognormal(self): - kwargs_lens = [{'gamma': 2}] - kwargs_cosmo = {'source_size': 1} - logL = self.prior_lognormal.logL(kwargs_lens=kwargs_lens, kwargs_source=[], - kwargs_lens_light=[], kwargs_ps=[], - kwargs_special=kwargs_cosmo) - assert logL == -3. 
+ kwargs_lens = [{"gamma": 2}] + kwargs_cosmo = {"source_size": 1} + logL = self.prior_lognormal.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + kwargs_special=kwargs_cosmo, + ) + assert logL == -3.0 - kwargs_lens = [{'gamma': 2.1}] - kwargs_cosmo = {'source_size': 1.1} - logL = self.prior_lognormal.logL(kwargs_lens=kwargs_lens, kwargs_source=[], - kwargs_lens_light=[], kwargs_ps=[], - kwargs_special=kwargs_cosmo) + kwargs_lens = [{"gamma": 2.1}] + kwargs_cosmo = {"source_size": 1.1} + logL = self.prior_lognormal.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + kwargs_special=kwargs_cosmo, + ) npt.assert_almost_equal(logL, -3.7732255247006443, decimal=8) def gauss(self, x, mean, simga): - return np.exp(-((x-mean)/(simga))**2/2) / np.sqrt(2*np.pi) / simga + return np.exp(-(((x - mean) / (simga)) ** 2) / 2) / np.sqrt(2 * np.pi) / simga def test_kde_prior(self): - x_array = np.linspace(1., 3., 200) - sigma = .2 + x_array = np.linspace(1.0, 3.0, 200) + sigma = 0.2 mean = 2 - #pdf_array = self.gauss(x_array, mean=mean, simga=sigma) + # pdf_array = self.gauss(x_array, mean=mean, simga=sigma) sample = np.random.normal(loc=mean, scale=sigma, size=50000) - #approx = Approx(x_array, pdf_array) - #sample = approx.draw(n=50000) - prior = PriorLikelihood(prior_lens_kde=[[0, 'gamma', sample]]) + # approx = Approx(x_array, pdf_array) + # sample = approx.draw(n=50000) + prior = PriorLikelihood(prior_lens_kde=[[0, "gamma", sample]]) - kwargs_lens = [{'gamma': 2}] - logL = prior.logL(kwargs_lens=kwargs_lens, kwargs_source=[], kwargs_lens_light=[], kwargs_ps=[]) + kwargs_lens = [{"gamma": 2}] + logL = prior.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + ) - kwargs_lens = [{'gamma': 2.2}] - logL_sigma = prior.logL(kwargs_lens=kwargs_lens, kwargs_source=[], kwargs_lens_light=[], kwargs_ps=[]) + kwargs_lens = [{"gamma": 2.2}] + logL_sigma = prior.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + ) delta_log = logL - logL_sigma npt.assert_almost_equal(delta_log, 0.5, decimal=1) - kwargs_lens = [{'gamma': 2.4}] - logL_sigma = prior.logL(kwargs_lens=kwargs_lens, kwargs_source=[], kwargs_lens_light=[], kwargs_ps=[]) + kwargs_lens = [{"gamma": 2.4}] + logL_sigma = prior.logL( + kwargs_lens=kwargs_lens, + kwargs_source=[], + kwargs_lens_light=[], + kwargs_ps=[], + ) delta_log = logL - logL_sigma npt.assert_almost_equal(delta_log, 2, decimal=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Likelihoods/test_time_delay_likelihood.py b/test/test_Sampling/test_Likelihoods/test_time_delay_likelihood.py index b3047857c..e3183b356 100644 --- a/test/test_Sampling/test_Likelihoods/test_time_delay_likelihood.py +++ b/test/test_Sampling/test_Likelihoods/test_time_delay_likelihood.py @@ -11,23 +11,26 @@ class TestImageLikelihood(object): - def setup_method(self): pass def test_logL(self): - z_source = 1.5 z_lens = 0.5 - cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.) 
+ cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.0) lensCosmo = LensCosmo(cosmo=cosmo, z_lens=z_lens, z_source=z_source) # make class instances for a chosen lens model type # chose a lens model - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] # make instance of LensModel class - lensModel = LensModel(lens_model_list=lens_model_list, cosmo=cosmo, z_lens=z_lens, z_source=z_source) + lensModel = LensModel( + lens_model_list=lens_model_list, + cosmo=cosmo, + z_lens=z_lens, + z_source=z_source, + ) # we require routines accessible in the LensModelExtensions class # make instance of LensEquationSolver to solve the lens equation lensEquationSolver = LensEquationSolver(lensModel=lensModel) @@ -37,50 +40,84 @@ def test_logL(self): # we chose a source position (in units angle) x_source, y_source = 0.02, 0.01 # we chose a lens model - kwargs_lens = [{'theta_E': 1., 'e1': 0.1, 'e2': 0.2, 'gamma': 2., 'center_x': 0, 'center_y': 0}, - {'gamma1': 0.05, 'gamma2': -0.01}] + kwargs_lens = [ + { + "theta_E": 1.0, + "e1": 0.1, + "e2": 0.2, + "gamma": 2.0, + "center_x": 0, + "center_y": 0, + }, + {"gamma1": 0.05, "gamma2": -0.01}, + ] # compute image positions and their (finite) magnifications # we solve for the image position(s) of the provided source position and lens model - x_img, y_img = lensEquationSolver.image_position_from_source(kwargs_lens=kwargs_lens, sourcePos_x=x_source, - sourcePos_y=y_source) + x_img, y_img = lensEquationSolver.image_position_from_source( + kwargs_lens=kwargs_lens, sourcePos_x=x_source, sourcePos_y=y_source + ) - point_source_list = ['LENSED_POSITION'] - kwargs_ps = [{'ra_image': x_img, 'dec_image': y_img}] + point_source_list = ["LENSED_POSITION"] + kwargs_ps = [{"ra_image": x_img, "dec_image": y_img}] pointSource = PointSource(point_source_type_list=point_source_list) t_days = lensModel.arrival_time(x_img, y_img, kwargs_lens) time_delays_measured = t_days[1:] - t_days[0] time_delays_uncertainties = np.array([0.1, 0.1, 0.1]) - self.td_likelihood = TimeDelayLikelihood(time_delays_measured, time_delays_uncertainties, lens_model_class=lensModel, point_source_class=pointSource) - kwargs_cosmo = {'D_dt': lensCosmo.ddt} - logL = self.td_likelihood.logL(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo) + self.td_likelihood = TimeDelayLikelihood( + time_delays_measured, + time_delays_uncertainties, + lens_model_class=lensModel, + point_source_class=pointSource, + ) + kwargs_cosmo = {"D_dt": lensCosmo.ddt} + logL = self.td_likelihood.logL( + kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo + ) npt.assert_almost_equal(logL, 0, decimal=8) time_delays_measured_new = copy.deepcopy(time_delays_measured) time_delays_measured_new[0] += 0.1 - td_likelihood = TimeDelayLikelihood(time_delays_measured_new, time_delays_uncertainties, - lens_model_class=lensModel, point_source_class=pointSource) - kwargs_cosmo = {'D_dt': lensCosmo.ddt} - logL = td_likelihood.logL(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo) + td_likelihood = TimeDelayLikelihood( + time_delays_measured_new, + time_delays_uncertainties, + lens_model_class=lensModel, + point_source_class=pointSource, + ) + kwargs_cosmo = {"D_dt": lensCosmo.ddt} + logL = td_likelihood.logL( + kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo + ) npt.assert_almost_equal(logL, -0.5, decimal=8) - # Test a covariance matrix being used - time_delays_cov = np.diag([0.1, 0.1, 0.1])**2 - td_likelihood = 
TimeDelayLikelihood(time_delays_measured_new, time_delays_cov, - lens_model_class=lensModel, point_source_class=pointSource) - logL = td_likelihood.logL(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo) + time_delays_cov = np.diag([0.1, 0.1, 0.1]) ** 2 + td_likelihood = TimeDelayLikelihood( + time_delays_measured_new, + time_delays_cov, + lens_model_class=lensModel, + point_source_class=pointSource, + ) + logL = td_likelihood.logL( + kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo + ) npt.assert_almost_equal(logL, -0.5, decimal=8) # Test behaviour with a wrong number of images time_delays_measured_new = time_delays_measured_new[:-1] - time_delays_uncertainties = time_delays_uncertainties[:-1] # remove last image - td_likelihood = TimeDelayLikelihood(time_delays_measured_new, time_delays_uncertainties, - lens_model_class=lensModel, point_source_class=pointSource) - logL = td_likelihood.logL(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo) - npt.assert_almost_equal(logL, -10**15, decimal=8) - - -if __name__ == '__main__': + time_delays_uncertainties = time_delays_uncertainties[:-1] # remove last image + td_likelihood = TimeDelayLikelihood( + time_delays_measured_new, + time_delays_uncertainties, + lens_model_class=lensModel, + point_source_class=pointSource, + ) + logL = td_likelihood.logL( + kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_cosmo=kwargs_cosmo + ) + npt.assert_almost_equal(logL, -(10**15), decimal=8) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Pool/test_pool.py b/test/test_Sampling/test_Pool/test_pool.py index 4c9b0341b..5375d8f7b 100644 --- a/test/test_Sampling/test_Pool/test_pool.py +++ b/test/test_Sampling/test_Pool/test_pool.py @@ -4,12 +4,12 @@ class TestPool(object): - def setup_method(self): pass def test_choose_pool(self): import schwimmbad + pool = choose_pool(mpi=False, processes=1, use_dill=True) assert pool.is_master() is True assert isinstance(pool, schwimmbad.serial.SerialPool) @@ -24,5 +24,5 @@ def test_choose_pool(self): # assert isinstance(pool, schwimmbad.mpi.MPIPool) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/conftest.py b/test/test_Sampling/test_Samplers/conftest.py index 0cd2c110b..0cd67aeae 100644 --- a/test/test_Sampling/test_Samplers/conftest.py +++ b/test/test_Sampling/test_Samplers/conftest.py @@ -13,7 +13,6 @@ @pytest.fixture def simple_einstein_ring_likelihood(): - # data specifics sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -25,76 +24,123 @@ def simple_einstein_ring_likelihood(): kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) data_class = ImageData(**kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix} + kwargs_psf_gaussian = {"psf_type": "GAUSSIAN", "fwhm": fwhm, "pixel_size": deltaPix} psf = PSF(**kwargs_psf_gaussian) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf.kernel_point_source} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": psf.kernel_point_source} psf_class = PSF(**kwargs_psf) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_model_list = ['SPEP'] + lens_model_list = 
["SPEP"] kwargs_lens = [kwargs_spemd] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, - 'e1': 0.1, 'e2': 0.1} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False, 'compute_mode': 'regular'} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, kwargs_lens, kwargs_source, - kwargs_lens_light) + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + "compute_mode": "regular", + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, kwargs_lens, kwargs_source, kwargs_lens_light + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim - kwargs_data_joint = {'multi_band_list': [[kwargs_data, kwargs_psf, kwargs_numerics]], 'multi_band_type': 'single-band'} + kwargs_data["image_data"] = image_sim + kwargs_data_joint = { + "multi_band_list": [[kwargs_data, kwargs_psf, kwargs_numerics]], + "multi_band_type": "single-band", + } - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'fixed_magnification_list': [False], - } + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "fixed_magnification_list": [False], + } - kwargs_constraints = {'image_plane_source_list': [False] * len(source_model_list)} + kwargs_constraints = {"image_plane_source_list": [False] * len(source_model_list)} - kwargs_likelihood = {'source_marg': False, - 'image_position_uncertainty': 0.004, - 'check_matched_source_position': False, - 'source_position_tolerance': 0.001, - 'source_position_sigma': 0.001, - } + kwargs_likelihood = { + "source_marg": False, + "image_position_uncertainty": 0.004, + "check_matched_source_position": False, + "source_position_tolerance": 0.001, + "source_position_sigma": 0.001, + } # reduce number of param to sample (for runtime) - kwargs_fixed_lens = [{'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1}] - kwargs_lower_lens = [{'theta_E': 0.8, 'center_x': -0.1}] - kwargs_upper_lens = [{'theta_E': 1.2, 'center_x': 0.1}] + kwargs_fixed_lens = [ + {"gamma": 1.8, "center_x": 0, "center_y": 0, "e1": 0.1, "e2": 0.1} + ] + kwargs_lower_lens = [{"theta_E": 0.8, "center_x": -0.1}] + kwargs_upper_lens = [{"theta_E": 1.2, "center_x": 0.1}] 
kwargs_fixed_source = kwargs_source kwargs_fixed_lens_light = kwargs_lens_light - param_class = Param(kwargs_model, - kwargs_fixed_lens=kwargs_fixed_lens, - kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, - kwargs_lower_lens=kwargs_lower_lens, - kwargs_upper_lens=kwargs_upper_lens, - **kwargs_constraints) - - likelihood = LikelihoodModule(kwargs_data_joint=kwargs_data_joint, kwargs_model=kwargs_model, - param_class=param_class, **kwargs_likelihood) - kwargs_truths = {'kwargs_lens': kwargs_lens, 'kwargs_source': kwargs_source, 'kwargs_lens_light': kwargs_lens_light} + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + kwargs_lower_lens=kwargs_lower_lens, + kwargs_upper_lens=kwargs_upper_lens, + **kwargs_constraints + ) + + likelihood = LikelihoodModule( + kwargs_data_joint=kwargs_data_joint, + kwargs_model=kwargs_model, + param_class=param_class, + **kwargs_likelihood + ) + kwargs_truths = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + } return likelihood, kwargs_truths @pytest.fixture def simple_einstein_ring_likelihood_2d(): - # data specifics sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -106,68 +152,125 @@ def simple_einstein_ring_likelihood_2d(): kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) data_class = ImageData(**kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix} + kwargs_psf_gaussian = {"psf_type": "GAUSSIAN", "fwhm": fwhm, "pixel_size": deltaPix} psf = PSF(**kwargs_psf_gaussian) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf.kernel_point_source} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": psf.kernel_point_source} psf_class = PSF(**kwargs_psf) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_model_list = ['SPEP'] + lens_model_list = ["SPEP"] kwargs_lens = [kwargs_spemd] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, - 'e1': 0.1, 'e2': 0.1} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False, 'compute_mode': 'regular'} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, kwargs_numerics=kwargs_numerics) - image_sim = 
sim_util.simulate_simple(imageModel, kwargs_lens, kwargs_source, - kwargs_lens_light) + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + "compute_mode": "regular", + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, kwargs_lens, kwargs_source, kwargs_lens_light + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim - kwargs_data_joint = {'multi_band_list': [[kwargs_data, kwargs_psf, kwargs_numerics]], 'multi_band_type': 'single-band'} + kwargs_data["image_data"] = image_sim + kwargs_data_joint = { + "multi_band_list": [[kwargs_data, kwargs_psf, kwargs_numerics]], + "multi_band_type": "single-band", + } - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'fixed_magnification_list': [False], - } + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "fixed_magnification_list": [False], + } - kwargs_constraints = {'image_plane_source_list': [False] * len(source_model_list)} + kwargs_constraints = {"image_plane_source_list": [False] * len(source_model_list)} - kwargs_likelihood = {'source_marg': False, - 'image_position_uncertainty': 0.004, - 'check_matched_source_position': False, - 'source_position_tolerance': 0.001, - 'source_position_sigma': 0.001, - } + kwargs_likelihood = { + "source_marg": False, + "image_position_uncertainty": 0.004, + "check_matched_source_position": False, + "source_position_tolerance": 0.001, + "source_position_sigma": 0.001, + } # reduce number of param to sample (for runtime) - kwargs_fixed_lens = [{'gamma': 1.8, 'center_y': 0, 'e1': 0.1, 'e2': 0.1}] - kwargs_lower_lens = [{'theta_E': 0.8, 'center_x': -0.1}] - kwargs_upper_lens = [{'theta_E': 1.2, 'center_x': 0.1}] - kwargs_fixed_source = [{'R_sersic': 0.6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1}] - kwargs_fixed_lens_light = [{'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0}] - - param_class = Param(kwargs_model, - kwargs_fixed_lens=kwargs_fixed_lens, - kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, - kwargs_lower_lens=kwargs_lower_lens, - kwargs_upper_lens=kwargs_upper_lens, - **kwargs_constraints) - - likelihood = LikelihoodModule(kwargs_data_joint=kwargs_data_joint, kwargs_model=kwargs_model, - param_class=param_class, **kwargs_likelihood) - kwargs_truths = {'kwargs_lens': kwargs_lens, 'kwargs_source': kwargs_source, 'kwargs_lens_light': kwargs_lens_light} + kwargs_fixed_lens = [{"gamma": 1.8, "center_y": 0, "e1": 0.1, "e2": 0.1}] + kwargs_lower_lens = [{"theta_E": 0.8, "center_x": -0.1}] + kwargs_upper_lens = [{"theta_E": 1.2, "center_x": 0.1}] + kwargs_fixed_source = [ + { + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } + ] + kwargs_fixed_lens_light = [ + {"R_sersic": 0.1, "n_sersic": 2, "center_x": 0, "center_y": 0} + ] + + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + kwargs_lower_lens=kwargs_lower_lens, + kwargs_upper_lens=kwargs_upper_lens, + **kwargs_constraints + ) + + likelihood = LikelihoodModule( + 
kwargs_data_joint=kwargs_data_joint, + kwargs_model=kwargs_model, + param_class=param_class, + **kwargs_likelihood + ) + kwargs_truths = { + "kwargs_lens": kwargs_lens, + "kwargs_source": kwargs_source, + "kwargs_lens_light": kwargs_lens_light, + } return likelihood, kwargs_truths diff --git a/test/test_Sampling/test_Samplers/test_base_nested_sampler.py b/test/test_Sampling/test_Samplers/test_base_nested_sampler.py index 7a0e4e3e9..5656fe91a 100644 --- a/test/test_Sampling/test_Samplers/test_base_nested_sampler.py +++ b/test/test_Sampling/test_Samplers/test_base_nested_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import pytest import numpy as np @@ -16,17 +16,14 @@ def import_fixture(simple_einstein_ring_likelihood): likelihood, kwargs_truths = simple_einstein_ring_likelihood prior_means = likelihood.param.kwargs2args(**kwargs_truths) prior_sigmas = np.ones_like(prior_means) * 0.1 - sampler = NestedSampler(likelihood, 'gaussian', prior_means, prior_sigmas, 0.5, 0.5) + sampler = NestedSampler(likelihood, "gaussian", prior_means, prior_sigmas, 0.5, 0.5) return sampler, likelihood class TestNestedSampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): - pass def test_sampler(self, import_fixture): @@ -39,13 +36,17 @@ def test_sampler(self, import_fixture): def test_sampler_init(self, import_fixture): _, likelihood = import_fixture - sampler = NestedSampler(likelihood, 'uniform', None, None, 1, 1) + sampler = NestedSampler(likelihood, "uniform", None, None, 1, 1) try: - sampler = NestedSampler(likelihood, 'gaussian', None, None, 1, 1) # will raise an Error + sampler = NestedSampler( + likelihood, "gaussian", None, None, 1, 1 + ) # will raise an Error except Exception as e: assert isinstance(e, ValueError) try: - sampler = NestedSampler(likelihood, 'some_type', None, None, 1, 1) # will raise an Error + sampler = NestedSampler( + likelihood, "some_type", None, None, 1, 1 + ) # will raise an Error except Exception as e: assert isinstance(e, ValueError) @@ -68,5 +69,5 @@ def test_log_likelihood(self, import_fixture): assert isinstance(e, NotImplementedError) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_cobaya_sampler.py b/test/test_Sampling/test_Samplers/test_cobaya_sampler.py index 6e2ccf507..7cb63b921 100644 --- a/test/test_Sampling/test_Samplers/test_cobaya_sampler.py +++ b/test/test_Sampling/test_Samplers/test_cobaya_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'nataliehogg' +__author__ = "nataliehogg" import pytest import numpy as np @@ -15,84 +15,88 @@ def import_fixture(simple_einstein_ring_likelihood): """ likelihood, kwargs_truths = simple_einstein_ring_likelihood means = likelihood.param.kwargs2args(**kwargs_truths) - sigmas = np.ones_like(means)*0.1 - sampler = CobayaSampler(likelihood_module=likelihood, mean_start=means, sigma_start=sigmas) + sigmas = np.ones_like(means) * 0.1 + sampler = CobayaSampler( + likelihood_module=likelihood, mean_start=means, sigma_start=sigmas + ) return sampler, likelihood, means, sigmas class TestCobayaSampler(object): - """ - test cobaya - """ + """Test cobaya.""" def setup_method(self): pass def test_sampler(self, import_fixture): - ''' - function to test the sampler - ''' + """Function to test the sampler.""" # test the sampler sampler, likelihood, means, sigmas = import_fixture - test_cobaya = {'Rminus1_stop': 100} + test_cobaya = {"Rminus1_stop": 100} updated_info, sampler_name, 
best_fit_values = sampler.run(**test_cobaya) - assert str(sampler_name) == 'mcmc' + assert str(sampler_name) == "mcmc" # test labels sampler, likelihood, means, sigmas = import_fixture - test_labels_kwargs = {'Rminus1_stop': 100, 'latex': ['theta_{\rm E}']} + test_labels_kwargs = {"Rminus1_stop": 100, "latex": ["theta_{\rm E}"]} - updated_info_l, sampler_name_l, best_fit_values_l = sampler.run(**test_labels_kwargs) + updated_info_l, sampler_name_l, best_fit_values_l = sampler.run( + **test_labels_kwargs + ) - assert str(sampler_name_l) == 'mcmc' + assert str(sampler_name_l) == "mcmc" # test passing dict for proposals sampler, likelihood, means, sigmas = import_fixture - props = {'theta_E': 0.001} + props = {"theta_E": 0.001} - test_prop_kwargs = {'Rminus1_stop': 100, 'proposal_widths': props} + test_prop_kwargs = {"Rminus1_stop": 100, "proposal_widths": props} - updated_info_d, sampler_name_d, best_fit_values_d = sampler.run(**test_prop_kwargs) + updated_info_d, sampler_name_d, best_fit_values_d = sampler.run( + **test_prop_kwargs + ) - assert str(sampler_name_d) == 'mcmc' + assert str(sampler_name_d) == "mcmc" # test passing path sampler, likelihood, means, sigmas = import_fixture - test_path_kwargs = {'Rminus1_stop': 100, 'path': 'test_chain'} + test_path_kwargs = {"Rminus1_stop": 100, "path": "test_chain"} - updated_info_p, sampler_name_p, best_fit_values_p = sampler.run(**test_path_kwargs) + updated_info_p, sampler_name_p, best_fit_values_p = sampler.run( + **test_path_kwargs + ) - assert str(sampler_name_p) == 'mcmc' + assert str(sampler_name_p) == "mcmc" # use unittest to test raised exceptions t = TestCase() with t.assertRaises(TypeError): # checks that TypeError is raised if prop widths not list or dict - test_prop_type = {'proposal_widths': 0.1} + test_prop_type = {"proposal_widths": 0.1} sampler.run(**test_prop_type) with t.assertRaises(ValueError): # checks that ValueError is raised if wrong number of prop width - test_prop_num = {'proposal_widths': [0.1, 0.1, 0.1]} + test_prop_num = {"proposal_widths": [0.1, 0.1, 0.1]} sampler.run(**test_prop_num) with t.assertRaises(ValueError): # checks that ValueError is raised if wrong number of labels - test_latex_num = {'latex': ['theta_{\rm E}', 'gamma']} + test_latex_num = {"latex": ["theta_{\rm E}", "gamma"]} sampler.run(**test_latex_num) with t.assertRaises(ValueError): # checks that ValueError is raised if drag is passed - test_drag = {'drag': True} + test_drag = {"drag": True} sampler.run(**test_drag) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_dynesty_sampler.py b/test/test_Sampling/test_Samplers/test_dynesty_sampler.py index 59152b0ff..c1d3a2bb8 100644 --- a/test/test_Sampling/test_Samplers/test_dynesty_sampler.py +++ b/test/test_Sampling/test_Samplers/test_dynesty_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import pytest import numpy as np @@ -18,18 +18,19 @@ def import_fixture(simple_einstein_ring_likelihood): prior_means = likelihood.param.kwargs2args(**kwargs_truths) prior_means *= 1.01 prior_sigmas = np.ones_like(prior_means) - print(prior_sigmas, prior_means, 'test prior sigmas') - sampler = DynestySampler(likelihood, prior_type='uniform', - prior_means=prior_means, - prior_sigmas=prior_sigmas, - sigma_scale=0.5) + print(prior_sigmas, prior_means, "test prior sigmas") + sampler = DynestySampler( + likelihood, + prior_type="uniform", + prior_means=prior_means, + prior_sigmas=prior_sigmas, + sigma_scale=0.5, + ) return 
sampler, likelihood class TestDynestySampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): pass @@ -37,11 +38,11 @@ def setup_method(self): def test_sampler(self, import_fixture): sampler, likelihood = import_fixture kwargs_run = { - 'dlogz_init': 0.01, - 'nlive_init': 20, - 'nlive_batch': 20, - 'maxbatch': 1, - 'wt_kwargs': {'pfrac': 0.8}, + "dlogz_init": 0.01, + "nlive_init": 20, + "nlive_batch": 20, + "maxbatch": 1, + "wt_kwargs": {"pfrac": 0.8}, } samples, means, logZ, logZ_err, logL, results = sampler.run(kwargs_run) assert len(means) == 1 @@ -49,13 +50,16 @@ def test_sampler(self, import_fixture): def test_sampler_init(self, import_fixture): sampler, likelihood = import_fixture try: - sampler = DynestySampler(likelihood, prior_type='gaussian', - prior_means=None, # will raise an Error - prior_sigmas=None) # will raise an Error + sampler = DynestySampler( + likelihood, + prior_type="gaussian", + prior_means=None, # will raise an Error + prior_sigmas=None, + ) # will raise an Error except Exception as e: assert isinstance(e, ValueError) try: - sampler = DynestySampler(likelihood, prior_type='some_type') + sampler = DynestySampler(likelihood, prior_type="some_type") except Exception as e: assert isinstance(e, ValueError) @@ -65,7 +69,7 @@ def test_prior(self, import_fixture): cube_low = np.zeros(n_dims) cube_upp = np.ones(n_dims) - self.prior_type = 'uniform' + self.prior_type = "uniform" cube_low = sampler.prior(cube_low) npt.assert_equal(cube_low, sampler.lowers) cube_upp = sampler.prior(cube_upp) @@ -81,5 +85,5 @@ def test_log_likelihood(self, import_fixture): # assert logL == -1e15 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_multinest_sampler.py b/test/test_Sampling/test_Samplers/test_multinest_sampler.py index 1d214dc0b..94b71d6a8 100644 --- a/test/test_Sampling/test_Samplers/test_multinest_sampler.py +++ b/test/test_Sampling/test_Samplers/test_multinest_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import pytest import os @@ -11,14 +11,16 @@ try: import pymultinest except: - print("Warning : MultiNest/pymultinest not installed properly, \ -but tests will be trivially fulfilled") + print( + "Warning : MultiNest/pymultinest not installed properly, \ +but tests will be trivially fulfilled" + ) pymultinest_installed = False else: pymultinest_installed = True -_output_dir = 'test_nested_out' +_output_dir = "test_nested_out" @pytest.fixture @@ -31,32 +33,32 @@ def import_fixture(simple_einstein_ring_likelihood): likelihood, kwargs_truths = simple_einstein_ring_likelihood prior_means = likelihood.param.kwargs2args(**kwargs_truths) prior_sigmas = np.ones_like(prior_means) * 0.1 - sampler = MultiNestSampler(likelihood, prior_type='uniform', - prior_means=prior_means, - prior_sigmas=prior_sigmas, - output_dir=_output_dir, - remove_output_dir=True) + sampler = MultiNestSampler( + likelihood, + prior_type="uniform", + prior_means=prior_means, + prior_sigmas=prior_sigmas, + output_dir=_output_dir, + remove_output_dir=True, + ) return sampler, likelihood class TestMultiNestSampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): - pass def test_sampler(self, import_fixture): sampler, likelihood = import_fixture kwargs_run = { - 'n_live_points': 10, - 'evidence_tolerance': 0.5, - 'sampling_efficiency': 0.8, # 1 for posterior-only, 0 for evidence-only - 
'importance_nested_sampling': False, - 'multimodal': True, - 'const_efficiency_mode': False, # reduce sampling_efficiency to 5% when True + "n_live_points": 10, + "evidence_tolerance": 0.5, + "sampling_efficiency": 0.8, # 1 for posterior-only, 0 for evidence-only + "importance_nested_sampling": False, + "multimodal": True, + "const_efficiency_mode": False, # reduce sampling_efficiency to 5% when True } samples, means, logZ, logZ_err, logL, results = sampler.run(kwargs_run) assert len(means) == 1 @@ -68,21 +70,25 @@ def test_sampler(self, import_fixture): def test_sampler_init(self, import_fixture): sampler, likelihood = import_fixture - test_dir = 'some_dir' + test_dir = "some_dir" os.mkdir(test_dir) - sampler = MultiNestSampler(likelihood, prior_type='uniform', - output_dir=test_dir) + sampler = MultiNestSampler( + likelihood, prior_type="uniform", output_dir=test_dir + ) shutil.rmtree(test_dir, ignore_errors=True) try: - sampler = MultiNestSampler(likelihood, prior_type='gaussian', - prior_means=None, # will raise an Error - prior_sigmas=None, # will raise an Error - output_dir=None, - remove_output_dir=True) + sampler = MultiNestSampler( + likelihood, + prior_type="gaussian", + prior_means=None, # will raise an Error + prior_sigmas=None, # will raise an Error + output_dir=None, + remove_output_dir=True, + ) except Exception as e: assert isinstance(e, ValueError) try: - sampler = MultiNestSampler(likelihood, prior_type='some_type') + sampler = MultiNestSampler(likelihood, prior_type="some_type") except Exception as e: assert isinstance(e, ValueError) @@ -92,14 +98,14 @@ def test_prior(self, import_fixture): cube_low = np.zeros(n_dims) cube_upp = np.ones(n_dims) - self.prior_type = 'uniform' + self.prior_type = "uniform" sampler.prior(cube_low, n_dims, n_dims) npt.assert_equal(cube_low, sampler.lowers) sampler.prior(cube_upp, n_dims, n_dims) npt.assert_equal(cube_upp, sampler.uppers) cube_mid = 0.5 * np.ones(n_dims) - self.prior_type = 'gaussian' + self.prior_type = "gaussian" sampler.prior(cube_mid, n_dims, n_dims) cube_gauss = np.array([1.0]) npt.assert_equal(cube_mid, cube_gauss) @@ -110,9 +116,9 @@ def test_log_likelihood(self, import_fixture): args = np.nan * np.ones(n_dims) logL = sampler.log_likelihood(args, n_dims, n_dims) assert logL < 0 - #npt.assert_almost_equal(logL, -53.24465641401431, decimal=8) - #assert logL == -1e15 + # npt.assert_almost_equal(logL, -53.24465641401431, decimal=8) + # assert logL == -1e15 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_nautilus_sampler.py b/test/test_Sampling/test_Samplers/test_nautilus_sampler.py index e280a43cc..6b8fe4211 100644 --- a/test/test_Sampling/test_Samplers/test_nautilus_sampler.py +++ b/test/test_Sampling/test_Samplers/test_nautilus_sampler.py @@ -20,9 +20,7 @@ def import_fixture(simple_einstein_ring_likelihood_2d): class TestNautilusSampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): pass @@ -30,11 +28,11 @@ def setup_method(self): def test_sampler(self, import_fixture): sampler, likelihood = import_fixture kwargs = { - 'mpi': False, - 'verbose': True, - 'f_live': 1.0, - 'n_eff': 0.0, - 'seed': 42, + "mpi": False, + "verbose": True, + "f_live": 1.0, + "n_eff": 0.0, + "seed": 42, } points, log_w, log_l, log_z = sampler.run(**kwargs) assert len(points) == len(log_w) @@ -42,5 +40,5 @@ def test_sampler(self, import_fixture): assert np.isfinite(log_z) -if __name__ == '__main__': +if __name__ == 
"__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_polychord_sampler.py b/test/test_Sampling/test_Samplers/test_polychord_sampler.py index 2cd282e48..939d1bc7c 100644 --- a/test/test_Sampling/test_Samplers/test_polychord_sampler.py +++ b/test/test_Sampling/test_Samplers/test_polychord_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'aymgal' +__author__ = "aymgal" import pytest import os @@ -11,8 +11,10 @@ try: import dyPolyChord except: - print("Warning : PolyChordLite/DyPolyChord not installed properly, \ -but tests will be trivially fulfilled") + print( + "Warning : PolyChordLite/DyPolyChord not installed properly, \ +but tests will be trivially fulfilled" + ) dypolychord_installed = False else: dypolychord_installed = True @@ -20,15 +22,17 @@ try: import nestcheck except: - print("Warning : PolyChordLite/DyPolyChord not installed properly, \ -but tests will be trivially fulfilled") + print( + "Warning : PolyChordLite/DyPolyChord not installed properly, \ +but tests will be trivially fulfilled" + ) nestcheck_installed = False else: nestcheck_installed = True - + all_installed = dypolychord_installed and nestcheck_installed -_output_dir = 'test_nested_out' +_output_dir = "test_nested_out" @pytest.fixture @@ -41,31 +45,33 @@ def import_fixture(simple_einstein_ring_likelihood): likelihood, kwargs_truths = simple_einstein_ring_likelihood prior_means = likelihood.param.kwargs2args(**kwargs_truths) prior_sigmas = np.ones_like(prior_means) * 0.1 - sampler = DyPolyChordSampler(likelihood, prior_type='uniform', - prior_means=prior_means, - prior_sigmas=prior_sigmas, - output_dir=_output_dir, - remove_output_dir=True) + sampler = DyPolyChordSampler( + likelihood, + prior_type="uniform", + prior_means=prior_means, + prior_sigmas=prior_sigmas, + output_dir=_output_dir, + remove_output_dir=True, + ) return sampler, likelihood class TestDyPolyChordSampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): - pass def test_sampler(self, import_fixture): sampler, likelihood = import_fixture kwargs_run = { - 'ninit': 2, - 'nlive_const': 3, + "ninit": 2, + "nlive_const": 3, } dynamic_goal = 0.8 - samples, means, logZ, logZ_err, logL, results = sampler.run(dynamic_goal, kwargs_run) + samples, means, logZ, logZ_err, logL, results = sampler.run( + dynamic_goal, kwargs_run + ) assert len(means) == 1 if not all_installed: # trivial test when dypolychord is not installed properly @@ -75,21 +81,25 @@ def test_sampler(self, import_fixture): def test_sampler_init(self, import_fixture): sampler, likelihood = import_fixture - test_dir = 'some_dir' + test_dir = "some_dir" os.mkdir(test_dir) - sampler = DyPolyChordSampler(likelihood, prior_type='uniform', - output_dir=test_dir) + sampler = DyPolyChordSampler( + likelihood, prior_type="uniform", output_dir=test_dir + ) shutil.rmtree(test_dir, ignore_errors=True) try: - sampler = DyPolyChordSampler(likelihood, prior_type='gaussian', - prior_means=None, # will raise an Error - prior_sigmas=None, # will raise an Error - output_dir=None, - remove_output_dir=True) + sampler = DyPolyChordSampler( + likelihood, + prior_type="gaussian", + prior_means=None, # will raise an Error + prior_sigmas=None, # will raise an Error + output_dir=None, + remove_output_dir=True, + ) except Exception as e: assert isinstance(e, ValueError) try: - sampler = DyPolyChordSampler(likelihood, prior_type='some_type') + sampler = DyPolyChordSampler(likelihood, prior_type="some_type") except Exception as e: assert 
isinstance(e, ValueError) @@ -99,7 +109,7 @@ def test_prior(self, import_fixture): cube_low = np.zeros(n_dims) cube_upp = np.ones(n_dims) - self.prior_type = 'uniform' + self.prior_type = "uniform" cube_low = sampler.prior(cube_low) npt.assert_equal(cube_low, sampler.lowers) cube_upp = sampler.prior(cube_upp) @@ -111,24 +121,24 @@ def test_log_likelihood(self, import_fixture): args = np.nan * np.ones(n_dims) logL, phi = sampler.log_likelihood(args) assert logL < 0 - #npt.assert_almost_equal(logL, -53.607122396369675, decimal=8) - #assert logL == -1e15 + # npt.assert_almost_equal(logL, -53.607122396369675, decimal=8) + # assert logL == -1e15 assert phi == [] def test_write_equal_weights(self, import_fixture): sampler, likelihood = import_fixture n_dims = 10 ns_run = { - 'theta': np.zeros((1, n_dims)), - 'logl': np.zeros(1), - 'output': { - 'logZ': np.zeros(n_dims), - 'logZerr': np.zeros(n_dims), - 'param_means': np.zeros(n_dims) - } + "theta": np.zeros((1, n_dims)), + "logl": np.zeros(1), + "output": { + "logZ": np.zeros(n_dims), + "logZerr": np.zeros(n_dims), + "param_means": np.zeros(n_dims), + }, } - sampler._write_equal_weights(ns_run['theta'], ns_run['logl']) + sampler._write_equal_weights(ns_run["theta"], ns_run["logl"]) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_Samplers/test_pso.py b/test/test_Sampling/test_Samplers/test_pso.py index 8b47c4bf9..b18bba44c 100644 --- a/test/test_Sampling/test_Samplers/test_pso.py +++ b/test/test_Sampling/test_Samplers/test_pso.py @@ -1,8 +1,6 @@ -""" -Test the PSO module. +"""Test the PSO module. Execute with py.test -v - """ import numpy as np import pytest @@ -14,9 +12,8 @@ class TestParticleSwarmOptimizer(object): - """ + """""" - """ ctx = None params = np.array([[1, 2, 3], [4, 5, 6]]) @@ -122,10 +119,11 @@ def test_sample(self): n_iterations = 100 def ln_probability(x): - return -np.array(x)**2 + return -np.array(x) ** 2 - pso = ParticleSwarmOptimizer(func=ln_probability, low=[-10], high=[10], - particle_count=n_particle) + pso = ParticleSwarmOptimizer( + func=ln_probability, low=[-10], high=[10], particle_count=n_particle + ) init_pos = np.array([1]) pso.global_best.position = init_pos @@ -137,7 +135,7 @@ def ln_probability(x): time_start = time.time() if pso.is_master(): - print('Computing the PSO...') + print("Computing the PSO...") num_iter = 0 @@ -158,5 +156,5 @@ def ln_probability(x): npt.assert_almost_equal(result[0], 0, decimal=6) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_likelihood.py b/test/test_Sampling/test_likelihood.py index 708a1a96a..6e881abe0 100644 --- a/test/test_Sampling/test_likelihood.py +++ b/test/test_Sampling/test_likelihood.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -13,9 +13,7 @@ class TestLikelihoodModule(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): np.random.seed(42) @@ -27,163 +25,278 @@ def setup_method(self): deltaPix = 0.1 # pixel size in arcsec (area per pixel = deltaPix**2) fwhm = 0.5 # full width half max of PSF - kwargs_model = {'lens_model_list': ['SPEP'], - 'lens_light_model_list': ['SERSIC'], - 'source_light_model_list': ['SERSIC'], - 'point_source_model_list': ['SOURCE_POSITION'], - 'fixed_magnification_list': [True]} + kwargs_model = { + "lens_model_list": ["SPEP"], + "lens_light_model_list": ["SERSIC"], + "source_light_model_list": ["SERSIC"], + 
"point_source_model_list": ["SOURCE_POSITION"], + "fixed_magnification_list": [True], + } # PSF specification - kwargs_band = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_band = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_band) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix} + kwargs_psf = {"psf_type": "GAUSSIAN", "fwhm": fwhm, "pixel_size": deltaPix} psf_class = PSF(**kwargs_psf) - print(np.shape(psf_class.kernel_point_source), 'test kernel shape -') - kwargs_spep = {'theta_E': 1., 'gamma': 1.95, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} + print(np.shape(psf_class.kernel_point_source), "test kernel shape -") + kwargs_spep = { + "theta_E": 1.0, + "gamma": 1.95, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } self.kwargs_lens = [kwargs_spep] - kwargs_sersic = {'amp': 1/0.05**2., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1 / 0.05**2.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + } self.kwargs_lens_light = [kwargs_sersic] self.kwargs_source = [kwargs_sersic_ellipse] - self.kwargs_ps = [{'ra_source': 0.05, 'dec_source': 0.02, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - self.kwargs_cosmo = {'D_dt': 1000} - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances(**kwargs_model) - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, point_source_class, extinction_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) - ra_pos, dec_pos = imageModel.PointSource.image_position(kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens) + self.kwargs_ps = [ + {"ra_source": 0.05, "dec_source": 0.02, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + self.kwargs_cosmo = {"D_dt": 1000} + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + } + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = class_creator.create_class_instances(**kwargs_model) + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) + ra_pos, dec_pos = imageModel.PointSource.image_position( + kwargs_ps=self.kwargs_ps, kwargs_lens=self.kwargs_lens + ) data_class.update_data(image_sim) - kwargs_band['image_data'] = image_sim + kwargs_band["image_data"] = image_sim self.data_class = data_class self.psf_class = psf_class self.kwargs_model = kwargs_model self.kwargs_numerics = { - 'supersampling_factor': 1, - 'supersampling_convolution': 
False} + "supersampling_factor": 1, + "supersampling_convolution": False, + } kwargs_constraints = { - 'num_point_source_list': [4], - 'solver_type': 'NONE', # 'PROFILE', 'PROFILE_SHEAR', 'ELLIPSE', 'CENTER' - 'Ddt_sampling': True - } + "num_point_source_list": [4], + "solver_type": "NONE", # 'PROFILE', 'PROFILE_SHEAR', 'ELLIPSE', 'CENTER' + "Ddt_sampling": True, + } - def condition_definition(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps=None, kwargs_special=None, kwargs_extinction=None): + def condition_definition( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): logL = 0 - if kwargs_lens_light[0]['R_sersic'] > kwargs_source[0]['R_sersic']: + if kwargs_lens_light[0]["R_sersic"] > kwargs_source[0]["R_sersic"]: logL -= 10**15 return logL - kwargs_likelihood = {'force_no_add_image': True, - 'source_marg': True, - 'astrometric_likelihood': True, - 'image_position_uncertainty': 0.004, - 'check_matched_source_position': False, - 'source_position_tolerance': 0.001, - 'source_position_sigma': 0.001, - 'check_positive_flux': True, - 'flux_ratio_likelihood': True, - 'prior_lens': [[0, 'theta_E', 1, 0.1]], - 'custom_logL_addition': condition_definition, - 'image_position_likelihood': True - } - self.kwargs_data = {'multi_band_list': [[kwargs_band, kwargs_psf, kwargs_numerics]], 'multi_band_type': 'single-band', - 'time_delays_measured': np.ones(3), - 'time_delays_uncertainties': np.ones(3), - 'flux_ratios': np.ones(3), - 'flux_ratio_errors': np.ones(3), - 'ra_image_list': ra_pos, - 'dec_image_list': dec_pos - } + kwargs_likelihood = { + "force_no_add_image": True, + "source_marg": True, + "astrometric_likelihood": True, + "image_position_uncertainty": 0.004, + "check_matched_source_position": False, + "source_position_tolerance": 0.001, + "source_position_sigma": 0.001, + "check_positive_flux": True, + "flux_ratio_likelihood": True, + "prior_lens": [[0, "theta_E", 1, 0.1]], + "custom_logL_addition": condition_definition, + "image_position_likelihood": True, + } + self.kwargs_data = { + "multi_band_list": [[kwargs_band, kwargs_psf, kwargs_numerics]], + "multi_band_type": "single-band", + "time_delays_measured": np.ones(3), + "time_delays_uncertainties": np.ones(3), + "flux_ratios": np.ones(3), + "flux_ratio_errors": np.ones(3), + "ra_image_list": ra_pos, + "dec_image_list": dec_pos, + } self.param_class = Param(self.kwargs_model, **kwargs_constraints) - self.imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - self.Likelihood = LikelihoodModule(kwargs_data_joint=self.kwargs_data, kwargs_model=kwargs_model, param_class=self.param_class, **kwargs_likelihood) + self.imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + self.Likelihood = LikelihoodModule( + kwargs_data_joint=self.kwargs_data, + kwargs_model=kwargs_model, + param_class=self.param_class, + **kwargs_likelihood + ) self.kwargs_band = kwargs_band self.kwargs_psf = kwargs_psf self.numPix = numPix def test_logL(self): - args = self.param_class.kwargs2args(kwargs_lens=self.kwargs_lens, kwargs_source=self.kwargs_source, - kwargs_lens_light=self.kwargs_lens_light, kwargs_ps=self.kwargs_ps, kwargs_special=self.kwargs_cosmo) + args = self.param_class.kwargs2args( + kwargs_lens=self.kwargs_lens, + 
kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + kwargs_ps=self.kwargs_ps, + kwargs_special=self.kwargs_cosmo, + ) logL = self.Likelihood.logL(args, verbose=True) num_data_evaluate = self.Likelihood.num_data - npt.assert_almost_equal(logL/num_data_evaluate, -1/2., decimal=1) + npt.assert_almost_equal(logL / num_data_evaluate, -1 / 2.0, decimal=1) def test_time_delay_likelihood(self): - kwargs_likelihood = {'time_delay_likelihood': True, - } - likelihood = LikelihoodModule(kwargs_data_joint=self.kwargs_data, kwargs_model=self.kwargs_model, param_class=self.param_class, **kwargs_likelihood) - args = self.param_class.kwargs2args(kwargs_lens=self.kwargs_lens, kwargs_source=self.kwargs_source, - kwargs_lens_light=self.kwargs_lens_light, kwargs_ps=self.kwargs_ps, kwargs_special=self.kwargs_cosmo) + kwargs_likelihood = { + "time_delay_likelihood": True, + } + likelihood = LikelihoodModule( + kwargs_data_joint=self.kwargs_data, + kwargs_model=self.kwargs_model, + param_class=self.param_class, + **kwargs_likelihood + ) + args = self.param_class.kwargs2args( + kwargs_lens=self.kwargs_lens, + kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + kwargs_ps=self.kwargs_ps, + kwargs_special=self.kwargs_cosmo, + ) logL = likelihood.logL(args, verbose=True) npt.assert_almost_equal(logL, -1328.821179288249, decimal=-1) def test_check_bounds(self): - penalty, bound_hit = self.Likelihood.check_bounds(args=[0, 1], lowerLimit=[1, 0], upperLimit=[2, 2], - verbose=True) + penalty, bound_hit = self.Likelihood.check_bounds( + args=[0, 1], lowerLimit=[1, 0], upperLimit=[2, 2], verbose=True + ) assert bound_hit def test_pixelbased_modelling(self): ss_source = 2 - numPix_source = self.numPix*ss_source + numPix_source = self.numPix * ss_source n_scales = 3 kwargs_pixelbased = { - 'source_interpolation': 'nearest', - 'supersampling_factor_source': ss_source, # supersampling of pixelated source grid - + "source_interpolation": "nearest", + "supersampling_factor_source": ss_source, # supersampling of pixelated source grid # following choices are to minimize pixel solver runtime (not to get accurate reconstruction!) 
- 'threshold_decrease_type': 'none', - 'num_iter_source': 2, - 'num_iter_lens': 2, - 'num_iter_global': 2, - 'num_iter_weights': 2, + "threshold_decrease_type": "none", + "num_iter_source": 2, + "num_iter_lens": 2, + "num_iter_global": 2, + "num_iter_weights": 2, } kwargs_likelihood = { - 'image_likelihood': True, - 'kwargs_pixelbased': kwargs_pixelbased, - 'check_positive_flux': True, # effectively not applied, activated for code coverage purposes + "image_likelihood": True, + "kwargs_pixelbased": kwargs_pixelbased, + "check_positive_flux": True, # effectively not applied, activated for code coverage purposes } kernel = PSF(**self.kwargs_psf).kernel_point_source - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel} - kwargs_numerics = {'supersampling_factor': 1} - kwargs_data = {'multi_band_list': [[self.kwargs_band, kwargs_psf, kwargs_numerics]]} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel} + kwargs_numerics = {"supersampling_factor": 1} + kwargs_data = { + "multi_band_list": [[self.kwargs_band, kwargs_psf, kwargs_numerics]] + } kwargs_model = { - 'lens_model_list': ['SPEP'], - 'lens_light_model_list': ['SLIT_STARLETS'], - 'source_light_model_list': ['SLIT_STARLETS'], + "lens_model_list": ["SPEP"], + "lens_light_model_list": ["SLIT_STARLETS"], + "source_light_model_list": ["SLIT_STARLETS"], } - - kwargs_fixed_source = [{'n_scales': n_scales, 'n_pixels': numPix_source**2, 'scale': 1, 'center_x': 0, 'center_y': 0}] - kwargs_fixed_lens_light = [{'n_scales': n_scales, 'n_pixels': self.numPix**2, 'scale': 1, 'center_x': 0, 'center_y': 0}] - kwargs_constraints = {'source_grid_offset': True} - param_class = Param(kwargs_model, - kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, - **kwargs_constraints) - - likelihood = LikelihoodModule(kwargs_data_joint=kwargs_data, kwargs_model=kwargs_model, - param_class=param_class, **kwargs_likelihood) - - kwargs_source = [{'amp': np.ones(n_scales*numPix_source**2)}] - kwargs_lens_light = [{'amp': np.ones(n_scales*self.numPix**2)}] - kwargs_special = {'delta_x_source_grid': 0, 'delta_y_source_grid': 0} - args = param_class.kwargs2args(kwargs_lens=self.kwargs_lens, kwargs_source=kwargs_source, - kwargs_lens_light=kwargs_lens_light, kwargs_special=kwargs_special) + + kwargs_fixed_source = [ + { + "n_scales": n_scales, + "n_pixels": numPix_source**2, + "scale": 1, + "center_x": 0, + "center_y": 0, + } + ] + kwargs_fixed_lens_light = [ + { + "n_scales": n_scales, + "n_pixels": self.numPix**2, + "scale": 1, + "center_x": 0, + "center_y": 0, + } + ] + kwargs_constraints = {"source_grid_offset": True} + param_class = Param( + kwargs_model, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + **kwargs_constraints + ) + + likelihood = LikelihoodModule( + kwargs_data_joint=kwargs_data, + kwargs_model=kwargs_model, + param_class=param_class, + **kwargs_likelihood + ) + + kwargs_source = [{"amp": np.ones(n_scales * numPix_source**2)}] + kwargs_lens_light = [{"amp": np.ones(n_scales * self.numPix**2)}] + kwargs_special = {"delta_x_source_grid": 0, "delta_y_source_grid": 0} + args = param_class.kwargs2args( + kwargs_lens=self.kwargs_lens, + kwargs_source=kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_special=kwargs_special, + ) logL = likelihood.logL(args, verbose=True) num_data_evaluate = likelihood.num_data - npt.assert_almost_equal(logL/num_data_evaluate, -1/2., decimal=1) + npt.assert_almost_equal(logL / num_data_evaluate, -1 / 2.0, 
decimal=1) + -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_param_groups.py b/test/test_Sampling/test_param_groups.py index 8484e558f..d698a4ca6 100644 --- a/test/test_Sampling/test_param_groups.py +++ b/test/test_Sampling/test_param_groups.py @@ -1,4 +1,4 @@ -__author__ = 'jhodonnell' +__author__ = "jhodonnell" import numpy as np @@ -6,21 +6,19 @@ import unittest import pytest -from lenstronomy.Sampling.param_group import ( - ModelParamGroup, SingleParam, ArrayParam - ) +from lenstronomy.Sampling.param_group import ModelParamGroup, SingleParam, ArrayParam class ExampleSingleParam(SingleParam): - param_names = ['sp1', 'sp2'] - _kwargs_lower = {'sp1': 0, 'sp2': 0} - _kwargs_upper = {'sp1': 10, 'sp2': 10} + param_names = ["sp1", "sp2"] + _kwargs_lower = {"sp1": 0, "sp2": 0} + _kwargs_upper = {"sp1": 10, "sp2": 10} class ExampleArrayParam(ArrayParam): - param_names = {'ap1': 1, 'ap2': 3} - _kwargs_lower = {'ap1': [0], 'ap2': [0]*3} - _kwargs_upper = {'ap1': [10], 'ap2': [10]*3} + param_names = {"ap1": 1, "ap2": 3} + _kwargs_lower = {"ap1": [0], "ap2": [0] * 3} + _kwargs_upper = {"ap1": [10], "ap2": [10] * 3} class TestParamGroup(object): @@ -32,34 +30,34 @@ def test_single_param(self): num, names = sp.num_params({}) assert num == 2 - assert names == ['sp1', 'sp2'] + assert names == ["sp1", "sp2"] - result = sp.set_params({'sp1': 2}, {'sp2': 3}) + result = sp.set_params({"sp1": 2}, {"sp2": 3}) assert result == [2] - result = sp.set_params({'sp1': 2, 'sp2': 3}, {}) + result = sp.set_params({"sp1": 2, "sp2": 3}, {}) assert result == [2, 3] kwargs, i = sp.get_params(result, i=0, kwargs_fixed={}) - assert kwargs['sp1'] == 2 - assert kwargs['sp2'] == 3 + assert kwargs["sp1"] == 2 + assert kwargs["sp2"] == 3 def test_array_param(self): ap = ExampleArrayParam(on=True) num, names = ap.num_params({}) assert num == 4 - assert names == ['ap1'] + ['ap2']*3 + assert names == ["ap1"] + ["ap2"] * 3 - result = ap.set_params({'ap1': [2]}, {'ap2': [1, 1, 1]}) + result = ap.set_params({"ap1": [2]}, {"ap2": [1, 1, 1]}) assert result == [2] - result = ap.set_params({'ap1': [2], 'ap2': [1, 2, 3]}, {}) + result = ap.set_params({"ap1": [2], "ap2": [1, 2, 3]}, {}) assert result == [2, 1, 2, 3] kwargs, i = ap.get_params(result, i=0, kwargs_fixed={}) - assert kwargs['ap1'] == [2] - assert kwargs['ap2'] == [1, 2, 3] + assert kwargs["ap1"] == [2] + assert kwargs["ap2"] == [1, 2, 3] def test_compose(self): sp = ExampleSingleParam(on=True) @@ -71,18 +69,17 @@ def test_compose(self): result = ModelParamGroup.compose_set_params( [sp, ap], - { - 'sp1': 1, 'sp2': 2, - 'ap1': [3], 'ap2': [4, 5, 6] - }, - kwargs_fixed={} + {"sp1": 1, "sp2": 2, "ap1": [3], "ap2": [4, 5, 6]}, + kwargs_fixed={}, ) assert result == [1, 2, 3, 4, 5, 6] - kwargs, i = ModelParamGroup.compose_get_params([sp, ap], result, i=0, kwargs_fixed={}) - assert kwargs['sp1'] == 1 - assert kwargs['ap2'] == [4, 5, 6] + kwargs, i = ModelParamGroup.compose_get_params( + [sp, ap], result, i=0, kwargs_fixed={} + ) + assert kwargs["sp1"] == 1 + assert kwargs["ap2"] == [4, 5, 6] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_parameters.py b/test/test_Sampling/test_parameters.py index a01ccdd48..c7aeabed7 100644 --- a/test/test_Sampling/test_parameters.py +++ b/test/test_Sampling/test_parameters.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import numpy as np import numpy.testing as npt @@ -11,42 +11,68 @@ class TestParam(object): - 
def setup_method(self): - kwargs_model = {'lens_model_list': ['SPEP'], 'source_light_model_list': ['GAUSSIAN'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION'], - 'multi_plane': True, 'lens_redshift_list': [0.5], 'z_source': 2, 'source_redshift_list': [0.5]} - kwargs_param = {'num_point_source_list': [2], 'lens_redshift_sampling_indexes': [0], - 'source_redshift_sampling_indexes': [0], 'image_plane_source_list': [True]} - kwargs_fixed_lens = [{'gamma': 1.9}] # for SPEP lens - kwargs_fixed_source = [{'sigma': 0.1, 'center_x':0.2, 'center_y': 0.2}] - kwargs_fixed_ps = [{'ra_image': [-1, 1], 'dec_image': [-1, 1]}] + kwargs_model = { + "lens_model_list": ["SPEP"], + "source_light_model_list": ["GAUSSIAN"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "multi_plane": True, + "lens_redshift_list": [0.5], + "z_source": 2, + "source_redshift_list": [0.5], + } + kwargs_param = { + "num_point_source_list": [2], + "lens_redshift_sampling_indexes": [0], + "source_redshift_sampling_indexes": [0], + "image_plane_source_list": [True], + } + kwargs_fixed_lens = [{"gamma": 1.9}] # for SPEP lens + kwargs_fixed_source = [{"sigma": 0.1, "center_x": 0.2, "center_y": 0.2}] + kwargs_fixed_ps = [{"ra_image": [-1, 1], "dec_image": [-1, 1]}] kwargs_fixed_lens_light = [{}] kwargs_fixed_cosmo = [{}] - self.param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, - kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, kwargs_fixed_ps=kwargs_fixed_ps, - kwargs_fixed_special=kwargs_fixed_cosmo, **kwargs_param) + self.param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + kwargs_fixed_ps=kwargs_fixed_ps, + kwargs_fixed_special=kwargs_fixed_cosmo, + **kwargs_param + ) self.param_class.print_setting() def test_num_param(self): num_param, list = self.param_class.num_param() - assert list[0] == 'theta_E_lens0' + assert list[0] == "theta_E_lens0" assert num_param == 10 - kwargs_model = {'lens_model_list': ['SPEP'], 'source_light_model_list': ['GAUSSIAN'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION']} + kwargs_model = { + "lens_model_list": ["SPEP"], + "source_light_model_list": ["GAUSSIAN"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } kwargs_param = {} - kwargs_fixed_lens = [{'gamma': 1.9}] # for SPEP lens - kwargs_fixed_source = [{'sigma': 0.1, 'center_x': 0.2, 'center_y': 0.2}] - kwargs_fixed_ps = [{'ra_image': [-1, 1], 'dec_image': [-1, 1]}] + kwargs_fixed_lens = [{"gamma": 1.9}] # for SPEP lens + kwargs_fixed_source = [{"sigma": 0.1, "center_x": 0.2, "center_y": 0.2}] + kwargs_fixed_ps = [{"ra_image": [-1, 1], "dec_image": [-1, 1]}] kwargs_fixed_lens_light = [{}] kwargs_fixed_cosmo = [{}] - param_class_linear = Param(kwargs_model, kwargs_fixed_lens, kwargs_fixed_source, - kwargs_fixed_lens_light, kwargs_fixed_ps, kwargs_fixed_cosmo, - linear_solver=True, **kwargs_param) + param_class_linear = Param( + kwargs_model, + kwargs_fixed_lens, + kwargs_fixed_source, + kwargs_fixed_lens_light, + kwargs_fixed_ps, + kwargs_fixed_cosmo, + linear_solver=True, + **kwargs_param + ) num_param, list = param_class_linear.num_param() - assert list[0] == 'theta_E_lens0' + assert list[0] == "theta_E_lens0" print(list) assert len(list) == num_param assert num_param == 9 @@ -56,295 +82,506 @@ def 
test_num_param_linear(self): assert num_param == 4 def test_get_params(self): - kwargs_true_lens = [{'theta_E': 1.,'gamma':1.9, 'e1':0.01, 'e2':-0.01, 'center_x':0., 'center_y':0.}] #for SPEP lens - kwargs_true_source = [{'amp': 1*2*np.pi*0.1**2,'center_x':0.2, 'center_y':0.2, 'sigma': 0.1}] - kwargs_true_lens_light = [{'center_x': -0.06, 'center_y': 0.4, 'phi_G': 4.8, - 'q': 0.86, 'n_sersic': 1.7, - 'amp': 11.8, 'R_sersic': 0.697, 'phi_G_2': 0}] - kwargs_true_ps = [{'point_amp': [1, 1], 'ra_image': [-1, 1], 'dec_image': [-1, 1]}] - kwargs_special = {'z_sampling': [0.5]} - args = self.param_class.kwargs2args(kwargs_true_lens, kwargs_true_source, - kwargs_lens_light=kwargs_true_lens_light, kwargs_ps=kwargs_true_ps, - kwargs_special=kwargs_special) + kwargs_true_lens = [ + { + "theta_E": 1.0, + "gamma": 1.9, + "e1": 0.01, + "e2": -0.01, + "center_x": 0.0, + "center_y": 0.0, + } + ] # for SPEP lens + kwargs_true_source = [ + { + "amp": 1 * 2 * np.pi * 0.1**2, + "center_x": 0.2, + "center_y": 0.2, + "sigma": 0.1, + } + ] + kwargs_true_lens_light = [ + { + "center_x": -0.06, + "center_y": 0.4, + "phi_G": 4.8, + "q": 0.86, + "n_sersic": 1.7, + "amp": 11.8, + "R_sersic": 0.697, + "phi_G_2": 0, + } + ] + kwargs_true_ps = [ + {"point_amp": [1, 1], "ra_image": [-1, 1], "dec_image": [-1, 1]} + ] + kwargs_special = {"z_sampling": [0.5]} + args = self.param_class.kwargs2args( + kwargs_true_lens, + kwargs_true_source, + kwargs_lens_light=kwargs_true_lens_light, + kwargs_ps=kwargs_true_ps, + kwargs_special=kwargs_special, + ) kwargs_return = self.param_class.args2kwargs(args) - lens_dict_list = kwargs_return['kwargs_lens'] - lens_light_dict_list = kwargs_return['kwargs_lens_light'] + lens_dict_list = kwargs_return["kwargs_lens"] + lens_light_dict_list = kwargs_return["kwargs_lens_light"] lens_dict = lens_dict_list[0] - assert lens_dict['theta_E'] == 1. - assert lens_dict['gamma'] == 1.9 - assert lens_dict['e1'] == 0.01 - assert lens_dict['e2'] == -0.01 - assert lens_dict['center_x'] == 0. - assert lens_dict['center_y'] == 0. 
- assert lens_light_dict_list[0]['center_x'] == -0.06 + assert lens_dict["theta_E"] == 1.0 + assert lens_dict["gamma"] == 1.9 + assert lens_dict["e1"] == 0.01 + assert lens_dict["e2"] == -0.01 + assert lens_dict["center_x"] == 0.0 + assert lens_dict["center_y"] == 0.0 + assert lens_light_dict_list[0]["center_x"] == -0.06 def test_get_cosmo(self): - kwargs_model = {'lens_model_list': ['SPEP'], 'source_light_model_list': ['GAUSSIAN'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION'], - } - kwargs_param = {'Ddt_sampling': True} - kwargs_fixed_lens = [{'gamma': 1.9}] # for SPEP lens - kwargs_fixed_source = [{'sigma': 0.1, 'center_x': 0.2, 'center_y': 0.2}] - kwargs_fixed_ps = [{'ra_image': [-1, 1], 'dec_image': [-1, 1]}] + kwargs_model = { + "lens_model_list": ["SPEP"], + "source_light_model_list": ["GAUSSIAN"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } + kwargs_param = {"Ddt_sampling": True} + kwargs_fixed_lens = [{"gamma": 1.9}] # for SPEP lens + kwargs_fixed_source = [{"sigma": 0.1, "center_x": 0.2, "center_y": 0.2}] + kwargs_fixed_ps = [{"ra_image": [-1, 1], "dec_image": [-1, 1]}] kwargs_fixed_lens_light = [{}] - kwargs_fixed_cosmo = {'D_dt': 1000} - param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, kwargs_fixed_ps=kwargs_fixed_ps, - kwargs_fixed_special=kwargs_fixed_cosmo, **kwargs_param) + kwargs_fixed_cosmo = {"D_dt": 1000} + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + kwargs_fixed_ps=kwargs_fixed_ps, + kwargs_fixed_special=kwargs_fixed_cosmo, + **kwargs_param + ) kwargs_true_lens = [ - {'theta_E': 1., 'gamma': 1.9, 'e1':0.01, 'e2':-0.01, 'center_x': 0., 'center_y': 0.}] # for SPEP lens + { + "theta_E": 1.0, + "gamma": 1.9, + "e1": 0.01, + "e2": -0.01, + "center_x": 0.0, + "center_y": 0.0, + } + ] # for SPEP lens kwargs_true_source = [ - {'amp': 1 * 2 * np.pi * 0.1 ** 2, 'center_x': 0.2, 'center_y': 0.2, 'sigma': 0.1}] - kwargs_true_lens_light = [{'center_x': -0.06, 'center_y': 0.4, 'phi_G': 4.8, - 'q': 0.86, 'n_sersic': 1.7, - 'amp': 11.8, 'R_sersic': 0.697, 'phi_G_2': 0}] - kwargs_true_ps = [{'point_amp': [1, 1], 'ra_image': [-1, 1], 'dec_image': [-1, 1]}] - args = param_class.kwargs2args(kwargs_true_lens, kwargs_true_source, - kwargs_lens_light=kwargs_true_lens_light, kwargs_ps=kwargs_true_ps, - kwargs_special={'D_dt': 1000}) + { + "amp": 1 * 2 * np.pi * 0.1**2, + "center_x": 0.2, + "center_y": 0.2, + "sigma": 0.1, + } + ] + kwargs_true_lens_light = [ + { + "center_x": -0.06, + "center_y": 0.4, + "phi_G": 4.8, + "q": 0.86, + "n_sersic": 1.7, + "amp": 11.8, + "R_sersic": 0.697, + "phi_G_2": 0, + } + ] + kwargs_true_ps = [ + {"point_amp": [1, 1], "ra_image": [-1, 1], "dec_image": [-1, 1]} + ] + args = param_class.kwargs2args( + kwargs_true_lens, + kwargs_true_source, + kwargs_lens_light=kwargs_true_lens_light, + kwargs_ps=kwargs_true_ps, + kwargs_special={"D_dt": 1000}, + ) assert param_class.specialParams._D_dt_sampling.on def test_mass_scaling(self): - kwargs_model = {'lens_model_list': ['SIS', 'NFW', 'NFW', 'SIS', 'SERSIC', 'HERNQUIST']} - kwargs_constraints = {'mass_scaling_list': [False, 1, 1, 1, 1, 1]} - kwargs_fixed_lens = [{}, {'alpha_Rs': 0.1}, {'alpha_Rs': 0.3}, {'theta_E': 0.1}, - {'k_eff': 0.3}, {'sigma0': 1}] + kwargs_model = { + "lens_model_list": 
["SIS", "NFW", "NFW", "SIS", "SERSIC", "HERNQUIST"] + } + kwargs_constraints = {"mass_scaling_list": [False, 1, 1, 1, 1, 1]} + kwargs_fixed_lens = [ + {}, + {"alpha_Rs": 0.1}, + {"alpha_Rs": 0.3}, + {"theta_E": 0.1}, + {"k_eff": 0.3}, + {"sigma0": 1}, + ] kwargs_fixed_cosmo = {} - param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, kwargs_fixed_special=kwargs_fixed_cosmo - , **kwargs_constraints) - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}, - {'alpha_Rs': 0.1, 'Rs': 5, 'center_x': 1., 'center_y': 0}, - {'alpha_Rs': 0.1, 'Rs': 5, 'center_x': 0, 'center_y': 1.}, - {'theta_E': 0.1, 'center_x': 3, 'center_y': 1.}, - {'k_eff': 0.3, 'R_sersic': 1, 'n_sersic': 2, 'center_x': 3, 'center_y': 1.}, - {'sigma0': 1, 'Rs': 1, 'center_x': 3, 'center_y': 1.}] + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_special=kwargs_fixed_cosmo, + **kwargs_constraints + ) + kwargs_lens = [ + {"theta_E": 1, "center_x": 0, "center_y": 0}, + {"alpha_Rs": 0.1, "Rs": 5, "center_x": 1.0, "center_y": 0}, + {"alpha_Rs": 0.1, "Rs": 5, "center_x": 0, "center_y": 1.0}, + {"theta_E": 0.1, "center_x": 3, "center_y": 1.0}, + { + "k_eff": 0.3, + "R_sersic": 1, + "n_sersic": 2, + "center_x": 3, + "center_y": 1.0, + }, + {"sigma0": 1, "Rs": 1, "center_x": 3, "center_y": 1.0}, + ] kwargs_source = [] kwargs_lens_light = [] kwargs_ps = [] mass_scale = 2 - kwargs_cosmo = {'scale_factor': [mass_scale]} - args = param_class.kwargs2args(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special=kwargs_cosmo) + kwargs_cosmo = {"scale_factor": [mass_scale]} + args = param_class.kwargs2args( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_special=kwargs_cosmo, + ) assert args[-1] == mass_scale - kwargs_return = param_class.args2kwargs(args) - kwargs_lens = kwargs_return['kwargs_lens'] - print(kwargs_lens, 'test') - assert kwargs_lens[0]['theta_E'] == 1 - assert kwargs_lens[1]['alpha_Rs'] == 0.1 * mass_scale - assert kwargs_lens[2]['alpha_Rs'] == 0.3 * mass_scale - assert kwargs_lens[3]['theta_E'] == 0.1 * mass_scale - assert kwargs_lens[4]['k_eff'] == 0.3 * mass_scale - assert kwargs_lens[5]['sigma0'] == 1 * mass_scale + kwargs_lens = kwargs_return["kwargs_lens"] + print(kwargs_lens, "test") + assert kwargs_lens[0]["theta_E"] == 1 + assert kwargs_lens[1]["alpha_Rs"] == 0.1 * mass_scale + assert kwargs_lens[2]["alpha_Rs"] == 0.3 * mass_scale + assert kwargs_lens[3]["theta_E"] == 0.1 * mass_scale + assert kwargs_lens[4]["k_eff"] == 0.3 * mass_scale + assert kwargs_lens[5]["sigma0"] == 1 * mass_scale kwargs_return = param_class.args2kwargs(args, bijective=True) - kwargs_lens = kwargs_return['kwargs_lens'] - assert kwargs_lens[0]['theta_E'] == 1 - assert kwargs_lens[1]['alpha_Rs'] == 0.1 - assert kwargs_lens[2]['alpha_Rs'] == 0.3 + kwargs_lens = kwargs_return["kwargs_lens"] + assert kwargs_lens[0]["theta_E"] == 1 + assert kwargs_lens[1]["alpha_Rs"] == 0.1 + assert kwargs_lens[2]["alpha_Rs"] == 0.3 def test_general_scaling(self): - kwargs_model = {'lens_model_list': ['PJAFFE', 'PJAFFE', 'NFW', 'PJAFFE', 'NFW']} + kwargs_model = {"lens_model_list": ["PJAFFE", "PJAFFE", "NFW", "PJAFFE", "NFW"]} # Scale Rs for two of the PJAFFEs, and sigma0 for a different set of PJAFFEs # Scale alpha_Rs for the NFWs kwargs_constraints = { - 'general_scaling': { - 'Rs': [1, False, False, 1, False], - 'sigma0': [False, 1, False, 1, False], - 'alpha_Rs': [False, False, 1, False, 1], + "general_scaling": { + "Rs": [1, False, False, 1, False], + 
"sigma0": [False, 1, False, 1, False], + "alpha_Rs": [False, False, 1, False, 1], } } # PJAFFE: sigma0, Ra, Rs, center_x, center_y # NFW: Rs, alpha_Rs, center_x, center_y kwargs_fixed_lens = [ - {'Rs': 2.0, 'center_x': 1.0}, - {'sigma0': 2.0, 'Ra': 2.0, 'Rs': 3.0, 'center_y': 1.5}, - {'alpha_Rs': 0.1}, - {'Ra': 0.1, 'center_x': 0, 'center_y': 0}, - {'Rs': 3, 'center_x': -1, 'center_y': 3}, + {"Rs": 2.0, "center_x": 1.0}, + {"sigma0": 2.0, "Ra": 2.0, "Rs": 3.0, "center_y": 1.5}, + {"alpha_Rs": 0.1}, + {"Ra": 0.1, "center_x": 0, "center_y": 0}, + {"Rs": 3, "center_x": -1, "center_y": 3}, ] kwargs_fixed_cosmo = {} - param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, kwargs_fixed_special=kwargs_fixed_cosmo - , **kwargs_constraints) - kwargs_lens = [{'sigma0': 3, 'Ra': 2, 'center_y': 5}, - {'center_x': 1.}, - {'Rs': 3, 'center_x': 0.0, 'center_y': -1.0}, - {'sigma0': 3, 'Rs': 1.5}, - {'alpha_Rs': 4}] + param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_special=kwargs_fixed_cosmo, + **kwargs_constraints + ) + kwargs_lens = [ + {"sigma0": 3, "Ra": 2, "center_y": 5}, + {"center_x": 1.0}, + {"Rs": 3, "center_x": 0.0, "center_y": -1.0}, + {"sigma0": 3, "Rs": 1.5}, + {"alpha_Rs": 4}, + ] kwargs_source = [] kwargs_lens_light = [] kwargs_ps = [] # Define the scaling and power for each parameter kwargs_cosmo = { - 'Rs_scale_factor': [2.0], - 'Rs_scale_pow': [1.1], - 'sigma0_scale_factor': [3], - 'sigma0_scale_pow': [2.0], - 'alpha_Rs_scale_factor': [0.3], - 'alpha_Rs_scale_pow': [0.5], + "Rs_scale_factor": [2.0], + "Rs_scale_pow": [1.1], + "sigma0_scale_factor": [3], + "sigma0_scale_pow": [2.0], + "alpha_Rs_scale_factor": [0.3], + "alpha_Rs_scale_pow": [0.5], } - args = param_class.kwargs2args(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special=kwargs_cosmo) + args = param_class.kwargs2args( + kwargs_lens, + kwargs_source, + kwargs_lens_light, + kwargs_ps, + kwargs_special=kwargs_cosmo, + ) num, names = param_class.num_param() print(names) print(args) kwargs_return = param_class.args2kwargs(args) - kwargs_lens = kwargs_return['kwargs_lens'] - print('kwargs_lens:', kwargs_lens) - npt.assert_almost_equal(kwargs_lens[0]['Rs'], 2.0 * 2.0**1.1) - npt.assert_almost_equal(kwargs_lens[0]['sigma0'], 3) - npt.assert_almost_equal(kwargs_lens[1]['Rs'], 3.0) - npt.assert_almost_equal(kwargs_lens[1]['sigma0'], 3.0 * 2.0**2.0) - npt.assert_almost_equal(kwargs_lens[2]['alpha_Rs'], 0.3 * 0.1**0.5) - npt.assert_almost_equal(kwargs_lens[3]['Rs'], 2.0 * 1.5**1.1) - npt.assert_almost_equal(kwargs_lens[3]['sigma0'], 3.0 * 3**2.0) - npt.assert_almost_equal(kwargs_lens[4]['alpha_Rs'], 0.3 * 4**0.5) + kwargs_lens = kwargs_return["kwargs_lens"] + print("kwargs_lens:", kwargs_lens) + npt.assert_almost_equal(kwargs_lens[0]["Rs"], 2.0 * 2.0**1.1) + npt.assert_almost_equal(kwargs_lens[0]["sigma0"], 3) + npt.assert_almost_equal(kwargs_lens[1]["Rs"], 3.0) + npt.assert_almost_equal(kwargs_lens[1]["sigma0"], 3.0 * 2.0**2.0) + npt.assert_almost_equal(kwargs_lens[2]["alpha_Rs"], 0.3 * 0.1**0.5) + npt.assert_almost_equal(kwargs_lens[3]["Rs"], 2.0 * 1.5**1.1) + npt.assert_almost_equal(kwargs_lens[3]["sigma0"], 3.0 * 3**2.0) + npt.assert_almost_equal(kwargs_lens[4]["alpha_Rs"], 0.3 * 4**0.5) kwargs_return = param_class.args2kwargs(args, bijective=True) - kwargs_lens = kwargs_return['kwargs_lens'] - npt.assert_almost_equal(kwargs_lens[0]['Rs'], 2.0) - npt.assert_almost_equal(kwargs_lens[1]['sigma0'], 2.0) - npt.assert_almost_equal(kwargs_lens[2]['alpha_Rs'], 0.1) 
- npt.assert_almost_equal(kwargs_lens[3]['Rs'], 1.5) - npt.assert_almost_equal(kwargs_lens[3]['sigma0'], 3) - npt.assert_almost_equal(kwargs_lens[4]['alpha_Rs'], 4) + kwargs_lens = kwargs_return["kwargs_lens"] + npt.assert_almost_equal(kwargs_lens[0]["Rs"], 2.0) + npt.assert_almost_equal(kwargs_lens[1]["sigma0"], 2.0) + npt.assert_almost_equal(kwargs_lens[2]["alpha_Rs"], 0.1) + npt.assert_almost_equal(kwargs_lens[3]["Rs"], 1.5) + npt.assert_almost_equal(kwargs_lens[3]["sigma0"], 3) + npt.assert_almost_equal(kwargs_lens[4]["alpha_Rs"], 4) def test_joint_lens_with_light(self): - kwargs_model = {'lens_model_list': ['CHAMELEON'], 'lens_light_model_list': ['CHAMELEON']} + kwargs_model = { + "lens_model_list": ["CHAMELEON"], + "lens_light_model_list": ["CHAMELEON"], + } i_light, k_lens = 0, 0 - kwargs_constraints = {'joint_lens_with_light': [[i_light, k_lens, ['w_t', 'w_c', 'center_x', 'center_y', 'e1', 'e2']]]} - kwargs_lens = [{'alpha_1': 10}] - kwargs_lens_light = [{'amp': 1, 'w_t': 0.5, 'w_c': 0.1, 'center_x': 0, 'center_y': 0.3, 'e1': 0.1, 'e2': -0.2}] + kwargs_constraints = { + "joint_lens_with_light": [ + [i_light, k_lens, ["w_t", "w_c", "center_x", "center_y", "e1", "e2"]] + ] + } + kwargs_lens = [{"alpha_1": 10}] + kwargs_lens_light = [ + { + "amp": 1, + "w_t": 0.5, + "w_c": 0.1, + "center_x": 0, + "center_y": 0.3, + "e1": 0.1, + "e2": -0.2, + } + ] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light + ) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] - kwargs_lens_light_out = kwargs_return['kwargs_lens_light'] - assert kwargs_lens_out[0]['w_c'] == kwargs_lens_light[0]['w_c'] - assert kwargs_lens_light_out[0]['w_c'] == kwargs_lens_light[0]['w_c'] + kwargs_lens_out = kwargs_return["kwargs_lens"] + kwargs_lens_light_out = kwargs_return["kwargs_lens_light"] + assert kwargs_lens_out[0]["w_c"] == kwargs_lens_light[0]["w_c"] + assert kwargs_lens_light_out[0]["w_c"] == kwargs_lens_light[0]["w_c"] - kwargs_model = {'lens_model_list': ['SIS'], 'lens_light_model_list': ['SERSIC']} + kwargs_model = {"lens_model_list": ["SIS"], "lens_light_model_list": ["SERSIC"]} i_light, k_lens = 0, 0 - kwargs_constraints = {'joint_lens_with_light': [[i_light, k_lens, ['center_x', - 'center_y']]]} # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kwargs_lens_light = [{'amp': 1, 'R_sersic': 0.5, 'n_sersic': 2, 'center_x': 1, 'center_y': 1}] + kwargs_constraints = { + "joint_lens_with_light": [[i_light, k_lens, ["center_x", "center_y"]]] + } # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kwargs_lens_light = [ + {"amp": 1, "R_sersic": 0.5, "n_sersic": 2, "center_x": 1, "center_y": 1} + ] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light) - #kwargs_lens_out, kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light + ) + # kwargs_lens_out, kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] + kwargs_lens_out = 
kwargs_return["kwargs_lens"] - assert kwargs_lens_out[0]['theta_E'] == kwargs_lens[0]['theta_E'] - assert kwargs_lens_out[0]['center_x'] == kwargs_lens_light[0]['center_x'] + assert kwargs_lens_out[0]["theta_E"] == kwargs_lens[0]["theta_E"] + assert kwargs_lens_out[0]["center_x"] == kwargs_lens_light[0]["center_x"] def test_joint_lens_with_light_dict(self): - kwargs_model = {'lens_model_list': ['SHEAR'], 'lens_light_model_list': ['SERSIC']} + kwargs_model = { + "lens_model_list": ["SHEAR"], + "lens_light_model_list": ["SERSIC"], + } i_light, k_lens = 0, 0 - kwargs_constraints = {'joint_lens_with_light': [[i_light, k_lens, {'ra_0': 'center_x', 'dec_0': 'center_y'}]]} - kwargs_lens = [{'gamma1': 0.05, 'gamma2':0.06}] - kwargs_lens_light = [{'amp': 1, 'R_sersic': 1, 'n_sersic': 4, 'center_x': 0.1, 'center_y': 0.3}] + kwargs_constraints = { + "joint_lens_with_light": [ + [i_light, k_lens, {"ra_0": "center_x", "dec_0": "center_y"}] + ] + } + kwargs_lens = [{"gamma1": 0.05, "gamma2": 0.06}] + kwargs_lens_light = [ + {"amp": 1, "R_sersic": 1, "n_sersic": 4, "center_x": 0.1, "center_y": 0.3} + ] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light + ) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] - kwargs_lens_light_out = kwargs_return['kwargs_lens_light'] - assert kwargs_lens_out[0]['gamma1'] == kwargs_lens[0]['gamma1'] - assert kwargs_lens_out[0]['ra_0'] == kwargs_lens_light[0]['center_x'] - assert kwargs_lens_out[0]['dec_0'] == kwargs_lens_light[0]['center_y'] - + kwargs_lens_out = kwargs_return["kwargs_lens"] + kwargs_lens_light_out = kwargs_return["kwargs_lens_light"] + assert kwargs_lens_out[0]["gamma1"] == kwargs_lens[0]["gamma1"] + assert kwargs_lens_out[0]["ra_0"] == kwargs_lens_light[0]["center_x"] + assert kwargs_lens_out[0]["dec_0"] == kwargs_lens_light[0]["center_y"] def test_joint_source_with_point_source(self): - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], 'point_source_model_list': ['SOURCE_POSITION']} + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["SOURCE_POSITION"], + } i_source, k_ps = 0, 0 - kwargs_constraints = {'joint_source_with_point_source': [[k_ps, i_source]]} # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ + kwargs_constraints = { + "joint_source_with_point_source": [[k_ps, i_source]] + } # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kwargs_source = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 1, 'center_y': 1}] - kwargs_ps = [{'ra_source': 0.5, 'dec_source': 0.5}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kwargs_source = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 1, "center_y": 1} + ] + kwargs_ps = [{"ra_source": 0.5, "dec_source": 0.5}] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_ps=kwargs_ps) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_ps=kwargs_ps + ) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] - kwargs_source_out = kwargs_return['kwargs_source'] - #kwargs_lens_out, 
kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) - assert kwargs_lens_out[0]['theta_E'] == kwargs_lens[0]['theta_E'] - assert kwargs_source_out[0]['center_x'] == kwargs_ps[0]['ra_source'] - - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION']} + kwargs_lens_out = kwargs_return["kwargs_lens"] + kwargs_source_out = kwargs_return["kwargs_source"] + # kwargs_lens_out, kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) + assert kwargs_lens_out[0]["theta_E"] == kwargs_lens[0]["theta_E"] + assert kwargs_source_out[0]["center_x"] == kwargs_ps[0]["ra_source"] + + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } i_source, k_ps = 0, 0 - kwargs_constraints = {'joint_source_with_point_source': [[k_ps, i_source]]} # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ + kwargs_constraints = { + "joint_source_with_point_source": [[k_ps, i_source]] + } # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kwargs_source = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 1, 'center_y': 1}] - kwargs_ps = [{'ra_image': [0.5], 'dec_image': [0.5]}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kwargs_source = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 1, "center_y": 1} + ] + kwargs_ps = [{"ra_image": [0.5], "dec_image": [0.5]}] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_ps=kwargs_ps) - #kwargs_lens_out, kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_ps=kwargs_ps + ) + # kwargs_lens_out, kwargs_source_out, _, kwargs_ps_out, _ = param.args2kwargs(args) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] - kwargs_source_out = kwargs_return['kwargs_source'] - assert kwargs_lens_out[0]['theta_E'] == kwargs_lens[0]['theta_E'] - npt.assert_almost_equal(kwargs_source_out[0]['center_x'], -0.207, decimal=2) + kwargs_lens_out = kwargs_return["kwargs_lens"] + kwargs_source_out = kwargs_return["kwargs_source"] + assert kwargs_lens_out[0]["theta_E"] == kwargs_lens[0]["theta_E"] + npt.assert_almost_equal(kwargs_source_out[0]["center_x"], -0.207, decimal=2) def test_joint_lens_light_with_point_source(self): - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'point_source_model_list': ['LENSED_POSITION'], - 'lens_light_model_list': ['SERSIC']} + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "lens_light_model_list": ["SERSIC"], + } i_lens_light, k_ps = 0, 0 - kwargs_constraints = {'joint_lens_light_with_point_source': [ - [k_ps, i_lens_light]]} # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ + kwargs_constraints = { + "joint_lens_light_with_point_source": [[k_ps, i_lens_light]] + } # list[[i_point_source, k_source, ['param_name1', 'param_name2', ...]], [ - kwargs_lens = [{'theta_E': 1, 'center_x': 0, 'center_y': 0}] - kwargs_source = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 1, 'center_y': 1}] - kwargs_lens_light = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 
0.2, 'center_y': 0.2}] - kwargs_ps = [{'ra_image': [0.5], 'dec_image': [0.5]}] + kwargs_lens = [{"theta_E": 1, "center_x": 0, "center_y": 0}] + kwargs_source = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 1, "center_y": 1} + ] + kwargs_lens_light = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 0.2, "center_y": 0.2} + ] + kwargs_ps = [{"ra_image": [0.5], "dec_image": [0.5]}] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_lens_light=kwargs_lens_light, kwargs_ps=kwargs_ps) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, + kwargs_source=kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_ps=kwargs_ps, + ) kwargs_return = param.args2kwargs(args) - kwargs_lens_light_out = kwargs_return['kwargs_lens_light'] - kwargs_ps_out = kwargs_return['kwargs_ps'] - assert kwargs_lens_light_out[0]['center_x'] == kwargs_ps_out[0]['ra_image'] + kwargs_lens_light_out = kwargs_return["kwargs_lens_light"] + kwargs_ps_out = kwargs_return["kwargs_ps"] + assert kwargs_lens_light_out[0]["center_x"] == kwargs_ps_out[0]["ra_image"] def test_logsampling(self): - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'point_source_model_list': ['LENSED_POSITION'], - 'lens_light_model_list': ['SERSIC']} - kwargs_constraints = {'log_sampling_lens': [[0, ['theta_E']]]} - - kwargs_lens = [{'theta_E': 0.1, 'center_x': 0, 'center_y': 0}] - kwargs_source = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 1, 'center_y': 1}] - kwargs_lens_light = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 0.2, 'center_y': 0.2}] - kwargs_ps = [{'ra_image': [0.5], 'dec_image': [0.5]}] + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "lens_light_model_list": ["SERSIC"], + } + kwargs_constraints = {"log_sampling_lens": [[0, ["theta_E"]]]} + + kwargs_lens = [{"theta_E": 0.1, "center_x": 0, "center_y": 0}] + kwargs_source = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 1, "center_y": 1} + ] + kwargs_lens_light = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 0.2, "center_y": 0.2} + ] + kwargs_ps = [{"ra_image": [0.5], "dec_image": [0.5]}] param = Param(kwargs_model=kwargs_model, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, kwargs_lens_light=kwargs_lens_light, kwargs_ps=kwargs_ps) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, + kwargs_source=kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_ps=kwargs_ps, + ) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] + kwargs_lens_out = kwargs_return["kwargs_lens"] assert args[0] == -1 - assert kwargs_lens_out[0]['theta_E'] == 0.1 + assert kwargs_lens_out[0]["theta_E"] == 0.1 def test_with_solver(self): - kwargs_model = {'lens_model_list': ['SPEP'], 'source_light_model_list': ['SERSIC'], - 'point_source_model_list': ['LENSED_POSITION']} + kwargs_model = { + "lens_model_list": ["SPEP"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } i_lens_light, k_ps = 0, 0 - kwargs_constraints = {'solver_type': 'PROFILE', 'num_point_source_list': [4]} - - kwargs_lens = [{'theta_E': 1, 'gamma': 2, 'e1': 0.1, 'e2': 0.1, 'center_x': 0, 'center_y': 0}] - kwargs_source = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 1, 'center_y': 1}] - 
kwargs_lens_light = [{'amp': 1, 'n_sersic': 2, 'R_sersic': 0.3, 'center_x': 0.2, 'center_y': 0.2}] - lensModel = LensModel(lens_model_list=['SPEP']) + kwargs_constraints = {"solver_type": "PROFILE", "num_point_source_list": [4]} + + kwargs_lens = [ + { + "theta_E": 1, + "gamma": 2, + "e1": 0.1, + "e2": 0.1, + "center_x": 0, + "center_y": 0, + } + ] + kwargs_source = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 1, "center_y": 1} + ] + kwargs_lens_light = [ + {"amp": 1, "n_sersic": 2, "R_sersic": 0.3, "center_x": 0.2, "center_y": 0.2} + ] + lensModel = LensModel(lens_model_list=["SPEP"]) lensEquationSlover = LensEquationSolver(lensModel=lensModel) - x_image, y_image = lensEquationSlover.image_position_from_source(sourcePos_x=0.0, sourcePos_y=0.01, kwargs_lens=kwargs_lens) - print(x_image, y_image, 'test') - kwargs_ps = [{'ra_image': x_image, 'dec_image': y_image}] - param = Param(kwargs_model=kwargs_model, kwargs_lens_init=kwargs_lens, **kwargs_constraints) - args = param.kwargs2args(kwargs_lens=kwargs_lens, kwargs_source=kwargs_source, - kwargs_lens_light=kwargs_lens_light, kwargs_ps=kwargs_ps) - #kwargs_lens_out, kwargs_source_out, kwargs_lens_light_out, kwargs_ps_out, _ = param.args2kwargs(args) + x_image, y_image = lensEquationSlover.image_position_from_source( + sourcePos_x=0.0, sourcePos_y=0.01, kwargs_lens=kwargs_lens + ) + print(x_image, y_image, "test") + kwargs_ps = [{"ra_image": x_image, "dec_image": y_image}] + param = Param( + kwargs_model=kwargs_model, + kwargs_lens_init=kwargs_lens, + **kwargs_constraints + ) + args = param.kwargs2args( + kwargs_lens=kwargs_lens, + kwargs_source=kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_ps=kwargs_ps, + ) + # kwargs_lens_out, kwargs_source_out, kwargs_lens_light_out, kwargs_ps_out, _ = param.args2kwargs(args) kwargs_return = param.args2kwargs(args) - kwargs_lens_out = kwargs_return['kwargs_lens'] - kwargs_ps_out = kwargs_return['kwargs_ps'] + kwargs_lens_out = kwargs_return["kwargs_lens"] + kwargs_ps_out = kwargs_return["kwargs_ps"] dist = param.check_solver(kwargs_lens=kwargs_lens_out, kwargs_ps=kwargs_ps_out) npt.assert_almost_equal(dist, 0, decimal=10) @@ -353,30 +590,44 @@ def test_num_point_source_images(self): assert num == 2 def test_shapelet_lens(self): - kwargs_model = {'lens_model_list': ['SHAPELETS_CART'], 'source_light_model_list': [], - 'lens_light_model_list': [], 'point_source_model_list': []} - kwargs_param = {'num_shapelet_lens': 6} - kwargs_fixed_lens = [{'beta': 1}] # for SPEP lens + kwargs_model = { + "lens_model_list": ["SHAPELETS_CART"], + "source_light_model_list": [], + "lens_light_model_list": [], + "point_source_model_list": [], + } + kwargs_param = {"num_shapelet_lens": 6} + kwargs_fixed_lens = [{"beta": 1}] # for SPEP lens kwargs_fixed_source = [{}] kwargs_fixed_ps = [{}] kwargs_fixed_lens_light = [{}] kwargs_fixed_cosmo = [{}] - self.param_class = Param(kwargs_model, kwargs_fixed_lens=kwargs_fixed_lens, - kwargs_fixed_source=kwargs_fixed_source, - kwargs_fixed_lens_light=kwargs_fixed_lens_light, kwargs_fixed_ps=kwargs_fixed_ps, - kwargs_fixed_special=kwargs_fixed_cosmo, **kwargs_param) + self.param_class = Param( + kwargs_model, + kwargs_fixed_lens=kwargs_fixed_lens, + kwargs_fixed_source=kwargs_fixed_source, + kwargs_fixed_lens_light=kwargs_fixed_lens_light, + kwargs_fixed_ps=kwargs_fixed_ps, + kwargs_fixed_special=kwargs_fixed_cosmo, + **kwargs_param + ) self.param_class.print_setting() -class TestRaise(unittest.TestCase): +class TestRaise(unittest.TestCase): def 
test_raise(self): - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'point_source_model_list': ['LENSED_POSITION'], - 'lens_light_model_list': ['SERSIC']} - kwargs_constraints = {'log_sampling_lens': [[0, {'theta_E'}]]} # wrong type, dict instead of list + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "lens_light_model_list": ["SERSIC"], + } + kwargs_constraints = { + "log_sampling_lens": [[0, {"theta_E"}]] + } # wrong type, dict instead of list with self.assertRaises(TypeError): param = Param(kwargs_model=kwargs_model, **kwargs_constraints) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_sampler.py b/test/test_Sampling/test_sampler.py index 332bab6ff..ac0600b1c 100644 --- a/test/test_Sampling/test_sampler.py +++ b/test/test_Sampling/test_sampler.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -15,12 +15,9 @@ class TestSampler(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): - # data specifics sigma_bkg = 0.05 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -30,68 +27,125 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + } psf = PSF(**kwargs_psf_gaussian) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf.kernel_point_source} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": psf.kernel_point_source, + } psf_class = PSF(**kwargs_psf) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_model_list = ['SPEP'] + lens_model_list = ["SPEP"] self.kwargs_lens = [kwargs_spemd] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, - 'e1': 0.1, 'e2': 0.1} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False, 'compute_mode': 'regular'} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - 
lens_light_model_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light) + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + "compute_mode": "regular", + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light + ) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim - kwargs_data_joint = {'multi_band_list': [[kwargs_data, kwargs_psf, kwargs_numerics]], 'multi_band_type': 'single-band'} + kwargs_data["image_data"] = image_sim + kwargs_data_joint = { + "multi_band_list": [[kwargs_data, kwargs_psf, kwargs_numerics]], + "multi_band_type": "single-band", + } self.data_class = data_class self.psf_class = psf_class - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'fixed_magnification_list': [False], - } - self.kwargs_numerics = { - 'subgrid_res': 1, - 'psf_subgrid': False} - - kwargs_constraints = {'image_plane_source_list': [False] * len(source_model_list)} - - kwargs_likelihood = {'source_marg': False, - 'image_position_uncertainty': 0.004, - 'check_matched_source_position': False, - 'source_position_tolerance': 0.001, - 'source_position_sigma': 0.001, - } + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "fixed_magnification_list": [False], + } + self.kwargs_numerics = {"subgrid_res": 1, "psf_subgrid": False} + + kwargs_constraints = { + "image_plane_source_list": [False] * len(source_model_list) + } + + kwargs_likelihood = { + "source_marg": False, + "image_position_uncertainty": 0.004, + "check_matched_source_position": False, + "source_position_tolerance": 0.001, + "source_position_sigma": 0.001, + } self.param_class = Param(kwargs_model, **kwargs_constraints) - self.Likelihood = LikelihoodModule(kwargs_data_joint=kwargs_data_joint, kwargs_model=kwargs_model, - param_class=self.param_class, **kwargs_likelihood) + self.Likelihood = LikelihoodModule( + kwargs_data_joint=kwargs_data_joint, + kwargs_model=kwargs_model, + param_class=self.param_class, + **kwargs_likelihood + ) self.sampler = Sampler(likelihoodModule=self.Likelihood) def test_pso(self): n_particles = 2 n_iterations = 2 - result, chain = self.sampler.pso(n_particles, n_iterations, lower_start=None, upper_start=None, threadCount=1, init_pos=None, - mpi=False, print_key='PSO') + result, chain = self.sampler.pso( + n_particles, + n_iterations, + lower_start=None, + upper_start=None, + threadCount=1, + init_pos=None, + mpi=False, + print_key="PSO", + ) assert len(result) == 16 @@ -99,22 +153,42 @@ def test_mcmc_emcee(self): n_walkers = 36 n_run = 2 n_burn = 2 - mean_start = self.param_class.kwargs2args(kwargs_lens=self.kwargs_lens, kwargs_source=self.kwargs_source, - kwargs_lens_light=self.kwargs_lens_light) + mean_start = self.param_class.kwargs2args( + kwargs_lens=self.kwargs_lens, + kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + ) sigma_start = np.ones_like(mean_start) * 0.1 - samples, dist = self.sampler.mcmc_emcee(n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=False) + samples, dist = 
self.sampler.mcmc_emcee( + n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=False + ) assert len(samples) == n_walkers * n_run assert len(dist) == len(samples) # test of backup file # 1) run a chain specifiying a backup file name - backup_filename = 'test_mcmc_emcee.h5' - samples_1, dist_1 = self.sampler.mcmc_emcee(n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=False, - backend_filename=backup_filename) + backup_filename = "test_mcmc_emcee.h5" + samples_1, dist_1 = self.sampler.mcmc_emcee( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=False, + backend_filename=backup_filename, + ) assert len(samples_1) == n_walkers * n_run # 2) run a chain starting from the backup of previous run - samples_2, dist_2 = self.sampler.mcmc_emcee(n_walkers, n_run, n_burn, mean_start, sigma_start, mpi=False, - backend_filename=backup_filename, start_from_backend=True) + samples_2, dist_2 = self.sampler.mcmc_emcee( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + mpi=False, + backend_filename=backup_filename, + start_from_backend=True, + ) assert len(samples_2) == len(samples_1) + n_walkers * n_run assert len(dist_2) == len(samples_2) @@ -124,17 +198,28 @@ def test_mcmc_zeus(self): n_walkers = 36 n_run = 2 n_burn = 2 - mean_start = self.param_class.kwargs2args(kwargs_lens=self.kwargs_lens, kwargs_source=self.kwargs_source, - kwargs_lens_light=self.kwargs_lens_light) + mean_start = self.param_class.kwargs2args( + kwargs_lens=self.kwargs_lens, + kwargs_source=self.kwargs_source, + kwargs_lens_light=self.kwargs_lens_light, + ) sigma_start = np.ones_like(mean_start) * 0.1 - samples, dist = self.sampler.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start) + samples, dist = self.sampler.mcmc_zeus( + n_walkers, n_run, n_burn, mean_start, sigma_start + ) assert len(samples) == n_walkers * n_run assert len(dist) == len(samples) # test of backup file - backup_filename = 'test_mcmc_zeus.h5' - samples_1, dist_1 = self.sampler.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start, - backend_filename=backup_filename) + backup_filename = "test_mcmc_zeus.h5" + samples_1, dist_1 = self.sampler.mcmc_zeus( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + backend_filename=backup_filename, + ) assert len(samples_1) == n_walkers * n_run os.remove(backup_filename) @@ -144,19 +229,39 @@ def test_mcmc_zeus(self): splitr_callback = True miniter_callback = True - samples_ac, dist_ac = self.sampler.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start, - autocorrelation_callback=autocorrelation_callback) + samples_ac, dist_ac = self.sampler.mcmc_zeus( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + autocorrelation_callback=autocorrelation_callback, + ) assert len(samples_ac) == n_walkers * n_run # the default nsplits is 2, here we set it to 1 because the test chain is very short # i.e. 
4/3 != integer; 4/2 = integer - samples_sp, dist_sp = self.sampler.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start, - splitr_callback=splitr_callback, nsplits=1) + samples_sp, dist_sp = self.sampler.mcmc_zeus( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + splitr_callback=splitr_callback, + nsplits=1, + ) assert len(samples_sp) == n_walkers * n_run - samples_mi, dist_mi = self.sampler.mcmc_zeus(n_walkers, n_run, n_burn, mean_start, sigma_start, - miniter_callback=miniter_callback) + samples_mi, dist_mi = self.sampler.mcmc_zeus( + n_walkers, + n_run, + n_burn, + mean_start, + sigma_start, + miniter_callback=miniter_callback, + ) assert len(samples_mi) == n_walkers * n_run -if __name__ == '__main__': + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Sampling/test_special_param.py b/test/test_Sampling/test_special_param.py index 5919ab598..4f6499709 100644 --- a/test/test_Sampling/test_special_param.py +++ b/test/test_Sampling/test_special_param.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy.testing as npt @@ -7,16 +7,29 @@ class TestParam(object): - def setup_method(self): - self.param = SpecialParam(Ddt_sampling=True, kwargs_fixed=None, point_source_offset=True, num_images=2, - source_size=True, num_tau0=2, num_z_sampling=3, source_grid_offset=True, - kwargs_lower={'z_sampling': [0.05, 0.1, 0.5]}, - kwargs_upper={'z_sampling': [0.2, 1., 1.]} - ) - self.kwargs = {'D_dt': 1988, 'delta_x_image': [0, 0], 'delta_y_image': [0, 0], 'source_size': 0.1, - 'tau0_list': [0, 1], 'z_sampling': np.array([0.1, 0.5, 2]), - 'delta_x_source_grid': 0, 'delta_y_source_grid': 0} + self.param = SpecialParam( + Ddt_sampling=True, + kwargs_fixed=None, + point_source_offset=True, + num_images=2, + source_size=True, + num_tau0=2, + num_z_sampling=3, + source_grid_offset=True, + kwargs_lower={"z_sampling": [0.05, 0.1, 0.5]}, + kwargs_upper={"z_sampling": [0.2, 1.0, 1.0]}, + ) + self.kwargs = { + "D_dt": 1988, + "delta_x_image": [0, 0], + "delta_y_image": [0, 0], + "source_size": 0.1, + "tau0_list": [0, 1], + "z_sampling": np.array([0.1, 0.5, 2]), + "delta_x_source_grid": 0, + "delta_y_source_grid": 0, + } def test_get_setParams(self): args = self.param.set_params(self.kwargs) @@ -25,16 +38,26 @@ def test_get_setParams(self): for k in range(len(args)): npt.assert_almost_equal(args[k], args_new[k], decimal=8) - param_fixed = SpecialParam(Ddt_sampling=True, kwargs_fixed=self.kwargs, point_source_offset=True, num_images=2, - source_size=True, num_z_sampling=3, num_tau0=2) + param_fixed = SpecialParam( + Ddt_sampling=True, + kwargs_fixed=self.kwargs, + point_source_offset=True, + num_images=2, + source_size=True, + num_z_sampling=3, + num_tau0=2, + ) kwargs_new, i = param_fixed.get_params(args=[], i=0) - kwargs_new['D_dt'] = self.kwargs['D_dt'] + kwargs_new["D_dt"] = self.kwargs["D_dt"] - special_param = SpecialParam(num_z_sampling=1, kwargs_lower={'z_sampling': [0.1]}, - kwargs_upper={'z_sampling': [0.2]}) + special_param = SpecialParam( + num_z_sampling=1, + kwargs_lower={"z_sampling": [0.1]}, + kwargs_upper={"z_sampling": [0.2]}, + ) kwargs_test, i = special_param.get_params(args=[0.3], i=0, impose_bound=True) - assert kwargs_test['z_sampling'] == [0.2] + assert kwargs_test["z_sampling"] == [0.2] def test_num_params(self): num, list = self.param.num_param() @@ -42,64 +65,79 @@ def test_num_params(self): def test_mass_scaling(self): kwargs_fixed = {} - param = SpecialParam(kwargs_fixed=kwargs_fixed, mass_scaling=True, 
num_scale_factor=3) - kwargs = {'scale_factor': [0, 1, 2]} + param = SpecialParam( + kwargs_fixed=kwargs_fixed, mass_scaling=True, num_scale_factor=3 + ) + kwargs = {"scale_factor": [0, 1, 2]} args = param.set_params(kwargs) assert len(args) == 3 num_param, param_list = param.num_param() assert num_param == 3 kwargs_new, _ = param.get_params(args, i=0) - assert kwargs_new['scale_factor'][1] == 1 + assert kwargs_new["scale_factor"][1] == 1 param = SpecialParam(kwargs_fixed=kwargs, mass_scaling=True, num_scale_factor=3) - kwargs_in = {'scale_factor': [9, 9, 9]} + kwargs_in = {"scale_factor": [9, 9, 9]} args = param.set_params(kwargs_in) assert len(args) == 0 kwargs_new, _ = param.get_params(args, i=0) print(kwargs_new) - assert kwargs_new['scale_factor'][1] == 1 + assert kwargs_new["scale_factor"][1] == 1 def test_delta_images(self): - param = SpecialParam(num_images=2, point_source_offset=True, kwargs_fixed={}, - kwargs_lower={'delta_x_image': [-1, -1], 'delta_y_image': [-1, -1]}, - kwargs_upper={'delta_x_image': [1, 1], 'delta_y_image': [1, 1]}) - kwargs = {'delta_x_image': [0.5, 0.5], 'delta_y_image': [0.5, 0.5]} + param = SpecialParam( + num_images=2, + point_source_offset=True, + kwargs_fixed={}, + kwargs_lower={"delta_x_image": [-1, -1], "delta_y_image": [-1, -1]}, + kwargs_upper={"delta_x_image": [1, 1], "delta_y_image": [1, 1]}, + ) + kwargs = {"delta_x_image": [0.5, 0.5], "delta_y_image": [0.5, 0.5]} args = param.set_params(kwargs_special=kwargs) kwargs_new, _ = param.get_params(args, i=0) print(kwargs_new) - assert kwargs_new['delta_x_image'][0] == kwargs['delta_x_image'][0] + assert kwargs_new["delta_x_image"][0] == kwargs["delta_x_image"][0] def test_source_grid_offsets(self): - param = SpecialParam(kwargs_lower={'delta_x_source_grid': -1, 'delta_y_source_grid': 1}, - kwargs_upper={'delta_x_source_grid': 1, 'delta_y_source_grid': 1}, - source_grid_offset=True) - kwargs = {'delta_x_source_grid': 0.1, 'delta_y_source_grid': 0.1} + param = SpecialParam( + kwargs_lower={"delta_x_source_grid": -1, "delta_y_source_grid": 1}, + kwargs_upper={"delta_x_source_grid": 1, "delta_y_source_grid": 1}, + source_grid_offset=True, + ) + kwargs = {"delta_x_source_grid": 0.1, "delta_y_source_grid": 0.1} args = param.set_params(kwargs_special=kwargs) kwargs_new, _ = param.get_params(args, i=0) - assert kwargs_new['delta_x_source_grid'] == kwargs['delta_x_source_grid'] - assert kwargs_new['delta_y_source_grid'] == kwargs['delta_y_source_grid'] - - kwargs_fixed = {'delta_x_source_grid': 0, 'delta_y_source_grid': 0} - param = SpecialParam(kwargs_lower={'delta_x_source_grid': -1, 'delta_y_source_grid': 1}, - kwargs_upper={'delta_x_source_grid': 1, 'delta_y_source_grid': 1}, - source_grid_offset=True, kwargs_fixed=kwargs_fixed) - kwargs = {'delta_x_source_grid': 0.1, 'delta_y_source_grid': 0.1} + assert kwargs_new["delta_x_source_grid"] == kwargs["delta_x_source_grid"] + assert kwargs_new["delta_y_source_grid"] == kwargs["delta_y_source_grid"] + + kwargs_fixed = {"delta_x_source_grid": 0, "delta_y_source_grid": 0} + param = SpecialParam( + kwargs_lower={"delta_x_source_grid": -1, "delta_y_source_grid": 1}, + kwargs_upper={"delta_x_source_grid": 1, "delta_y_source_grid": 1}, + source_grid_offset=True, + kwargs_fixed=kwargs_fixed, + ) + kwargs = {"delta_x_source_grid": 0.1, "delta_y_source_grid": 0.1} args = param.set_params(kwargs_special=kwargs) kwargs_new, _ = param.get_params(args, i=0) - assert kwargs_new['delta_x_source_grid'] == kwargs_fixed['delta_x_source_grid'] - assert 
kwargs_new['delta_y_source_grid'] == kwargs_fixed['delta_y_source_grid'] + assert kwargs_new["delta_x_source_grid"] == kwargs_fixed["delta_x_source_grid"] + assert kwargs_new["delta_y_source_grid"] == kwargs_fixed["delta_y_source_grid"] def test_general_scaling(self): kwargs_fixed = {} - param = SpecialParam(kwargs_fixed=kwargs_fixed, - general_scaling_params={'param': [False, 1, 1, False, 2]}) - args = param.set_params({'param_scale_factor': [1, 2], 'param_scale_pow': [3, 4]}) + param = SpecialParam( + kwargs_fixed=kwargs_fixed, + general_scaling_params={"param": [False, 1, 1, False, 2]}, + ) + args = param.set_params( + {"param_scale_factor": [1, 2], "param_scale_pow": [3, 4]} + ) assert len(args) == 4 num_param, param_list = param.num_param() assert num_param == 4 kwargs_new, _ = param.get_params(args, i=0) - assert kwargs_new['param_scale_factor'] == [1, 2] - assert kwargs_new['param_scale_pow'] == [3, 4] + assert kwargs_new["param_scale_factor"] == [1, 2] + assert kwargs_new["param_scale_pow"] == [3, 4] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_DES.py b/test/test_SimulationAPI/test_ObservationConfig/test_DES.py index 4e6855a5c..3049084b1 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_DES.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_DES.py @@ -5,13 +5,12 @@ class TestDES(unittest.TestCase): - def setUp(self): self.g = DES() # default is g_band - self.r = DES(band='r') - self.i = DES(band='i') - self.z = DES(band='z') - self.Y = DES(band='Y') + self.r = DES(band="r") + self.i = DES(band="i") + self.z = DES(band="z") + self.Y = DES(band="Y") kwargs_g_band = self.g.kwargs_single_band() kwargs_r_band = self.r.kwargs_single_band() @@ -26,29 +25,33 @@ def setUp(self): self.Y_band = SingleBand(**kwargs_Y_band) # dictionaries mapping DES kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.g.camera) def test_DES_class(self): default = self.g - explicit_g = DES(band='g') + explicit_g = DES(band="g") self.assertEqual(explicit_g.camera, default.camera) self.assertEqual(explicit_g.obs, default.obs) with self.assertRaises(ValueError): - bad_band = DES(band='u') + bad_band = DES(band="u") with self.assertRaises(ValueError): - bad_psf = DES(psf_type='blah') + bad_psf = DES(psf_type="blah") single_year = DES(coadd_years=1) self.assertEqual(single_year.obs["num_exposures"], 3) @@ -58,20 +61,45 @@ def test_DES_class(self): def test_DES_camera(self): # comparing camera settings in DES instance with those in Instrument instance for config, setting in self.camera_settings.items(): - self.assertEqual(self.g.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.g.camera[config], + getattr(self.instrument, setting), + 
msg=f"{config} did not match", + ) def test_DES_obs(self): # comparing obs settings in DES instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.g.obs[config], getattr(self.g_band, setting), msg=f"{config} did not match") - self.assertEqual(self.r.obs[config], getattr(self.r_band, setting), msg=f"{config} did not match") - self.assertEqual(self.i.obs[config], getattr(self.i_band, setting), msg=f"{config} did not match") - self.assertEqual(self.z.obs[config], getattr(self.z_band, setting), msg=f"{config} did not match") - self.assertEqual(self.Y.obs[config], getattr(self.Y_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.g.obs[config], + getattr(self.g_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.r.obs[config], + getattr(self.r_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.i.obs[config], + getattr(self.i_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.z.obs[config], + getattr(self.z_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.Y.obs[config], + getattr(self.Y_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): kwargs_g = util.merge_dicts(self.g.camera, self.g.obs) self.assertEqual(self.g.kwargs_single_band(), kwargs_g) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_Euclid.py b/test/test_SimulationAPI/test_ObservationConfig/test_Euclid.py index 11465ecdb..875586be8 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_Euclid.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_Euclid.py @@ -5,7 +5,6 @@ class TestEuclid(unittest.TestCase): - def setUp(self): self.VIS = Euclid() @@ -14,29 +13,33 @@ def setUp(self): self.VIS_band = SingleBand(**kwargs_VIS) # dictionaries mapping Euclid kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.VIS.camera) def test_Euclid_class(self): default = self.VIS - explicit = Euclid(band='VIS') + explicit = Euclid(band="VIS") self.assertEqual(explicit.camera, default.camera) self.assertEqual(explicit.obs, default.obs) with self.assertRaises(ValueError): - bad_band = Euclid(band='g') + bad_band = Euclid(band="g") with self.assertRaises(ValueError): - bad_psf = Euclid(psf_type='pixel') + bad_psf = Euclid(psf_type="pixel") single_year = Euclid(coadd_years=2) self.assertEqual(single_year.obs["num_exposures"], 1) @@ -46,17 +49,25 @@ def test_Euclid_class(self): def test_Euclid_camera(self): # comparing camera settings in Euclid instance with those in Instrument instance for config, setting in self.camera_settings.items(): - self.assertEqual(self.VIS.camera[config], getattr(self.instrument, 
setting), msg=f"{config} did not match") + self.assertEqual( + self.VIS.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def test_Euclid_obs(self): # comparing obs settings in Euclid instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.VIS.obs[config], getattr(self.VIS_band, setting), msg=f"{config} did not match") - + self.assertEqual( + self.VIS.obs[config], + getattr(self.VIS_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): kwargs_VIS = util.merge_dicts(self.VIS.camera, self.VIS.obs) self.assertEqual(self.VIS.kwargs_single_band(), kwargs_VIS) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_HST.py b/test/test_SimulationAPI/test_ObservationConfig/test_HST.py index 021e3d026..aa6669e8a 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_HST.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_HST.py @@ -5,11 +5,10 @@ class TestHST(unittest.TestCase): - def setUp(self): self.TDLMC_F160W = HST() # default is TDLMC_F160W - self.F160W = HST(band='F160W') - self.F160W2 = HST(band='F160W', psf_type='GAUSSIAN') + self.F160W = HST(band="F160W") + self.F160W2 = HST(band="F160W", psf_type="GAUSSIAN") kwargs_TDLMC_F160W = self.TDLMC_F160W.kwargs_single_band() kwargs_F160W = self.F160W.kwargs_single_band() @@ -20,29 +19,33 @@ def setUp(self): self.F160W2_band = SingleBand(**kwargs_F160W2) # dictionaries mapping HST kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.TDLMC_F160W.camera) - def test_HST_class(self): #TODO: update; also text pixel/gaussian + def test_HST_class(self): # TODO: update; also text pixel/gaussian default = self.TDLMC_F160W - explicit_TDLMC_F160W = HST(band='TDLMC_F160W', psf_type='PIXEL') + explicit_TDLMC_F160W = HST(band="TDLMC_F160W", psf_type="PIXEL") self.assertEqual(explicit_TDLMC_F160W.camera, default.camera) self.assertEqual(explicit_TDLMC_F160W.obs, default.obs) with self.assertRaises(ValueError): - bad_band = HST(band='g') + bad_band = HST(band="g") with self.assertRaises(ValueError): - bad_psf = HST(psf_type='blah') + bad_psf = HST(psf_type="blah") with self.assertRaises(ValueError): bad_coadd_years = HST(coadd_years=100) @@ -50,18 +53,37 @@ def test_HST_class(self): #TODO: update; also text pixel/gaussian def test_HST_camera(self): # comparing camera settings in HST instance with those in Instrument instance for config, setting in self.camera_settings.items(): - self.assertEqual(self.TDLMC_F160W.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.TDLMC_F160W.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def 
test_HST_obs(self): # comparing obs settings in HST instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.TDLMC_F160W.obs[config], getattr(self.TDLMC_F160W_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F160W.obs[config], getattr(self.F160W_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F160W2.obs[config], getattr(self.F160W2_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.TDLMC_F160W.obs[config], + getattr(self.TDLMC_F160W_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F160W.obs[config], + getattr(self.F160W_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F160W2.obs[config], + getattr(self.F160W2_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): - kwargs_TDLMC_F160W = util.merge_dicts(self.TDLMC_F160W.camera, self.TDLMC_F160W.obs) + kwargs_TDLMC_F160W = util.merge_dicts( + self.TDLMC_F160W.camera, self.TDLMC_F160W.obs + ) self.assertEqual(self.TDLMC_F160W.kwargs_single_band(), kwargs_TDLMC_F160W) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_JWST.py b/test/test_SimulationAPI/test_ObservationConfig/test_JWST.py index 249557809..2ed0dff0c 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_JWST.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_JWST.py @@ -5,11 +5,10 @@ class TestJWST(unittest.TestCase): - def setUp(self): self.F200W = JWST() # default is F200W - self.F356W = JWST(band='F356W') - self.F356W2 = JWST(band='F356W', psf_type='GAUSSIAN') + self.F356W = JWST(band="F356W") + self.F356W2 = JWST(band="F356W", psf_type="GAUSSIAN") kwargs_F200W = self.F200W.kwargs_single_band() kwargs_F356W = self.F356W.kwargs_single_band() @@ -20,29 +19,33 @@ def setUp(self): self.F356W2_band = SingleBand(**kwargs_F356W2) # dictionaries mapping JWST kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.F200W.camera) def test_JWST_class(self): default = self.F200W - explicit_F200W = JWST(band='F200W', psf_type='PIXEL') + explicit_F200W = JWST(band="F200W", psf_type="PIXEL") self.assertEqual(explicit_F200W.camera, default.camera) self.assertEqual(explicit_F200W.obs, default.obs) with self.assertRaises(ValueError): - bad_band = JWST(band='g') + bad_band = JWST(band="g") with self.assertRaises(ValueError): - bad_psf = JWST(psf_type='blah') + bad_psf = JWST(psf_type="blah") with self.assertRaises(ValueError): bad_coadd_years = JWST(coadd_years=100) @@ -50,18 +53,35 @@ def test_JWST_class(self): def test_JWST_camera(self): # comparing camera settings in JWST instance with those in Instrument instance for config, setting in 
self.camera_settings.items(): - self.assertEqual(self.F200W.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.F200W.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def test_JWST_obs(self): # comparing obs settings in JWST instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.F200W.obs[config], getattr(self.F200W_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F356W.obs[config], getattr(self.F356W_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F356W2.obs[config], getattr(self.F356W2_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.F200W.obs[config], + getattr(self.F200W_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F356W.obs[config], + getattr(self.F356W_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F356W2.obs[config], + getattr(self.F356W2_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): kwargs_F200W = util.merge_dicts(self.F200W.camera, self.F200W.obs) self.assertEqual(self.F200W.kwargs_single_band(), kwargs_F200W) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_LSST.py b/test/test_SimulationAPI/test_ObservationConfig/test_LSST.py index 3505852dc..2fbd27c48 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_LSST.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_LSST.py @@ -5,14 +5,13 @@ class TestLSST(unittest.TestCase): - def setUp(self): - self.u = LSST(band='u') + self.u = LSST(band="u") self.g = LSST() # default is g_band - self.r = LSST(band='r') - self.i = LSST(band='i') - self.z = LSST(band='z') - self.y = LSST(band='Y') # same as band='y' + self.r = LSST(band="r") + self.i = LSST(band="i") + self.z = LSST(band="z") + self.y = LSST(band="Y") # same as band='y' kwargs_u_band = self.u.kwargs_single_band() kwargs_g_band = self.g.kwargs_single_band() @@ -21,7 +20,6 @@ def setUp(self): kwargs_z_band = self.z.kwargs_single_band() kwargs_y_band = self.y.kwargs_single_band() - self.u_band = SingleBand(**kwargs_u_band) self.g_band = SingleBand(**kwargs_g_band) self.r_band = SingleBand(**kwargs_r_band) @@ -30,32 +28,36 @@ def setUp(self): self.y_band = SingleBand(**kwargs_y_band) # dictionaries mapping LSST kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.g.camera) def test_LSST_class(self): default = self.g - explicit_g = LSST(band='g') + explicit_g = LSST(band="g") self.assertEqual(explicit_g.camera, default.camera) self.assertEqual(explicit_g.obs, default.obs) with self.assertRaises(ValueError): - bad_band_1 = LSST(band='9') 
+ bad_band_1 = LSST(band="9") with self.assertRaises(ValueError): - bad_band_2 = LSST(band='H') + bad_band_2 = LSST(band="H") with self.assertRaises(ValueError): - bad_psf = LSST(psf_type='blah') + bad_psf = LSST(psf_type="blah") single_year = LSST(coadd_years=1) self.assertEqual(single_year.obs["num_exposures"], 20) @@ -65,21 +67,50 @@ def test_LSST_class(self): def test_LSST_camera(self): # comparing camera settings in LSST instance with those in Instrument instance for config, setting in self.camera_settings.items(): - self.assertEqual(self.g.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.g.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def test_LSST_obs(self): # comparing obs settings in LSST instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.u.obs[config], getattr(self.u_band, setting), msg=f"{config} did not match") - self.assertEqual(self.g.obs[config], getattr(self.g_band, setting), msg=f"{config} did not match") - self.assertEqual(self.r.obs[config], getattr(self.r_band, setting), msg=f"{config} did not match") - self.assertEqual(self.i.obs[config], getattr(self.i_band, setting), msg=f"{config} did not match") - self.assertEqual(self.z.obs[config], getattr(self.z_band, setting), msg=f"{config} did not match") - self.assertEqual(self.y.obs[config], getattr(self.y_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.u.obs[config], + getattr(self.u_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.g.obs[config], + getattr(self.g_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.r.obs[config], + getattr(self.r_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.i.obs[config], + getattr(self.i_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.z.obs[config], + getattr(self.z_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.y.obs[config], + getattr(self.y_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): kwargs_g = util.merge_dicts(self.g.camera, self.g.obs) self.assertEqual(self.g.kwargs_single_band(), kwargs_g) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_Roman.py b/test/test_SimulationAPI/test_ObservationConfig/test_Roman.py index 7845d7b2a..bcb85dce8 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_Roman.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_Roman.py @@ -7,15 +7,14 @@ class TestRoman(unittest.TestCase): - def setUp(self): self.F062 = Roman() # default is F062 - self.F087 = Roman(band='F087', survey_mode='microlensing') - self.F106 = Roman(band='F106', psf_type='GAUSSIAN') - self.F129 = Roman(band='F129', psf_type='GAUSSIAN') - self.F158 = Roman(band='F158', psf_type='GAUSSIAN') - self.F184 = Roman(band='F184', psf_type='GAUSSIAN') - self.F146 = Roman(band='F146', survey_mode='microlensing', psf_type='GAUSSIAN') + self.F087 = Roman(band="F087", survey_mode="microlensing") + self.F106 = Roman(band="F106", psf_type="GAUSSIAN") + self.F129 = Roman(band="F129", psf_type="GAUSSIAN") + self.F158 = Roman(band="F158", psf_type="GAUSSIAN") + self.F184 = Roman(band="F184", psf_type="GAUSSIAN") + self.F146 = Roman(band="F146", survey_mode="microlensing", psf_type="GAUSSIAN") kwargs_F062 = 
self.F062.kwargs_single_band() kwargs_F087 = self.F087.kwargs_single_band() @@ -34,69 +33,113 @@ def setUp(self): self.F146_band = SingleBand(**kwargs_F146) # dictionaries mapping Roman kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.F062.camera) def test_Roman_class(self): default = self.F062 - explicit_F062 = Roman(band='F062') + explicit_F062 = Roman(band="F062") self.assertEqual(explicit_F062.camera, default.camera) self.assertEqual(explicit_F062.obs, default.obs) with self.assertRaises(ValueError): - bad_band = Roman(band='g') - + bad_band = Roman(band="g") + with self.assertRaises(ValueError): - bad_band_2 = Roman(band='9') + bad_band_2 = Roman(band="9") with self.assertRaises(ValueError): - bad_psf = Roman(psf_type='blah') - + bad_psf = Roman(psf_type="blah") + with self.assertRaises(ValueError): - bad_band_wide = Roman(band='F087') + bad_band_wide = Roman(band="F087") with self.assertRaises(ValueError): - bad_band_microlensing = Roman(band='F062', survey_mode='microlensing') + bad_band_microlensing = Roman(band="F062", survey_mode="microlensing") with self.assertRaises(ValueError): - bad_survey_mode = Roman(survey_mode='blah') + bad_survey_mode = Roman(survey_mode="blah") def test_Roman_camera(self): # comparing camera settings in Roman instance with those in Instrument instance for config, setting in self.camera_settings.items(): - self.assertEqual(self.F062.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.F062.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def test_Roman_obs(self): # comparing obs settings in HST instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.F062.obs[config], getattr(self.F062_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F087.obs[config], getattr(self.F087_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F106.obs[config], getattr(self.F106_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F129.obs[config], getattr(self.F129_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F158.obs[config], getattr(self.F158_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F184.obs[config], getattr(self.F184_band, setting), msg=f"{config} did not match") - self.assertEqual(self.F146.obs[config], getattr(self.F146_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.F062.obs[config], + getattr(self.F062_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F087.obs[config], + getattr(self.F087_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F106.obs[config], + getattr(self.F106_band, setting), + 
msg=f"{config} did not match", + ) + self.assertEqual( + self.F129.obs[config], + getattr(self.F129_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F158.obs[config], + getattr(self.F158_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F184.obs[config], + getattr(self.F184_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.F146.obs[config], + getattr(self.F146_band, setting), + msg=f"{config} did not match", + ) def test_Roman_psf_pixel(self): - self.F062_pixel = Roman(psf_type = 'PIXEL') + self.F062_pixel = Roman(psf_type="PIXEL") import lenstronomy + module_path = os.path.dirname(lenstronomy.__file__) - psf_filename = os.path.join(module_path, 'SimulationAPI/ObservationConfig/PSF_models/F062.fits') + psf_filename = os.path.join( + module_path, "SimulationAPI/ObservationConfig/PSF_models/F062.fits" + ) kernel = pyfits.getdata(psf_filename) - self.assertEqual(self.F062_pixel.obs['kernel_point_source'].all(), kernel.all(), msg="PSF did not match") + self.assertEqual( + self.F062_pixel.obs["kernel_point_source"].all(), + kernel.all(), + msg="PSF did not match", + ) def test_kwargs_single_band(self): kwargs_F062 = util.merge_dicts(self.F062.camera, self.F062.obs) self.assertEqual(self.F062.kwargs_single_band(), kwargs_F062) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_ObservationConfig/test_ZTF.py b/test/test_SimulationAPI/test_ObservationConfig/test_ZTF.py index 06d93b72f..1dd55acd4 100644 --- a/test/test_SimulationAPI/test_ObservationConfig/test_ZTF.py +++ b/test/test_SimulationAPI/test_ObservationConfig/test_ZTF.py @@ -5,11 +5,10 @@ class TestZTF(unittest.TestCase): - def setUp(self): self.g = ZTF() # default is g_band - self.r = ZTF(band='r') - self.i = ZTF(band='i') + self.r = ZTF(band="r") + self.i = ZTF(band="i") kwargs_g_band = self.g.kwargs_single_band() kwargs_r_band = self.r.kwargs_single_band() @@ -20,29 +19,33 @@ def setUp(self): self.i_band = SingleBand(**kwargs_i_band) # dictionaries mapping ZTF kwargs to SingleBand kwargs - self.camera_settings = {'read_noise': '_read_noise', - 'pixel_scale': 'pixel_scale', - 'ccd_gain': 'ccd_gain'} - self.obs_settings = {'exposure_time': '_exposure_time', - 'sky_brightness': '_sky_brightness_', - 'magnitude_zero_point': '_magnitude_zero_point', - 'num_exposures': '_num_exposures', - 'seeing': '_seeing', - 'psf_type': '_psf_type'} + self.camera_settings = { + "read_noise": "_read_noise", + "pixel_scale": "pixel_scale", + "ccd_gain": "ccd_gain", + } + self.obs_settings = { + "exposure_time": "_exposure_time", + "sky_brightness": "_sky_brightness_", + "magnitude_zero_point": "_magnitude_zero_point", + "num_exposures": "_num_exposures", + "seeing": "_seeing", + "psf_type": "_psf_type", + } self.instrument = Instrument(**self.g.camera) def test_ZTF_class(self): default = self.g - explicit_g = ZTF(band='g') + explicit_g = ZTF(band="g") self.assertEqual(explicit_g.camera, default.camera) self.assertEqual(explicit_g.obs, default.obs) with self.assertRaises(ValueError): - bad_band = ZTF(band='z') + bad_band = ZTF(band="z") with self.assertRaises(ValueError): - bad_psf = ZTF(psf_type='blah') + bad_psf = ZTF(psf_type="blah") single_year = ZTF(coadd_years=1) self.assertEqual(single_year.obs["num_exposures"], 13) @@ -52,19 +55,35 @@ def test_ZTF_class(self): def test_ZTF_camera(self): # comparing camera settings in ZTF instance with those in Instrument instance for config, setting in 
self.camera_settings.items(): - self.assertEqual(self.g.camera[config], getattr(self.instrument, setting), msg=f"{config} did not match") + self.assertEqual( + self.g.camera[config], + getattr(self.instrument, setting), + msg=f"{config} did not match", + ) def test_ZTF_obs(self): # comparing obs settings in ZTF instance with those in SingleBand instance for config, setting in self.obs_settings.items(): - self.assertEqual(self.g.obs[config], getattr(self.g_band, setting), msg=f"{config} did not match") - self.assertEqual(self.r.obs[config], getattr(self.r_band, setting), msg=f"{config} did not match") - self.assertEqual(self.i.obs[config], getattr(self.i_band, setting), msg=f"{config} did not match") + self.assertEqual( + self.g.obs[config], + getattr(self.g_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.r.obs[config], + getattr(self.r_band, setting), + msg=f"{config} did not match", + ) + self.assertEqual( + self.i.obs[config], + getattr(self.i_band, setting), + msg=f"{config} did not match", + ) def test_kwargs_single_band(self): kwargs_g = util.merge_dicts(self.g.camera, self.g.obs) self.assertEqual(self.g.kwargs_single_band(), kwargs_g) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_SimulationAPI/test_data_api.py b/test/test_SimulationAPI/test_data_api.py index ff0fb0dcd..2970d1a35 100644 --- a/test/test_SimulationAPI/test_data_api.py +++ b/test/test_SimulationAPI/test_data_api.py @@ -5,39 +5,60 @@ class TestDataAPI(object): - def setup_method(self): numpix = 10 - self.ccd_gain = 4. + self.ccd_gain = 4.0 self.pixel_scale = 0.13 - self.read_noise = 10. - kwargs_instrument = {'read_noise': self.read_noise, 'pixel_scale': self.pixel_scale, 'ccd_gain': self.ccd_gain} + self.read_noise = 10.0 + kwargs_instrument = { + "read_noise": self.read_noise, + "pixel_scale": self.pixel_scale, + "ccd_gain": self.ccd_gain, + } exposure_time = 100 - sky_brightness = 20. - self.magnitude_zero_point = 21. 
+ sky_brightness = 20.0 + self.magnitude_zero_point = 21.0 num_exposures = 2 seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN', 'kernel_point_source': None} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + "kernel_point_source": None, + } self.kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observations) - self.api = DataAPI(numpix=numpix, data_count_unit='ADU', **self.kwargs_data) + self.api = DataAPI(numpix=numpix, data_count_unit="ADU", **self.kwargs_data) - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'PIXEL', 'kernel_point_source': np.ones((3, 3))} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "PIXEL", + "kernel_point_source": np.ones((3, 3)), + } kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observations) - self.api_pixel = DataAPI(numpix=numpix, data_count_unit='ADU', **kwargs_data) + self.api_pixel = DataAPI(numpix=numpix, data_count_unit="ADU", **kwargs_data) self.ra_at_xy_0 = 0.02 self.dec_at_xy_0 = 0.02 - self.transform_pix2angle = [[-self.pixel_scale,0],[0,self.pixel_scale]] - kwargs_pixel_grid = {'ra_at_xy_0':self.ra_at_xy_0,'dec_at_xy_0':self.dec_at_xy_0, - 'transform_pix2angle':self.transform_pix2angle} - self.api_pixel_grid = DataAPI(numpix=numpix, - kwargs_pixel_grid=kwargs_pixel_grid, - data_count_unit='ADU',**self.kwargs_data) + self.transform_pix2angle = [[-self.pixel_scale, 0], [0, self.pixel_scale]] + kwargs_pixel_grid = { + "ra_at_xy_0": self.ra_at_xy_0, + "dec_at_xy_0": self.dec_at_xy_0, + "transform_pix2angle": self.transform_pix2angle, + } + self.api_pixel_grid = DataAPI( + numpix=numpix, + kwargs_pixel_grid=kwargs_pixel_grid, + data_count_unit="ADU", + **self.kwargs_data + ) def test_data_class(self): data_class = self.api.data_class @@ -45,51 +66,67 @@ def test_data_class(self): def test_psf_class(self): psf_class = self.api.psf_class - assert psf_class.psf_type == 'GAUSSIAN' + assert psf_class.psf_type == "GAUSSIAN" psf_class = self.api_pixel.psf_class - assert psf_class.psf_type == 'PIXEL' + assert psf_class.psf_type == "PIXEL" def test_kwargs_data(self): kwargs_data = self.api.kwargs_data - assert kwargs_data['ra_at_xy_0'] != self.ra_at_xy_0 + assert kwargs_data["ra_at_xy_0"] != self.ra_at_xy_0 kwargs_data = self.api_pixel_grid.kwargs_data - assert kwargs_data['ra_at_xy_0'] == self.ra_at_xy_0 + assert kwargs_data["ra_at_xy_0"] == self.ra_at_xy_0 class TestRaise(unittest.TestCase): - def test_raise(self): numpix = 10 - self.ccd_gain = 4. + self.ccd_gain = 4.0 self.pixel_scale = 0.13 - self.read_noise = 10. - kwargs_instrument = {'read_noise': self.read_noise, 'pixel_scale': self.pixel_scale, 'ccd_gain': self.ccd_gain} + self.read_noise = 10.0 + kwargs_instrument = { + "read_noise": self.read_noise, + "pixel_scale": self.pixel_scale, + "ccd_gain": self.ccd_gain, + } exposure_time = 100 - sky_brightness = 20. - magnitude_zero_point = 21. 
+ sky_brightness = 20.0 + magnitude_zero_point = 21.0 num_exposures = 2 seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'wrong', 'kernel_point_source': None} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "wrong", + "kernel_point_source": None, + } kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observations) - data_api = DataAPI(numpix=numpix, data_count_unit='ADU', **kwargs_data) + data_api = DataAPI(numpix=numpix, data_count_unit="ADU", **kwargs_data) print(data_api._psf_type) with self.assertRaises(ValueError): - data_api = DataAPI(numpix=numpix, data_count_unit='ADU', **kwargs_data) + data_api = DataAPI(numpix=numpix, data_count_unit="ADU", **kwargs_data) psf_class = data_api.psf_class - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'PIXEL', 'kernel_point_source': None} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "PIXEL", + "kernel_point_source": None, + } kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observations) with self.assertRaises(ValueError): - data_api = DataAPI(numpix=numpix, data_count_unit='ADU', **kwargs_data) + data_api = DataAPI(numpix=numpix, data_count_unit="ADU", **kwargs_data) psf_class = data_api.psf_class - - kwargs_data['kernel_point_source'] = np.ones((3, 3)) - kwargs_pixel_grid = {'ra_at_xy_0':0.02,'dec_at_xy_0':0.02} + + kwargs_data["kernel_point_source"] = np.ones((3, 3)) + kwargs_pixel_grid = {"ra_at_xy_0": 0.02, "dec_at_xy_0": 0.02} with self.assertRaises(ValueError): - data_api = DataAPI(numpix=numpix,kwargs_pixel_grid=kwargs_pixel_grid, - **kwargs_data) + data_api = DataAPI( + numpix=numpix, kwargs_pixel_grid=kwargs_pixel_grid, **kwargs_data + ) diff --git a/test/test_SimulationAPI/test_model_api.py b/test/test_SimulationAPI/test_model_api.py index db1606950..ab634614c 100644 --- a/test/test_SimulationAPI/test_model_api.py +++ b/test/test_SimulationAPI/test_model_api.py @@ -5,64 +5,97 @@ class TestModelAPI(object): - def setup_method(self): - self.api = ModelAPI(lens_model_list=['SIS'], z_lens=None, z_source=None, lens_redshift_list=None, - source_light_model_list=['GAUSSIAN'], lens_light_model_list=['SERSIC'], - point_source_model_list=['UNLENSED'], source_redshift_list=None, cosmo=None) + self.api = ModelAPI( + lens_model_list=["SIS"], + z_lens=None, + z_source=None, + lens_redshift_list=None, + source_light_model_list=["GAUSSIAN"], + lens_light_model_list=["SERSIC"], + point_source_model_list=["UNLENSED"], + source_redshift_list=None, + cosmo=None, + ) def test_lens_model_class(self): model = self.api.lens_model_class - assert model.lens_model_list[0] == 'SIS' + assert model.lens_model_list[0] == "SIS" def test_lens_light_model_class(self): model = self.api.lens_light_model_class - assert model.profile_type_list[0] == 'SERSIC' + assert model.profile_type_list[0] == "SERSIC" def test_source_model_class(self): model = self.api.source_model_class - assert model.profile_type_list[0] == 
'GAUSSIAN' + assert model.profile_type_list[0] == "GAUSSIAN" def test_point_source_model_class(self): model = self.api.point_source_model_class - assert model.point_source_type_list[0] == 'UNLENSED' + assert model.point_source_type_list[0] == "UNLENSED" def test_source_position(self): - api = ModelAPI(lens_model_list=['SIS'], z_lens=None, z_source=None, lens_redshift_list=None, - source_light_model_list=['GAUSSIAN'], lens_light_model_list=['SERSIC'], - point_source_model_list=['SOURCE_POSITION'], source_redshift_list=None, cosmo=None) + api = ModelAPI( + lens_model_list=["SIS"], + z_lens=None, + z_source=None, + lens_redshift_list=None, + source_light_model_list=["GAUSSIAN"], + lens_light_model_list=["SERSIC"], + point_source_model_list=["SOURCE_POSITION"], + source_redshift_list=None, + cosmo=None, + ) model = api.point_source_model_class - assert model.point_source_type_list[0] == 'SOURCE_POSITION' + assert model.point_source_type_list[0] == "SOURCE_POSITION" def test_physical2lensing_conversion(self): lens_redshift_list = [0.5, 1] z_source_convention = 2 - api = ModelAPI(lens_model_list=['SIS', 'NFW'], lens_redshift_list=lens_redshift_list, - z_source_convention=z_source_convention, cosmo=None, z_source=z_source_convention) - - kwargs_mass = [{'sigma_v': 200, 'center_x': 0, 'center_y': 0}, - {'M200': 10**13, 'concentration': 5, 'center_x': 1, 'center_y': 1}] + api = ModelAPI( + lens_model_list=["SIS", "NFW"], + lens_redshift_list=lens_redshift_list, + z_source_convention=z_source_convention, + cosmo=None, + z_source=z_source_convention, + ) + + kwargs_mass = [ + {"sigma_v": 200, "center_x": 0, "center_y": 0}, + {"M200": 10**13, "concentration": 5, "center_x": 1, "center_y": 1}, + ] kwargs_lens = api.physical2lensing_conversion(kwargs_mass) - theta_E = kwargs_lens[0]['theta_E'] - lens_cosmo = LensCosmo(z_lens=lens_redshift_list[0], z_source=z_source_convention) - theta_E_test = lens_cosmo.sis_sigma_v2theta_E(kwargs_mass[0]['sigma_v']) + theta_E = kwargs_lens[0]["theta_E"] + lens_cosmo = LensCosmo( + z_lens=lens_redshift_list[0], z_source=z_source_convention + ) + theta_E_test = lens_cosmo.sis_sigma_v2theta_E(kwargs_mass[0]["sigma_v"]) npt.assert_almost_equal(theta_E, theta_E_test, decimal=7) - alpha_Rs = kwargs_lens[1]['alpha_Rs'] - lens_cosmo = LensCosmo(z_lens=lens_redshift_list[1], z_source=z_source_convention) - Rs_new , alpha_Rs_new = lens_cosmo.nfw_physical2angle(kwargs_mass[1]['M200'], kwargs_mass[1]['concentration']) + alpha_Rs = kwargs_lens[1]["alpha_Rs"] + lens_cosmo = LensCosmo( + z_lens=lens_redshift_list[1], z_source=z_source_convention + ) + Rs_new, alpha_Rs_new = lens_cosmo.nfw_physical2angle( + kwargs_mass[1]["M200"], kwargs_mass[1]["concentration"] + ) npt.assert_almost_equal(alpha_Rs, alpha_Rs_new, decimal=7) - api = ModelAPI(lens_model_list=['SIS', 'NFW'], z_lens=0.5, z_source_convention=z_source_convention, cosmo=None, - z_source=z_source_convention) + api = ModelAPI( + lens_model_list=["SIS", "NFW"], + z_lens=0.5, + z_source_convention=z_source_convention, + cosmo=None, + z_source=z_source_convention, + ) kwargs_lens = api.physical2lensing_conversion(kwargs_mass) - theta_E = kwargs_lens[0]['theta_E'] + theta_E = kwargs_lens[0]["theta_E"] lens_cosmo = LensCosmo(z_lens=0.5, z_source=z_source_convention) - theta_E_test = lens_cosmo.sis_sigma_v2theta_E(kwargs_mass[0]['sigma_v']) + theta_E_test = lens_cosmo.sis_sigma_v2theta_E(kwargs_mass[0]["sigma_v"]) npt.assert_almost_equal(theta_E, theta_E_test, decimal=7) -if __name__ == '__main__': +if __name__ == "__main__": 
pytest.main() diff --git a/test/test_SimulationAPI/test_observation_api.py b/test/test_SimulationAPI/test_observation_api.py index 2685e6f2f..9ea36f6b1 100644 --- a/test/test_SimulationAPI/test_observation_api.py +++ b/test/test_SimulationAPI/test_observation_api.py @@ -1,4 +1,8 @@ -from lenstronomy.SimulationAPI.observation_api import Instrument, Observation, SingleBand +from lenstronomy.SimulationAPI.observation_api import ( + Instrument, + Observation, + SingleBand, +) import lenstronomy.Util.util as util import numpy.testing as npt import numpy as np @@ -6,7 +10,6 @@ class TestInstrumentObservation(object): - def setup_method(self): pass @@ -14,111 +17,173 @@ def test_instrument(self): ccd_gain = 4 pixel_scale = 0.13 read_noise = 10 - kwargs_instrument = {'read_noise': read_noise, 'pixel_scale': pixel_scale, 'ccd_gain': ccd_gain} + kwargs_instrument = { + "read_noise": read_noise, + "pixel_scale": pixel_scale, + "ccd_gain": ccd_gain, + } instrument = Instrument(**kwargs_instrument) assert instrument.ccd_gain == ccd_gain assert instrument.pixel_scale == pixel_scale def test_observations(self): exposure_time = 90 - sky_brightness = 20. - num_exposures = 2, + sky_brightness = 20.0 + num_exposures = (2,) seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN'} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + } observation = Observation(**kwargs_observations) assert observation.exposure_time == exposure_time * num_exposures - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'NONE'} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "NONE", + } observation = Observation(**kwargs_observations) assert observation.exposure_time == exposure_time * num_exposures def test_update_observation(self): exposure_time = 90 - sky_brightness = 20. + sky_brightness = 20.0 num_exposures = 2 seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN'} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + } observation = Observation(**kwargs_observations) exposure_time = 1 - sky_brightness = 1. 
+ sky_brightness = 1.0 num_exposures = 1 seeing = 1 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN', 'kernel_point_source': 1} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + "kernel_point_source": 1, + } observation.update_observation(**kwargs_observations) assert observation.exposure_time == 1 psf = observation.psf_class assert psf.fwhm == 1 def test_psf_class(self): - kwargs_observations = {'exposure_time': 1, 'sky_brightness': 1, - 'num_exposures': 1, - 'psf_type': 'NONE'} + kwargs_observations = { + "exposure_time": 1, + "sky_brightness": 1, + "num_exposures": 1, + "psf_type": "NONE", + } observation = Observation(**kwargs_observations) psf_class = observation.psf_class - assert psf_class.psf_type == 'NONE' + assert psf_class.psf_type == "NONE" class TestRaise(unittest.TestCase): - def test_raise(self): - self.ccd_gain = 4. + self.ccd_gain = 4.0 pixel_scale = 0.13 - self.read_noise = 10. - kwargs_instrument = {'read_noise': self.read_noise, 'pixel_scale': pixel_scale, 'ccd_gain': self.ccd_gain} + self.read_noise = 10.0 + kwargs_instrument = { + "read_noise": self.read_noise, + "pixel_scale": pixel_scale, + "ccd_gain": self.ccd_gain, + } exposure_time = 100 - sky_brightness = 20. - self.magnitude_zero_point = 21. + sky_brightness = 20.0 + self.magnitude_zero_point = 21.0 num_exposures = 2 seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN'} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + } self.kwargs_data = util.merge_dicts(kwargs_instrument, kwargs_observations) with self.assertRaises(ValueError): - SingleBand(data_count_unit='wrong', **self.kwargs_data) + SingleBand(data_count_unit="wrong", **self.kwargs_data) with self.assertRaises(ValueError): - band = SingleBand(pixel_scale=1, exposure_time=1, magnitude_zero_point=1, read_noise=None, ccd_gain=None, - sky_brightness=None, seeing=None, num_exposures=1, psf_type='GAUSSIAN', kernel_point_source=None, - data_count_unit='ADU', background_noise=None) + band = SingleBand( + pixel_scale=1, + exposure_time=1, + magnitude_zero_point=1, + read_noise=None, + ccd_gain=None, + sky_brightness=None, + seeing=None, + num_exposures=1, + psf_type="GAUSSIAN", + kernel_point_source=None, + data_count_unit="ADU", + background_noise=None, + ) out = band.sky_brightness with self.assertRaises(ValueError): - band = SingleBand(pixel_scale=1, exposure_time=1, magnitude_zero_point=1, read_noise=None, ccd_gain=None, - sky_brightness=None, seeing=None, num_exposures=1, psf_type='GAUSSIAN', kernel_point_source=None, - data_count_unit='ADU', background_noise=None) + band = SingleBand( + pixel_scale=1, + exposure_time=1, + magnitude_zero_point=1, + read_noise=None, + ccd_gain=None, + sky_brightness=None, + seeing=None, + num_exposures=1, + psf_type="GAUSSIAN", + kernel_point_source=None, + data_count_unit="ADU", + background_noise=None, + ) out = band.background_noise class TestData(object): - def setup_method(self): - self.ccd_gain = 4. 
+ self.ccd_gain = 4.0 pixel_scale = 0.13 - self.read_noise = 10. - self.kwargs_instrument = {'read_noise': self.read_noise, 'pixel_scale': pixel_scale, 'ccd_gain': self.ccd_gain} + self.read_noise = 10.0 + self.kwargs_instrument = { + "read_noise": self.read_noise, + "pixel_scale": pixel_scale, + "ccd_gain": self.ccd_gain, + } exposure_time = 100 - sky_brightness = 20. - self.magnitude_zero_point = 21. + sky_brightness = 20.0 + self.magnitude_zero_point = 21.0 num_exposures = 2 seeing = 0.9 - kwargs_observations = {'exposure_time': exposure_time, 'sky_brightness': sky_brightness, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': num_exposures, - 'seeing': seeing, 'psf_type': 'GAUSSIAN'} + kwargs_observations = { + "exposure_time": exposure_time, + "sky_brightness": sky_brightness, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": num_exposures, + "seeing": seeing, + "psf_type": "GAUSSIAN", + } self.kwargs_data = util.merge_dicts(self.kwargs_instrument, kwargs_observations) - self.data_adu = SingleBand(data_count_unit='ADU', **self.kwargs_data) - self.data_e_ = SingleBand(data_count_unit='e-', **self.kwargs_data) + self.data_adu = SingleBand(data_count_unit="ADU", **self.kwargs_data) + self.data_e_ = SingleBand(data_count_unit="e-", **self.kwargs_data) def test_sky_brightness(self): sky_adu = self.data_adu.sky_brightness @@ -136,22 +201,30 @@ def test_background_noise(self): assert bkg == 1 def test_flux_noise(self): - flux_iid = 50. + flux_iid = 50.0 flux_adu = flux_iid / self.ccd_gain noise_adu = self.data_adu.flux_noise(flux_adu) noise_e_ = self.data_e_.flux_noise(flux_iid) - assert noise_e_ == 100./200. + assert noise_e_ == 100.0 / 200.0 assert noise_e_ == noise_adu * self.ccd_gain def test_noise_for_model(self): model_adu = np.ones((10, 10)) model_e_ = model_adu * self.ccd_gain - noise_adu = self.data_adu.noise_for_model(model_adu, background_noise=True, poisson_noise=True, seed=42) - noise_adu_2 = self.data_adu.noise_for_model(model_adu, background_noise=True, poisson_noise=True, seed=42) + noise_adu = self.data_adu.noise_for_model( + model_adu, background_noise=True, poisson_noise=True, seed=42 + ) + noise_adu_2 = self.data_adu.noise_for_model( + model_adu, background_noise=True, poisson_noise=True, seed=42 + ) npt.assert_almost_equal(noise_adu, noise_adu_2, decimal=10) - noise_e_ = self.data_e_.noise_for_model(model_e_, background_noise=True, poisson_noise=True, seed=42) - npt.assert_almost_equal(noise_adu, noise_e_/self.ccd_gain, decimal=10) - noise_e_ = self.data_e_.noise_for_model(model_e_, background_noise=True, poisson_noise=True, seed=None) + noise_e_ = self.data_e_.noise_for_model( + model_e_, background_noise=True, poisson_noise=True, seed=42 + ) + npt.assert_almost_equal(noise_adu, noise_e_ / self.ccd_gain, decimal=10) + noise_e_ = self.data_e_.noise_for_model( + model_e_, background_noise=True, poisson_noise=True, seed=None + ) def test_estimate_noise(self): image_adu = np.ones((10, 10)) @@ -162,11 +235,11 @@ def test_estimate_noise(self): def test_magnitude2cps(self): mag_0 = self.data_adu.magnitude2cps(magnitude=self.magnitude_zero_point) - npt.assert_almost_equal(mag_0, 1./self.ccd_gain, decimal=10) + npt.assert_almost_equal(mag_0, 1.0 / self.ccd_gain, decimal=10) mag_0_e_ = self.data_e_.magnitude2cps(magnitude=self.magnitude_zero_point) npt.assert_almost_equal(mag_0_e_, 1, decimal=10) - mag_0 = self.data_adu.magnitude2cps(magnitude=self.magnitude_zero_point+1) + mag_0 = 
self.data_adu.magnitude2cps(magnitude=self.magnitude_zero_point + 1) npt.assert_almost_equal(mag_0, 0.0995267926383743, decimal=10) mag_0 = self.data_adu.magnitude2cps(magnitude=self.magnitude_zero_point - 1) @@ -181,20 +254,30 @@ def test_flux_iid(self): flux_e_ = flux_adu * self.ccd_gain noise_e_ = self.data_e_.flux_noise(flux_e_) noise_adu = self.data_adu.flux_noise(flux_adu) - npt.assert_almost_equal(noise_e_/self.ccd_gain, noise_adu, decimal=8) + npt.assert_almost_equal(noise_e_ / self.ccd_gain, noise_adu, decimal=8) def test_psf_type(self): - assert self.data_adu._psf_type == 'GAUSSIAN' - kwargs_observations = {'exposure_time': 1, 'sky_brightness': 1, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': 1, - 'seeing': 1, 'psf_type': 'PIXEL'} + assert self.data_adu._psf_type == "GAUSSIAN" + kwargs_observations = { + "exposure_time": 1, + "sky_brightness": 1, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": 1, + "seeing": 1, + "psf_type": "PIXEL", + } kwargs_data = util.merge_dicts(self.kwargs_instrument, kwargs_observations) - data_pixel = SingleBand(data_count_unit='ADU', **kwargs_data) - assert data_pixel._psf_type == 'PIXEL' - - kwargs_observations = {'exposure_time': 1, 'sky_brightness': 1, - 'magnitude_zero_point': self.magnitude_zero_point, 'num_exposures': 1, - 'seeing': 1, 'psf_type': 'NONE'} + data_pixel = SingleBand(data_count_unit="ADU", **kwargs_data) + assert data_pixel._psf_type == "PIXEL" + + kwargs_observations = { + "exposure_time": 1, + "sky_brightness": 1, + "magnitude_zero_point": self.magnitude_zero_point, + "num_exposures": 1, + "seeing": 1, + "psf_type": "NONE", + } kwargs_data = util.merge_dicts(self.kwargs_instrument, kwargs_observations) - data_pixel = SingleBand(data_count_unit='ADU', **kwargs_data) - assert data_pixel._psf_type == 'NONE' + data_pixel = SingleBand(data_count_unit="ADU", **kwargs_data) + assert data_pixel._psf_type == "NONE" diff --git a/test/test_SimulationAPI/test_observation_constructor.py b/test/test_SimulationAPI/test_observation_constructor.py index a82117c94..cf47aae2f 100644 --- a/test/test_SimulationAPI/test_observation_constructor.py +++ b/test/test_SimulationAPI/test_observation_constructor.py @@ -4,11 +4,12 @@ class TestObservationConstructor(unittest.TestCase): - def test_constructor(self): - instrument_name = 'LSST' - observation_name = 'LSST_g_band' - kwargs_data = constructor.observation_constructor(instrument_name=instrument_name, observation_name=observation_name) + instrument_name = "LSST" + observation_name = "LSST_g_band" + kwargs_data = constructor.observation_constructor( + instrument_name=instrument_name, observation_name=observation_name + ) data = SingleBand(**kwargs_data) assert data.pixel_scale == 0.263 assert data.exposure_time == 900 @@ -17,10 +18,16 @@ def test_constructor(self): inst_name_list = constructor.instrument_name_list for obs_name in obs_name_list: for inst_name in inst_name_list: - constructor.observation_constructor(instrument_name=inst_name, observation_name=obs_name) + constructor.observation_constructor( + instrument_name=inst_name, observation_name=obs_name + ) with self.assertRaises(ValueError): - kwargs_data = constructor.observation_constructor(instrument_name='wrong', observation_name='LSST_g_band') + kwargs_data = constructor.observation_constructor( + instrument_name="wrong", observation_name="LSST_g_band" + ) SingleBand(**kwargs_data) with self.assertRaises(ValueError): - kwargs_data = constructor.observation_constructor(instrument_name='LSST', 
observation_name='wrong') - SingleBand(**kwargs_data) \ No newline at end of file + kwargs_data = constructor.observation_constructor( + instrument_name="LSST", observation_name="wrong" + ) + SingleBand(**kwargs_data) diff --git a/test/test_SimulationAPI/test_point_source_variability.py b/test/test_SimulationAPI/test_point_source_variability.py index d79b188ab..075ca3106 100644 --- a/test/test_SimulationAPI/test_point_source_variability.py +++ b/test/test_SimulationAPI/test_point_source_variability.py @@ -6,7 +6,6 @@ class TestPointSourceVariability(object): - def setup_method(self): pass @@ -15,45 +14,86 @@ def test_image(self): def var_func(time): sigma = 100 mag_0 = 30 - cps = np.exp(-time ** 2 / (2 * sigma ** 2)) + cps = np.exp(-(time**2) / (2 * sigma**2)) mag = data_util.cps2magnitude(cps, magnitude_zero_point=0) mag_norm = data_util.cps2magnitude(1, magnitude_zero_point=0) mag_return = -mag + mag_norm + mag_0 return mag_return - kwargs_model_time_var = {'lens_model_list': ['SPEP', 'SHEAR'], # list of lens models to be used - 'lens_light_model_list': ['SERSIC_ELLIPSE'], - # list of unlensed light models to be used - 'source_light_model_list': ['SERSIC_ELLIPSE'], - # list of extended source models to be used - 'z_lens': 0.5, 'z_source': 2 - } - instrument_name = 'LSST' - observation_name = 'LSST_g_band' - kwargs_single_band = constructor.observation_constructor(instrument_name=instrument_name, - observation_name=observation_name) + kwargs_model_time_var = { + "lens_model_list": ["SPEP", "SHEAR"], # list of lens models to be used + "lens_light_model_list": ["SERSIC_ELLIPSE"], + # list of unlensed light models to be used + "source_light_model_list": ["SERSIC_ELLIPSE"], + # list of extended source models to be used + "z_lens": 0.5, + "z_source": 2, + } + instrument_name = "LSST" + observation_name = "LSST_g_band" + kwargs_single_band = constructor.observation_constructor( + instrument_name=instrument_name, observation_name=observation_name + ) - kwargs_single_band['data_count_unit'] = 'e-' + kwargs_single_band["data_count_unit"] = "e-" kwargs_numerics = {} numpix = 20 # source position source_x, source_y = 0.01, 0.1 # lens light kwargs_lens_light_mag_g = [ - {'magnitude': 100, 'R_sersic': .6, 'n_sersic': 4, 'e1': 0.1, 'e2': -0.1, 'center_x': 0, 'center_y': 0}] + { + "magnitude": 100, + "R_sersic": 0.6, + "n_sersic": 4, + "e1": 0.1, + "e2": -0.1, + "center_x": 0, + "center_y": 0, + } + ] # source light kwargs_source_mag_g = [ - {'magnitude': 100, 'R_sersic': 0.3, 'n_sersic': 1, 'e1': -0.3, 'e2': -0.2, 'center_x': 0, 'center_y': 0}] + { + "magnitude": 100, + "R_sersic": 0.3, + "n_sersic": 1, + "e1": -0.3, + "e2": -0.2, + "center_x": 0, + "center_y": 0, + } + ] kwargs_lens = [ - {'theta_E': 1, 'gamma': 2, 'e1': 0.1, 'e2': -0.1, 'center_x': 0, 'center_y': 0}, # SIE model - {'gamma1': 0.03, 'gamma2': 0.01} # SHEAR model + { + "theta_E": 1, + "gamma": 2, + "e1": 0.1, + "e2": -0.1, + "center_x": 0, + "center_y": 0, + }, # SIE model + {"gamma1": 0.03, "gamma2": 0.01}, # SHEAR model ] - from lenstronomy.SimulationAPI.point_source_variability import PointSourceVariability - ps_var = PointSourceVariability(source_x, source_y, var_func, numpix, kwargs_single_band, kwargs_model_time_var, - kwargs_numerics, - kwargs_lens, kwargs_source_mag_g, kwargs_lens_light_mag_g, kwargs_ps_mag=None) + from lenstronomy.SimulationAPI.point_source_variability import ( + PointSourceVariability, + ) + + ps_var = PointSourceVariability( + source_x, + source_y, + var_func, + numpix, + kwargs_single_band, + 
kwargs_model_time_var, + kwargs_numerics, + kwargs_lens, + kwargs_source_mag_g, + kwargs_lens_light_mag_g, + kwargs_ps_mag=None, + ) time = 0 image_g = ps_var.image_time(time=time) @@ -63,5 +103,5 @@ def var_func(time): assert len(t_days) == 4 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_SimulationAPI/test_sim_api.py b/test/test_SimulationAPI/test_sim_api.py index 74ad10a45..6e18ad83d 100644 --- a/test/test_SimulationAPI/test_sim_api.py +++ b/test/test_SimulationAPI/test_sim_api.py @@ -5,37 +5,55 @@ class TestModelAPI(object): - def setup_method(self): - numpix = 10 - instrument_name = 'LSST' - observation_name = 'LSST_g_band' - kwargs_single_band = constructor.observation_constructor(instrument_name=instrument_name, - observation_name=observation_name) - kwargs_single_band['data_count_unit'] = 'e-' - kwargs_model = {'lens_model_list': ['SIS'], 'z_lens': None, 'z_source': None, 'lens_redshift_list': None, - 'source_light_model_list': ['GAUSSIAN'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list':['UNLENSED'], - 'source_redshift_list': None} - kwargs_numerics = {'supersampling_factor': 2} + instrument_name = "LSST" + observation_name = "LSST_g_band" + kwargs_single_band = constructor.observation_constructor( + instrument_name=instrument_name, observation_name=observation_name + ) + kwargs_single_band["data_count_unit"] = "e-" + kwargs_model = { + "lens_model_list": ["SIS"], + "z_lens": None, + "z_source": None, + "lens_redshift_list": None, + "source_light_model_list": ["GAUSSIAN"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["UNLENSED"], + "source_redshift_list": None, + } + kwargs_numerics = {"supersampling_factor": 2} self.api = SimAPI(numpix, kwargs_single_band, kwargs_model) def test_image_model_class(self): model = self.api.image_model_class() - assert model.LensModel.lens_model_list[0] == 'SIS' + assert model.LensModel.lens_model_list[0] == "SIS" def test_magnitude2amplitude(self): - kwargs_lens_light_mag = [{'magnitude': 28, 'R_sersic': 1., 'n_sersic': 2, 'center_x': 0, 'center_y': 0}] - kwargs_source_mag = [{'magnitude': 30, 'sigma': 0.3, 'center_x': 0, 'center_y': 0}] - kwargs_ps_mag = [{'magnitude': [30], 'ra_image': [0], 'dec_image': [0]}] - kwargs_lens_light, kwargs_source, kwargs_ps = self.api.magnitude2amplitude(kwargs_lens_light_mag, kwargs_source_mag, - kwargs_ps_mag) - - npt.assert_almost_equal(kwargs_source[0]['amp'], 1, decimal=5) - npt.assert_almost_equal(kwargs_ps[0]['point_amp'][0], 1, decimal=5) - npt.assert_almost_equal(kwargs_lens_light[0]['amp'], 0.38680586575451237, decimal=5) - - -if __name__ == '__main__': + kwargs_lens_light_mag = [ + { + "magnitude": 28, + "R_sersic": 1.0, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } + ] + kwargs_source_mag = [ + {"magnitude": 30, "sigma": 0.3, "center_x": 0, "center_y": 0} + ] + kwargs_ps_mag = [{"magnitude": [30], "ra_image": [0], "dec_image": [0]}] + kwargs_lens_light, kwargs_source, kwargs_ps = self.api.magnitude2amplitude( + kwargs_lens_light_mag, kwargs_source_mag, kwargs_ps_mag + ) + + npt.assert_almost_equal(kwargs_source[0]["amp"], 1, decimal=5) + npt.assert_almost_equal(kwargs_ps[0]["point_amp"][0], 1, decimal=5) + npt.assert_almost_equal( + kwargs_lens_light[0]["amp"], 0.38680586575451237, decimal=5 + ) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/simulation_util.py b/test/test_Util/simulation_util.py index 2bfc71eea..5697c3586 100644 --- a/test/test_Util/simulation_util.py +++ 
b/test/test_Util/simulation_util.py @@ -10,9 +10,8 @@ def setup_method(self): pass def test_data_configure_simple(self): - # data specifics - sigma_bkg = 1. # background noise per pixel + sigma_bkg = 1.0 # background noise per pixel exp_time = 10 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) numPix = 100 # cutout pixel size deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2) @@ -20,10 +19,12 @@ def test_data_configure_simple(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) assert data_class.pixel_width == deltaPix -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_analysis_util.py b/test/test_Util/test_analysis_util.py index c984fe8f3..7c436526d 100644 --- a/test/test_Util/test_analysis_util.py +++ b/test/test_Util/test_analysis_util.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.analysis_util as analysis_util @@ -11,20 +11,21 @@ class TestCorrelation(object): - def setup_method(self): pass def test_radial_profile(self): x_grid, y_grid = util.make_grid(numPix=20, deltapix=1) profile = Gaussian() - light_grid = profile.function(x_grid, y_grid, amp=1., sigma=5) - I_r, r = analysis_util.radial_profile(light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None) + light_grid = profile.function(x_grid, y_grid, amp=1.0, sigma=5) + I_r, r = analysis_util.radial_profile( + light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None + ) assert I_r[0] == 0 def test_ellipticities(self): x_grid, y_grid = util.make_grid(numPix=200, deltapix=1) - e1, e2 = 0., 0.1 + e1, e2 = 0.0, 0.1 profile = GaussianEllipse() I_xy = profile.function(x_grid, y_grid, amp=1, sigma=10, e1=e1, e2=e2) e1_out, e2_out = analysis_util.ellipticities(I_xy, x_grid, y_grid) @@ -32,7 +33,7 @@ def test_ellipticities(self): npt.assert_almost_equal(e1_out, e1, decimal=3) npt.assert_almost_equal(e2_out, e2, decimal=3) - e1, e2 = 0.1, 0. + e1, e2 = 0.1, 0.0 profile = GaussianEllipse() I_xy = profile.function(x_grid, y_grid, amp=1, sigma=10, e1=e1, e2=e2) e1_out, e2_out = analysis_util.ellipticities(I_xy, x_grid, y_grid) @@ -43,58 +44,59 @@ def test_ellipticities(self): def test_half_light_radius(self): x_grid, y_grid = util.make_grid(numPix=10, deltapix=1) lens_light = np.zeros_like(x_grid) - r_half = analysis_util.half_light_radius(lens_light, x_grid, y_grid, center_x=0, center_y=0) + r_half = analysis_util.half_light_radius( + lens_light, x_grid, y_grid, center_x=0, center_y=0 + ) assert r_half == -1 def test_bic_model(self): - bic=analysis_util.bic_model(0, np.e, 1) + bic = analysis_util.bic_model(0, np.e, 1) npt.assert_almost_equal(bic, 1, decimal=8) def test_azimuthalAverage(self): num_pix = 101 x_grid, y_grid = util.make_grid(numPix=num_pix, deltapix=1) - e1, e2 = 0., 0. 
+ e1, e2 = 0.0, 0.0 profile = GaussianEllipse() - kwargs_profile = {'amp': 1, 'sigma': 50, 'e1': e1, 'e2': e2} + kwargs_profile = {"amp": 1, "sigma": 50, "e1": e1, "e2": e2} I_xy = profile.function(x_grid, y_grid, **kwargs_profile) I_xy = util.array2image(I_xy) I_r, r_new = analysis_util.azimuthalAverage(I_xy, center=None) # r = np.linspace(start=0.5, stop=len(I_r) + 0.5, num=len(I_r)) - #r = np.linspace(start=1, stop=len(I_r), num=len(I_r)) - #npt.assert_almost_equal(r_new, r, decimal=5) + # r = np.linspace(start=1, stop=len(I_r), num=len(I_r)) + # npt.assert_almost_equal(r_new, r, decimal=5) I_r_true = profile.function(0, r_new, **kwargs_profile) # I_r_true_new = profile.function(0, r, **kwargs_profile) npt.assert_almost_equal(I_r / I_r_true, 1, decimal=2) r = np.sqrt(x_grid**2 + y_grid**2) r_max = np.max(r) - I_xy = np.sin(r/r_max * (2*np.pi)) + I_xy = np.sin(r / r_max * (2 * np.pi)) I_xy = util.array2image(I_xy) I_r, r_new = analysis_util.azimuthalAverage(I_xy, center=None) - I_r_true = np.sin(r_new/r_max * (2*np.pi)) - #import matplotlib.pyplot as plt - #plt.plot(r_new, I_r_true, label='true', alpha=0.5) - #plt.plot(r_new, I_r, label='computed r old', alpha=0.5) - #plt.legend() - #plt.show() + I_r_true = np.sin(r_new / r_max * (2 * np.pi)) + # import matplotlib.pyplot as plt + # plt.plot(r_new, I_r_true, label='true', alpha=0.5) + # plt.plot(r_new, I_r, label='computed r old', alpha=0.5) + # plt.legend() + # plt.show() npt.assert_almost_equal(I_r[10:], I_r_true[10:], decimal=1) def test_profile_center(self): - kwargs_list = [{'center_x': 1, 'center_y': 0}] + kwargs_list = [{"center_x": 1, "center_y": 0}] center_x, center_y = analysis_util.profile_center(kwargs_list=kwargs_list) assert center_x == 1 assert center_y == 0 class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): kwargs_list = [{}] analysis_util.profile_center(kwargs_list=kwargs_list) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_class_creator.py b/test/test_Util/test_class_creator.py index 62917e333..b0aeb7371 100644 --- a/test/test_Util/test_class_creator.py +++ b/test/test_Util/test_class_creator.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.class_creator as class_creator @@ -8,91 +8,175 @@ class TestClassCreator(object): - def setup_method(self): - self.kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION'], - 'index_lens_model_list': [[0]], 'index_source_light_model_list': [[0]], - 'index_lens_light_model_list': [[0]], 'index_point_source_model_list': [[0]], - 'band_index': 0, 'source_deflection_scaling_list': [1], 'source_redshift_list': [1], - 'fixed_magnification_list': [True], 'additional_images_list': [False], - 'lens_redshift_list': [0.5], - 'point_source_frame_list': [[0]]} - self.kwargs_model_2 = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION'], - } - self.kwargs_model_3 = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION'], - 'index_lens_model_list': [[0]], 'index_source_light_model_list': [[0]], - 'index_lens_light_model_list': [[0]], 'index_point_source_model_list': [[0]], - 'point_source_frame_list': [[0]] - } - self.kwargs_model_4 = 
{'lens_model_list': ['SIS', 'SIS'], 'lens_redshift_list': [0.3, 0.4], 'multi_plane': True, - 'observed_convention_index': [0], 'index_lens_model_list': [[0]], 'z_source': 1, - 'optical_depth_model_list': ['UNIFORM'], 'index_optical_depth_model_list': [[0]], - 'tau0_index_list': [0], - 'point_source_frame_list': [[0]]} - - - self.kwargs_psf = {'psf_type': 'NONE'} - self.kwargs_data = {'image_data': np.ones((10, 10))} + self.kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "index_lens_model_list": [[0]], + "index_source_light_model_list": [[0]], + "index_lens_light_model_list": [[0]], + "index_point_source_model_list": [[0]], + "band_index": 0, + "source_deflection_scaling_list": [1], + "source_redshift_list": [1], + "fixed_magnification_list": [True], + "additional_images_list": [False], + "lens_redshift_list": [0.5], + "point_source_frame_list": [[0]], + } + self.kwargs_model_2 = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } + self.kwargs_model_3 = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + "index_lens_model_list": [[0]], + "index_source_light_model_list": [[0]], + "index_lens_light_model_list": [[0]], + "index_point_source_model_list": [[0]], + "point_source_frame_list": [[0]], + } + self.kwargs_model_4 = { + "lens_model_list": ["SIS", "SIS"], + "lens_redshift_list": [0.3, 0.4], + "multi_plane": True, + "observed_convention_index": [0], + "index_lens_model_list": [[0]], + "z_source": 1, + "optical_depth_model_list": ["UNIFORM"], + "index_optical_depth_model_list": [[0]], + "tau0_index_list": [0], + "point_source_frame_list": [[0]], + } + + self.kwargs_psf = {"psf_type": "NONE"} + self.kwargs_data = {"image_data": np.ones((10, 10))} def test_create_class_instances(self): - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances(**self.kwargs_model) - assert lens_model_class.lens_model_list[0] == 'SIS' - - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances( - **self.kwargs_model_2) - assert lens_model_class.lens_model_list[0] == 'SIS' - - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances( - **self.kwargs_model_3) - assert lens_model_class.lens_model_list[0] == 'SIS' - - lens_model_class, source_model_class, lens_light_model_class, point_source_class, extinction_class = class_creator.create_class_instances( - **self.kwargs_model_4) - assert lens_model_class.lens_model_list[0] == 'SIS' + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = class_creator.create_class_instances(**self.kwargs_model) + assert lens_model_class.lens_model_list[0] == "SIS" + + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = class_creator.create_class_instances(**self.kwargs_model_2) + assert lens_model_class.lens_model_list[0] == "SIS" + + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = 
class_creator.create_class_instances(**self.kwargs_model_3) + assert lens_model_class.lens_model_list[0] == "SIS" + + ( + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + extinction_class, + ) = class_creator.create_class_instances(**self.kwargs_model_4) + assert lens_model_class.lens_model_list[0] == "SIS" assert lens_model_class.lens_model._observed_convention_index[0] == 0 def test_create_image_model(self): - imageModel = class_creator.create_image_model(self.kwargs_data, self.kwargs_psf, kwargs_numerics={}, kwargs_model=self.kwargs_model) - assert imageModel.LensModel.lens_model_list[0] == 'SIS' - - imageModel = class_creator.create_image_model(self.kwargs_data, self.kwargs_psf, kwargs_numerics={}, kwargs_model={}) + imageModel = class_creator.create_image_model( + self.kwargs_data, + self.kwargs_psf, + kwargs_numerics={}, + kwargs_model=self.kwargs_model, + ) + assert imageModel.LensModel.lens_model_list[0] == "SIS" + + imageModel = class_creator.create_image_model( + self.kwargs_data, self.kwargs_psf, kwargs_numerics={}, kwargs_model={} + ) assert imageModel.LensModel.lens_model_list == [] def test_create_im_sim(self): - kwargs_model = {'lens_model_list': ['SIS'], 'source_light_model_list': ['SERSIC'], - 'lens_light_model_list': ['SERSIC'], 'point_source_model_list': ['LENSED_POSITION']} - kwargs_psf = {'psf_type': 'NONE'} - kwargs_data = {'image_data': np.ones((10, 10))} + kwargs_model = { + "lens_model_list": ["SIS"], + "source_light_model_list": ["SERSIC"], + "lens_light_model_list": ["SERSIC"], + "point_source_model_list": ["LENSED_POSITION"], + } + kwargs_psf = {"psf_type": "NONE"} + kwargs_data = {"image_data": np.ones((10, 10))} multi_band_list = [[kwargs_data, kwargs_psf, {}]] - multi_band_type = 'multi-linear' - - multi_band = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, bands_compute=None, - image_likelihood_mask_list=None, band_index=0) - assert multi_band._imageModel_list[0].LensModel.lens_model_list[0] == 'SIS' - multi_band_type = 'joint-linear' - multi_band = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, bands_compute=None, - image_likelihood_mask_list=None, band_index=0) - assert multi_band._imageModel_list[0].LensModel.lens_model_list[0] == 'SIS' - multi_band_type = 'single-band' - multi_band = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model, bands_compute=None, - image_likelihood_mask_list=None, band_index=0) - assert multi_band.LensModel.lens_model_list[0] == 'SIS' + multi_band_type = "multi-linear" + + multi_band = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + ) + assert multi_band._imageModel_list[0].LensModel.lens_model_list[0] == "SIS" + multi_band_type = "joint-linear" + multi_band = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + ) + assert multi_band._imageModel_list[0].LensModel.lens_model_list[0] == "SIS" + multi_band_type = "single-band" + multi_band = class_creator.create_im_sim( + multi_band_list, + multi_band_type, + kwargs_model, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + ) + assert multi_band.LensModel.lens_model_list[0] == "SIS" class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): - class_creator.create_im_sim(multi_band_list=None, 
multi_band_type='WRONG', kwargs_model=None, - bands_compute=None, image_likelihood_mask_list=None, band_index=0) + class_creator.create_im_sim( + multi_band_list=None, + multi_band_type="WRONG", + kwargs_model=None, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + ) with self.assertRaises(ValueError): - class_creator.create_im_sim(multi_band_list=[[], []], multi_band_type='multi-linear', linear_solver=False, - kwargs_model=None, bands_compute=None, image_likelihood_mask_list=None, band_index=0) - - -if __name__ == '__main__': + class_creator.create_im_sim( + multi_band_list=[[], []], + multi_band_type="multi-linear", + linear_solver=False, + kwargs_model=None, + bands_compute=None, + image_likelihood_mask_list=None, + band_index=0, + ) + + +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_coolest.py b/test/test_Util/test_coolest.py index b73da554a..b3ce7be85 100644 --- a/test/test_Util/test_coolest.py +++ b/test/test_Util/test_coolest.py @@ -4,9 +4,20 @@ import unittest import os -from lenstronomy.Util.coolest_interface import create_lenstronomy_from_coolest,update_coolest_from_lenstronomy,create_kwargs_mcmc_from_chain_list -from lenstronomy.Util.coolest_read_util import degree_coolest_to_radian_lenstronomy,ellibounds_coolest_to_lenstronomy, shearbounds_coolest_to_lenstronomy -from lenstronomy.Util.coolest_update_util import shapelet_amp_lenstronomy_to_coolest, folding_coolest +from lenstronomy.Util.coolest_interface import ( + create_lenstronomy_from_coolest, + update_coolest_from_lenstronomy, + create_kwargs_mcmc_from_chain_list, +) +from lenstronomy.Util.coolest_read_util import ( + degree_coolest_to_radian_lenstronomy, + ellibounds_coolest_to_lenstronomy, + shearbounds_coolest_to_lenstronomy, +) +from lenstronomy.Util.coolest_update_util import ( + shapelet_amp_lenstronomy_to_coolest, + folding_coolest, +) from lenstronomy.LensModel.lens_model import LensModel from lenstronomy.LightModel.light_model import LightModel @@ -20,39 +31,95 @@ class TestCOOLESTinterface(object): - def test_load(self): path = os.getcwd() - if path[-11:] == 'lenstronomy': - path+='/test/test_Util' - kwargs_out = create_lenstronomy_from_coolest(path+"/coolest_template") + if path[-11:] == "lenstronomy": + path += "/test/test_Util" + kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template") print(kwargs_out) kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template_pemd") - kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template_pemd_random") + kwargs_out = create_lenstronomy_from_coolest( + path + "/coolest_template_pemd_random" + ) return def test_update(self): path = os.getcwd() - if path[-11:] == 'lenstronomy': - path+='/test/test_Util' - kwargs_result={"kwargs_lens":[{'gamma1':0.1,'gamma2':-0.05,'ra_0':0.,'dec_0':0.}, - {'theta_E': 0.7, 'e1': -0.15, 'e2': 0.01, - 'center_x': 0.03, 'center_y': 0.01}], - "kwargs_source":[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}, - {'amp': np.array([70., 33., 2.1, 3.9, 15., -16., 2.8, -1.7, -4.1, 0.2]), - 'n_max': 3, 'beta': 0.1, 'center_x': 0.1, 'center_y': 0.0}], - "kwargs_lens_light":[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}, - {'amp':12.,'R_sersic':0.02,'n_sersic':6.,'center_x':0.03, - 'center_y':0.01,'e1':0.,'e2':-0.15}], - "kwargs_ps":[{'point_amp':np.array([0.1]),'ra_image':np.array([0.25]), 'dec_image':np.array([0.2])}]} - 
update_coolest_from_lenstronomy(path+"/coolest_template",kwargs_result,ending="_update") - kwargs_out = create_lenstronomy_from_coolest(path+"/coolest_template_update") - npt.assert_almost_equal(kwargs_out['kwargs_params']['lens_model'][0][1]['e1'], - kwargs_result['kwargs_lens'][1]['e1'], decimal=4) - npt.assert_almost_equal(kwargs_out['kwargs_params']['lens_model'][0][1]['e2'], - kwargs_result['kwargs_lens'][1]['e2'], decimal=4) + if path[-11:] == "lenstronomy": + path += "/test/test_Util" + kwargs_result = { + "kwargs_lens": [ + {"gamma1": 0.1, "gamma2": -0.05, "ra_0": 0.0, "dec_0": 0.0}, + { + "theta_E": 0.7, + "e1": -0.15, + "e2": 0.01, + "center_x": 0.03, + "center_y": 0.01, + }, + ], + "kwargs_source": [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + }, + { + "amp": np.array( + [70.0, 33.0, 2.1, 3.9, 15.0, -16.0, 2.8, -1.7, -4.1, 0.2] + ), + "n_max": 3, + "beta": 0.1, + "center_x": 0.1, + "center_y": 0.0, + }, + ], + "kwargs_lens_light": [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + }, + { + "amp": 12.0, + "R_sersic": 0.02, + "n_sersic": 6.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": 0.0, + "e2": -0.15, + }, + ], + "kwargs_ps": [ + { + "point_amp": np.array([0.1]), + "ra_image": np.array([0.25]), + "dec_image": np.array([0.2]), + } + ], + } + update_coolest_from_lenstronomy( + path + "/coolest_template", kwargs_result, ending="_update" + ) + kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template_update") + npt.assert_almost_equal( + kwargs_out["kwargs_params"]["lens_model"][0][1]["e1"], + kwargs_result["kwargs_lens"][1]["e1"], + decimal=4, + ) + npt.assert_almost_equal( + kwargs_out["kwargs_params"]["lens_model"][0][1]["e2"], + kwargs_result["kwargs_lens"][1]["e2"], + decimal=4, + ) return @@ -60,73 +127,127 @@ def test_full(self): # use read json ; create an image ; create noise ; do fit (PSO for result + MCMC for chain) # create the kwargs mcmc ; upadte json path = os.getcwd() - if path[-11:] == 'lenstronomy': - path+='/test/test_Util' + if path[-11:] == "lenstronomy": + path += "/test/test_Util" - kwargs_out = create_lenstronomy_from_coolest(path+"/coolest_template") + kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template") # IMAGE specifics - background_rms = .005 # background noise per pixel - exp_time = 500. 
# exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) + background_rms = 0.005 # background noise per pixel + exp_time = 500.0 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) # PSF : easier for test to create a gaussian PSF fwhm = 0.05 # full width at half maximum of PSF - psf_type = 'GAUSSIAN' # 'GAUSSIAN', 'PIXEL', 'NONE' + psf_type = "GAUSSIAN" # 'GAUSSIAN', 'PIXEL', 'NONE' # lensing quantities to create an image - lens_model_list = kwargs_out['kwargs_model']['lens_model_list'] - kwargs_sie = {'theta_E': .66, 'center_x': 0.05, 'center_y': 0, 'e1': -0.1, - 'e2': 0.1} # parameters of the deflector lens model - kwargs_shear = {'gamma1': 0.0, 'gamma2': -0.05} # shear values to the source plane + lens_model_list = kwargs_out["kwargs_model"]["lens_model_list"] + kwargs_sie = { + "theta_E": 0.66, + "center_x": 0.05, + "center_y": 0, + "e1": -0.1, + "e2": 0.1, + } # parameters of the deflector lens model + kwargs_shear = { + "gamma1": 0.0, + "gamma2": -0.05, + } # shear values to the source plane kwargs_lens = [kwargs_shear, kwargs_sie] lens_model_class = LensModel(lens_model_list) # Sersic parameters in the initial simulation for the source - source_model_list = kwargs_out['kwargs_model']['source_light_model_list'] - kwargs_sersic = {'amp': 16, 'R_sersic': 0.1, 'n_sersic': 3.5, 'e1': -0.1, 'e2': 0.1, - 'center_x': 0.1, 'center_y': 0} - kwargs_shapelets = {'amp': np.array([ 70., 33., 2.1, 3.9 , 15., -16., 2.8, -1.7, -4.1, 0.2]), - 'n_max': 3, 'beta': 0.1, 'center_x': 0.1, 'center_y': 0.0} + source_model_list = kwargs_out["kwargs_model"]["source_light_model_list"] + kwargs_sersic = { + "amp": 16, + "R_sersic": 0.1, + "n_sersic": 3.5, + "e1": -0.1, + "e2": 0.1, + "center_x": 0.1, + "center_y": 0, + } + kwargs_shapelets = { + "amp": np.array([70.0, 33.0, 2.1, 3.9, 15.0, -16.0, 2.8, -1.7, -4.1, 0.2]), + "n_max": 3, + "beta": 0.1, + "center_x": 0.1, + "center_y": 0.0, + } kwargs_source = [kwargs_sersic, kwargs_shapelets] source_model_class = LightModel(source_model_list) # Sersic parameters in the initial simulation for the lens light - lens_light_model_list = kwargs_out['kwargs_model']['lens_light_model_list'] - kwargs_sersic_lens1 = {'amp': 16, 'R_sersic': 0.6, 'n_sersic': 2.5, 'e1': -0.1, 'e2': 0.1, 'center_x': 0.05, - 'center_y': 0} - kwargs_sersic_lens2 = {'amp': 3, 'R_sersic': 0.7, 'n_sersic': 3, 'e1': -0.1, 'e2': 0.1, 'center_x': 0.05, - 'center_y': 0} - kwargs_lens_light = [kwargs_sersic_lens1,kwargs_sersic_lens2] + lens_light_model_list = kwargs_out["kwargs_model"]["lens_light_model_list"] + kwargs_sersic_lens1 = { + "amp": 16, + "R_sersic": 0.6, + "n_sersic": 2.5, + "e1": -0.1, + "e2": 0.1, + "center_x": 0.05, + "center_y": 0, + } + kwargs_sersic_lens2 = { + "amp": 3, + "R_sersic": 0.7, + "n_sersic": 3, + "e1": -0.1, + "e2": 0.1, + "center_x": 0.05, + "center_y": 0, + } + kwargs_lens_light = [kwargs_sersic_lens1, kwargs_sersic_lens2] lens_light_model_class = LightModel(lens_light_model_list) numPix = 100 - kwargs_out['kwargs_data']['background_rms']=background_rms - kwargs_out['kwargs_data']['exposure_time'] = exp_time - kwargs_out['kwargs_data']['image_data'] = np.zeros((numPix,numPix)) - kwargs_out['kwargs_data'].pop('noise_map') - - data_class = ImageData(**kwargs_out['kwargs_data']) - #PSF - pixel_scale = kwargs_out['kwargs_data']['transform_pix2angle'][1][1] / kwargs_out['kwargs_psf']['point_source_supersampling_factor'] - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': pixel_scale, 'truncation': 3} 
- kwargs_out['kwargs_psf'] = kwargs_psf - psf_class = PSF(**kwargs_out['kwargs_psf']) - - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} - - - imageModel = ImageModel(data_class, psf_class, lens_model_class=lens_model_class, - source_model_class = source_model_class, lens_light_model_class = lens_light_model_class, - kwargs_numerics = kwargs_numerics) + kwargs_out["kwargs_data"]["background_rms"] = background_rms + kwargs_out["kwargs_data"]["exposure_time"] = exp_time + kwargs_out["kwargs_data"]["image_data"] = np.zeros((numPix, numPix)) + kwargs_out["kwargs_data"].pop("noise_map") + + data_class = ImageData(**kwargs_out["kwargs_data"]) + # PSF + pixel_scale = ( + kwargs_out["kwargs_data"]["transform_pix2angle"][1][1] + / kwargs_out["kwargs_psf"]["point_source_supersampling_factor"] + ) + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": pixel_scale, + "truncation": 3, + } + kwargs_out["kwargs_psf"] = kwargs_psf + psf_class = PSF(**kwargs_out["kwargs_psf"]) + + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + } + + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class=lens_model_class, + source_model_class=source_model_class, + lens_light_model_class=lens_light_model_class, + kwargs_numerics=kwargs_numerics, + ) # generate image - image_model = imageModel.image(kwargs_lens, kwargs_source, kwargs_lens_light=kwargs_lens_light, kwargs_ps=None) + image_model = imageModel.image( + kwargs_lens, + kwargs_source, + kwargs_lens_light=kwargs_lens_light, + kwargs_ps=None, + ) poisson = image_util.add_poisson(image_model, exp_time=exp_time) bkg = image_util.add_background(image_model, sigma_bkd=background_rms) image_real = image_model + poisson + bkg data_class.update_data(image_real) - kwargs_out['kwargs_data']['image_data'] = image_real + kwargs_out["kwargs_data"]["image_data"] = image_real # MODELING # Notes : @@ -134,105 +255,308 @@ def test_full(self): # The following is basically the only lines of code you will need # (after running the "create_lenstronomy_from_coolest" function) when you actually do the # modeling on a pre-existing image (with associated noise and psf proveded) - band_list = [kwargs_out['kwargs_data'], kwargs_out['kwargs_psf'], kwargs_numerics] + band_list = [ + kwargs_out["kwargs_data"], + kwargs_out["kwargs_psf"], + kwargs_numerics, + ] multi_band_list = [band_list] - kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'single-band'} - kwargs_constraints = {'joint_lens_with_light':[[0,1,['center_x','center_y','e1','e2']], - [1,1,['center_x','center_y','e1','e2']]]} - kwargs_likelihood = {'check_bounds':True, 'check_positive_flux': True} - - fitting_seq = FittingSequence(kwargs_data_joint, kwargs_out['kwargs_model'], - kwargs_constraints, kwargs_likelihood, - kwargs_out['kwargs_params']) + kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "single-band", + } + kwargs_constraints = { + "joint_lens_with_light": [ + [0, 1, ["center_x", "center_y", "e1", "e2"]], + [1, 1, ["center_x", "center_y", "e1", "e2"]], + ] + } + kwargs_likelihood = {"check_bounds": True, "check_positive_flux": True} + + fitting_seq = FittingSequence( + kwargs_data_joint, + kwargs_out["kwargs_model"], + kwargs_constraints, + kwargs_likelihood, + kwargs_out["kwargs_params"], + ) n_particules = 200 n_iterations = 10 wr = 5 n_run_mcmc = 10 n_burn_mcmc = 10 - fitting_kwargs_list = [['PSO', {'sigma_scale': 1., 'n_particles': n_particules, 
'n_iterations': n_iterations}], - ['MCMC', {'n_burn': n_burn_mcmc, 'n_run': n_run_mcmc, 'walkerRatio': wr, - 'sigma_scale': 0.01}] - ] + fitting_kwargs_list = [ + [ + "PSO", + { + "sigma_scale": 1.0, + "n_particles": n_particules, + "n_iterations": n_iterations, + }, + ], + [ + "MCMC", + { + "n_burn": n_burn_mcmc, + "n_run": n_run_mcmc, + "walkerRatio": wr, + "sigma_scale": 0.01, + }, + ], + ] chain_list = fitting_seq.fit_sequence(fitting_kwargs_list) kwargs_result = fitting_seq.best_fit() - modelPlot = ModelPlot(kwargs_data_joint['multi_band_list'], kwargs_out['kwargs_model'], kwargs_result) + modelPlot = ModelPlot( + kwargs_data_joint["multi_band_list"], + kwargs_out["kwargs_model"], + kwargs_result, + ) modelPlot._imageModel.image_linear_solve(inv_bool=True, **kwargs_result) - #the last 2 lines are meant for solving the linear parameters + # the last 2 lines are meant for solving the linear parameters # use the function to save mcmc chains in userfriendly mode # kwargs_mcmc = create_kwargs_mcmc_from_chain_list(chain_list,kwargs_out['kwargs_model'],kwargs_out['kwargs_params'], # kwargs_out['kwargs_data'],kwargs_out['kwargs_psf'],kwargs_numerics, # kwargs_constraints,idx_chain=1) - kwargs_mcmc = create_kwargs_mcmc_from_chain_list(chain_list, kwargs_out['kwargs_model'], - kwargs_out['kwargs_params'], - kwargs_out['kwargs_data'], kwargs_out['kwargs_psf'], - kwargs_numerics, - kwargs_constraints, idx_chain=1, likelihood_threshold= -100000) + kwargs_mcmc = create_kwargs_mcmc_from_chain_list( + chain_list, + kwargs_out["kwargs_model"], + kwargs_out["kwargs_params"], + kwargs_out["kwargs_data"], + kwargs_out["kwargs_psf"], + kwargs_numerics, + kwargs_constraints, + idx_chain=1, + likelihood_threshold=-100000, + ) # save the results (aka update the COOLEST json) - update_coolest_from_lenstronomy(path+"/coolest_template",kwargs_result, kwargs_mcmc) + update_coolest_from_lenstronomy( + path + "/coolest_template", kwargs_result, kwargs_mcmc + ) return def test_pemd(self): path = os.getcwd() - if path[-11:] == 'lenstronomy': - path+='/test/test_Util' - kwargs_out = create_lenstronomy_from_coolest(path+"/coolest_template_pemd") + if path[-11:] == "lenstronomy": + path += "/test/test_Util" + kwargs_out = create_lenstronomy_from_coolest(path + "/coolest_template_pemd") print(kwargs_out) # kwargs_results to update the COOLEST template - kwargs_result={"kwargs_lens":[{'theta_E': 0.7, 'e1': -0.15, 'e2': 0.01, 'gamma': 2.1, - 'center_x': 0.03, 'center_y': 0.01}], - "kwargs_source":[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}], - "kwargs_lens_light":[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}]} + kwargs_result = { + "kwargs_lens": [ + { + "theta_E": 0.7, + "e1": -0.15, + "e2": 0.01, + "gamma": 2.1, + "center_x": 0.03, + "center_y": 0.01, + } + ], + "kwargs_source": [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + } + ], + "kwargs_lens_light": [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + } + ], + } # kwargs_mcmc to update the COOLEST template. 
In real cases, this list would be much bigger # as each element is a result from a given point at a given iteration of a MCMC chain - kwargs_mcmc={"args_lens":[[{'theta_E': 0.68, 'e1': -0.10, 'e2': -0.04, 'gamma': 1.9, - 'center_x': 0.02, 'center_y': 0.10}],[{'theta_E': 0.65, 'e1': -0.10, 'e2': -0.04, 'gamma': 1.9, - 'center_x': 0.03, 'center_y': 0.01}],[{'theta_E': 0.65, 'e1': -0.10, 'e2': -0.04, 'gamma': 1.9, - 'center_x': 0.03, 'center_y': 0.01}],[{'theta_E': 0.65, 'e1': -0.10, 'e2': -0.04, 'gamma': 1.9, - 'center_x': 0.03, 'center_y': 0.01}]], - "args_source":[[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}],[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}],[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}],[{'amp':15.,'R_sersic':0.11,'n_sersic':3.6,'center_x':0.02, - 'center_y':-0.03,'e1':0.1,'e2':-0.2}]], - "args_lens_light":[[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}],[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}],[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}],[{'amp':11.,'R_sersic':0.2,'n_sersic':3.,'center_x':0.03, - 'center_y':0.01,'e1':-0.15,'e2':0.01}]]} - update_coolest_from_lenstronomy(path+"/coolest_template_pemd",kwargs_result,ending="_update", - kwargs_mcmc=kwargs_mcmc) - kwargs_out = create_lenstronomy_from_coolest(path+"/coolest_template_pemd_update") + kwargs_mcmc = { + "args_lens": [ + [ + { + "theta_E": 0.68, + "e1": -0.10, + "e2": -0.04, + "gamma": 1.9, + "center_x": 0.02, + "center_y": 0.10, + } + ], + [ + { + "theta_E": 0.65, + "e1": -0.10, + "e2": -0.04, + "gamma": 1.9, + "center_x": 0.03, + "center_y": 0.01, + } + ], + [ + { + "theta_E": 0.65, + "e1": -0.10, + "e2": -0.04, + "gamma": 1.9, + "center_x": 0.03, + "center_y": 0.01, + } + ], + [ + { + "theta_E": 0.65, + "e1": -0.10, + "e2": -0.04, + "gamma": 1.9, + "center_x": 0.03, + "center_y": 0.01, + } + ], + ], + "args_source": [ + [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + } + ], + [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + } + ], + [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + } + ], + [ + { + "amp": 15.0, + "R_sersic": 0.11, + "n_sersic": 3.6, + "center_x": 0.02, + "center_y": -0.03, + "e1": 0.1, + "e2": -0.2, + } + ], + ], + "args_lens_light": [ + [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + } + ], + [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + } + ], + [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + } + ], + [ + { + "amp": 11.0, + "R_sersic": 0.2, + "n_sersic": 3.0, + "center_x": 0.03, + "center_y": 0.01, + "e1": -0.15, + "e2": 0.01, + } + ], + ], + } + update_coolest_from_lenstronomy( + path + "/coolest_template_pemd", + kwargs_result, + ending="_update", + kwargs_mcmc=kwargs_mcmc, + ) + kwargs_out = create_lenstronomy_from_coolest( + path + "/coolest_template_pemd_update" + ) print(kwargs_out) - 
npt.assert_almost_equal(kwargs_out['kwargs_params']['lens_model'][0][0]['e1'], - kwargs_result['kwargs_lens'][0]['e1'], decimal=4) - npt.assert_almost_equal(kwargs_out['kwargs_params']['lens_model'][0][0]['e2'], - kwargs_result['kwargs_lens'][0]['e2'], decimal=4) + npt.assert_almost_equal( + kwargs_out["kwargs_params"]["lens_model"][0][0]["e1"], + kwargs_result["kwargs_lens"][0]["e1"], + decimal=4, + ) + npt.assert_almost_equal( + kwargs_out["kwargs_params"]["lens_model"][0][0]["e2"], + kwargs_result["kwargs_lens"][0]["e2"], + decimal=4, + ) return + def test_util_functions(self): radian = degree_coolest_to_radian_lenstronomy(None) - radian = degree_coolest_to_radian_lenstronomy(-120.) - npt.assert_almost_equal(radian,np.pi/6., decimal=4) + radian = degree_coolest_to_radian_lenstronomy(-120.0) + npt.assert_almost_equal(radian, np.pi / 6.0, decimal=4) - radian = degree_coolest_to_radian_lenstronomy(120.) - npt.assert_almost_equal(radian,5*np.pi/6., decimal=4) + radian = degree_coolest_to_radian_lenstronomy(120.0) + npt.assert_almost_equal(radian, 5 * np.pi / 6.0, decimal=4) - ellibounds_coolest_to_lenstronomy(0.6,1.0,None,None) - shearbounds_coolest_to_lenstronomy(0.0, 0.1, -90., None) + ellibounds_coolest_to_lenstronomy(0.6, 1.0, None, None) + shearbounds_coolest_to_lenstronomy(0.0, 0.1, -90.0, None) shapelet_amp_lenstronomy_to_coolest(None) - folding_coolest(np.array([-95.,95.])) - folding_coolest(-95.) - folding_coolest(95.) - + folding_coolest(np.array([-95.0, 95.0])) + folding_coolest(-95.0) + folding_coolest(95.0) return diff --git a/test/test_Util/test_correlation.py b/test/test_Util/test_correlation.py index ce5087106..7609b703b 100644 --- a/test/test_Util/test_correlation.py +++ b/test/test_Util/test_correlation.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.correlation as correlation @@ -10,7 +10,6 @@ class TestCorrelation(object): - def setup_method(self): pass @@ -22,31 +21,37 @@ def test_power_spectrum_2D(self): npt.assert_almost_equal(np.max(psd2D), 1, decimal=1) def test_power_spectrum_1D(self): - num_pix = 100 np.random.seed(42) I = np.random.random((num_pix, num_pix)) - #I[5, 5] = 100 - #I[50, 5] = 100 + # I[5, 5] = 100 + # I[50, 5] = 100 psd1D, r = correlation.power_spectrum_1d(I) - #print(np.max(psd1D)) - #print(psd1D) + # print(np.max(psd1D)) + # print(psd1D) - #import matplotlib.pyplot as plt - #plt.plot(psd1D) - #plt.show() - print(np.average(psd1D[: int(num_pix/2.)])) + # import matplotlib.pyplot as plt + # plt.plot(psd1D) + # plt.show() + print(np.average(psd1D[: int(num_pix / 2.0)])) # this tests whether the white noise power-spectrum is flat: - npt.assert_almost_equal(np.average(psd1D[: int(num_pix/2.)]) / np.average(psd1D[int(num_pix/2.):]), 1, decimal=1) + npt.assert_almost_equal( + np.average(psd1D[: int(num_pix / 2.0)]) + / np.average(psd1D[int(num_pix / 2.0) :]), + 1, + decimal=1, + ) num_pix = 10 residuals = np.ones((num_pix, num_pix)) residuals[5, 5] = num_pix**2 psd1D, r = correlation.power_spectrum_1d(residuals) print(psd1D) - npt.assert_almost_equal(psd1D, ((num_pix**2-1.)/num_pix**2)**2, decimal=7) + npt.assert_almost_equal( + psd1D, ((num_pix**2 - 1.0) / num_pix**2) ** 2, decimal=7 + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_data_util.py b/test/test_Util/test_data_util.py index 9c807ed92..4679dafb2 100644 --- a/test/test_Util/test_data_util.py +++ b/test/test_Util/test_data_util.py @@ -7,13 +7,15 @@ def test_absolute2apparent_magnitude(): 
absolute_magnitude = 0 d_parsec = 10 - apparent_magnitude = data_util.absolute2apparent_magnitude(absolute_magnitude, d_parsec) + apparent_magnitude = data_util.absolute2apparent_magnitude( + absolute_magnitude, d_parsec + ) npt.assert_almost_equal(apparent_magnitude, 0, decimal=8) def test_adu_electron_conversion(): - adu = 1. - gain = 4. + adu = 1.0 + gain = 4.0 e_ = data_util.adu2electrons(adu, ccd_gain=gain) adu_new = data_util.electrons2adu(e_, ccd_gain=gain) npt.assert_almost_equal(adu_new, adu, decimal=9) @@ -28,17 +30,22 @@ def test_magnitude2cps(): def test_bkg_noise(): - readout_noise = 2 exposure_time = 100 sky_brightness = 0.01 pixel_scale = 0.05 num_exposures = 10 - sigma_bkg = data_util.bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scale, num_exposures=num_exposures) + sigma_bkg = data_util.bkg_noise( + readout_noise, + exposure_time, + sky_brightness, + pixel_scale, + num_exposures=num_exposures, + ) exposure_time_tot = num_exposures * exposure_time - readout_noise_tot = num_exposures * readout_noise ** 2 # square of readout noise - sky_per_pixel = sky_brightness * pixel_scale ** 2 + readout_noise_tot = num_exposures * readout_noise**2 # square of readout noise + sky_per_pixel = sky_brightness * pixel_scale**2 sky_brightness_tot = exposure_time_tot * sky_per_pixel sigma_bkg_ = np.sqrt(readout_noise_tot + sky_brightness_tot) / exposure_time_tot npt.assert_almost_equal(sigma_bkg_, sigma_bkg, decimal=8) @@ -51,12 +58,15 @@ def test_flux_noise(): def test_magnitude2amplitude(): from lenstronomy.LightModel.light_model import LightModel - light_model_class = LightModel(['GAUSSIAN']) - kwargs_light_mag = [{'magnitude': 0, 'sigma': 2, 'center_x': 0, 'center_y': 0}] + + light_model_class = LightModel(["GAUSSIAN"]) + kwargs_light_mag = [{"magnitude": 0, "sigma": 2, "center_x": 0, "center_y": 0}] magnitude_zero_point = 2.5 - kwargs_light_amp = data_util.magnitude2amplitude(light_model_class, kwargs_light_mag, magnitude_zero_point) - npt.assert_almost_equal(kwargs_light_amp[0]['amp'], 10, decimal=5) + kwargs_light_amp = data_util.magnitude2amplitude( + light_model_class, kwargs_light_mag, magnitude_zero_point + ) + npt.assert_almost_equal(kwargs_light_amp[0]["amp"], 10, decimal=5) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_derivative_util.py b/test/test_Util/test_derivative_util.py index 155a6247c..12a0383b0 100644 --- a/test/test_Util/test_derivative_util.py +++ b/test/test_Util/test_derivative_util.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.derivative_util as calc_util import pytest @@ -9,9 +9,8 @@ class TestCalcUtil(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): pass @@ -85,7 +84,7 @@ def test_d_x_diffr_dy(self): assert out == 0 def test_d_phi_dx(self): - x, y = np.array([1., 0., -1.]), np.array([1., 1., -1.]) + x, y = np.array([1.0, 0.0, -1.0]), np.array([1.0, 1.0, -1.0]) dx, dy = 0.0001, 0.0001 r, phi = param_util.cart2polar(x, y, center_x=0, center_y=0) @@ -139,5 +138,5 @@ def test_d_r_dxx(self): npt.assert_almost_equal(d_r_dxy_num, d_r_dxy, decimal=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_image_util.py b/test/test_Util/test_image_util.py index a5fa6d221..bd6e53dfa 100644 --- a/test/test_Util/test_image_util.py +++ b/test/test_Util/test_image_util.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import 
lenstronomy.Util.util as util import pytest @@ -67,21 +67,21 @@ def test_add_layer2image_int(): def test_add_background(): image = np.ones((10, 10)) - sigma_bkgd = 1. + sigma_bkgd = 1.0 image_noisy = image_util.add_background(image, sigma_bkgd) - assert abs(np.sum(image_noisy)) < np.sqrt(np.sum(image)*sigma_bkgd)*3 + assert abs(np.sum(image_noisy)) < np.sqrt(np.sum(image) * sigma_bkgd) * 3 def test_add_poisson(): image = np.ones((100, 100)) - exp_time = 100. + exp_time = 100.0 poisson = image_util.add_poisson(image, exp_time) - assert abs(np.sum(poisson)) < np.sqrt(np.sum(image)/exp_time)*10 + assert abs(np.sum(poisson)) < np.sqrt(np.sum(image) / exp_time) * 10 def test_findOverlap(): - x_mins = [0,1,0] - y_mins = [1,2,1] + x_mins = [0, 1, 0] + y_mins = [1, 2, 1] deltapix = 0.5 x_mins, y_mins = image_util.findOverlap(x_mins, y_mins, deltapix) print(x_mins, y_mins) @@ -91,8 +91,8 @@ def test_findOverlap(): def test_coordInImage(): - x_coord = [100,20,-10] - y_coord = [0,-30,5] + x_coord = [100, 20, -10] + y_coord = [0, -30, 5] numPix = 50 deltapix = 1 x_result, y_result = image_util.coordInImage(x_coord, y_coord, numPix, deltapix) @@ -101,10 +101,37 @@ def test_coordInImage(): def test_rebin_coord_transform(): - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=3, deltapix=0.03, subgrid_res=1) - x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=1, deltapix=0.09, subgrid_res=1) - - ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix) + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform(numPix=3, deltapix=0.03, subgrid_res=1) + ( + x_grid, + y_grid, + ra_at_xy_0_re, + dec_at_xy_0_re, + x_at_radec_0_re, + y_at_radec_0_re, + Mpix2coord_re, + Mcoord2pix_re, + ) = util.make_grid_with_coordtransform(numPix=1, deltapix=0.09, subgrid_res=1) + + ( + ra_at_xy_0_resized, + dec_at_xy_0_resized, + x_at_radec_0_resized, + y_at_radec_0_resized, + Mpix2coord_resized, + Mcoord2pix_resized, + ) = image_util.rebin_coord_transform( + 3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix + ) assert ra_at_xy_0_resized == ra_at_xy_0_re assert dec_at_xy_0_resized == dec_at_xy_0_re assert x_at_radec_0_resized == x_at_radec_0_re @@ -112,10 +139,37 @@ def test_rebin_coord_transform(): npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8) npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8) - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=100, deltapix=0.05, subgrid_res=1) - x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=50, deltapix=0.1, subgrid_res=1) - - ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(2, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix) + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = 
util.make_grid_with_coordtransform(numPix=100, deltapix=0.05, subgrid_res=1) + ( + x_grid, + y_grid, + ra_at_xy_0_re, + dec_at_xy_0_re, + x_at_radec_0_re, + y_at_radec_0_re, + Mpix2coord_re, + Mcoord2pix_re, + ) = util.make_grid_with_coordtransform(numPix=50, deltapix=0.1, subgrid_res=1) + + ( + ra_at_xy_0_resized, + dec_at_xy_0_resized, + x_at_radec_0_resized, + y_at_radec_0_resized, + Mpix2coord_resized, + Mcoord2pix_resized, + ) = image_util.rebin_coord_transform( + 2, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix + ) assert ra_at_xy_0_resized == ra_at_xy_0_re assert dec_at_xy_0_resized == dec_at_xy_0_re assert x_at_radec_0_resized == x_at_radec_0_re @@ -123,11 +177,38 @@ def test_rebin_coord_transform(): npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8) npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8) - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=99, deltapix=0.1, subgrid_res=1) - x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re, y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re = util.make_grid_with_coordtransform(numPix=33, deltapix=0.3, subgrid_res=1) + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform(numPix=99, deltapix=0.1, subgrid_res=1) + ( + x_grid, + y_grid, + ra_at_xy_0_re, + dec_at_xy_0_re, + x_at_radec_0_re, + y_at_radec_0_re, + Mpix2coord_re, + Mcoord2pix_re, + ) = util.make_grid_with_coordtransform(numPix=33, deltapix=0.3, subgrid_res=1) assert x_at_radec_0 == 49 - ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized = image_util.rebin_coord_transform(3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix) + ( + ra_at_xy_0_resized, + dec_at_xy_0_resized, + x_at_radec_0_resized, + y_at_radec_0_resized, + Mpix2coord_resized, + Mcoord2pix_resized, + ) = image_util.rebin_coord_transform( + 3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix + ) assert x_at_radec_0_resized == 16 npt.assert_almost_equal(ra_at_xy_0_resized, ra_at_xy_0_re, decimal=8) @@ -137,15 +218,19 @@ def test_rebin_coord_transform(): npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8) npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8) - x_in, y_in = 10., 10. + x_in, y_in = 10.0, 10.0 ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0, dec_at_xy_0, Mpix2coord) x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix) assert x_in == x_out assert y_in == y_out - x_in, y_in = 10., 10. 
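The round-trip assertions above (pixel to sky and back returns the input) only require the forward and backward transform matrices to be inverses of each other. A standalone sketch of the same invariant with a toy pixel scale, not the lenstronomy coordinate bookkeeping:

import numpy as np

# toy linear pixel <-> sky mapping: sky = M @ (pixel - pixel_at_radec_0)
M = np.array([[0.05, 0.0], [0.0, 0.05]])   # arcsec per pixel
M_inv = np.linalg.inv(M)
pixel_at_radec_0 = np.array([49.0, 49.0])  # pixel position of the (0, 0) coordinate

pixel_in = np.array([10.0, 10.0])
sky = M @ (pixel_in - pixel_at_radec_0)
pixel_out = M_inv @ sky + pixel_at_radec_0
assert np.allclose(pixel_in, pixel_out)    # the round trip is the identity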
- ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0_resized, dec_at_xy_0_resized, Mpix2coord_resized) - x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0_resized, y_at_radec_0_resized, Mcoord2pix_resized) + x_in, y_in = 10.0, 10.0 + ra, dec = util.map_coord2pix( + x_in, y_in, ra_at_xy_0_resized, dec_at_xy_0_resized, Mpix2coord_resized + ) + x_out, y_out = util.map_coord2pix( + ra, dec, x_at_radec_0_resized, y_at_radec_0_resized, Mcoord2pix_resized + ) assert x_in == x_out assert y_in == y_out @@ -158,43 +243,48 @@ def test_rotateImage(): angle = 360 im_rot = image_util.rotateImage(img, angle) npt.assert_almost_equal(im_rot[1, 2], 0.5, decimal=10) - npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10) - npt.assert_almost_equal(im_rot[2, 1], 0., decimal=10) + npt.assert_almost_equal(im_rot[2, 2], 1.0, decimal=10) + npt.assert_almost_equal(im_rot[2, 1], 0.0, decimal=10) - angle = 360./2 + angle = 360.0 / 2 im_rot = image_util.rotateImage(img, angle) - npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10) - npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10) + npt.assert_almost_equal(im_rot[1, 2], 0.0, decimal=10) + npt.assert_almost_equal(im_rot[2, 2], 1.0, decimal=10) npt.assert_almost_equal(im_rot[3, 2], 0.5, decimal=10) - angle = 360./4 + angle = 360.0 / 4 im_rot = image_util.rotateImage(img, angle) - npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10) - npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10) + npt.assert_almost_equal(im_rot[1, 2], 0.0, decimal=10) + npt.assert_almost_equal(im_rot[2, 2], 1.0, decimal=10) npt.assert_almost_equal(im_rot[2, 1], 0.5, decimal=10) - angle = 360./8 + angle = 360.0 / 8 im_rot = image_util.rotateImage(img, angle) npt.assert_almost_equal(im_rot[1, 2], 0.23931518624017051, decimal=10) - npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10) + npt.assert_almost_equal(im_rot[2, 2], 1.0, decimal=10) npt.assert_almost_equal(im_rot[2, 1], 0.23931518624017073, decimal=10) def test_re_size_array(): numPix = 9 kernel = np.zeros((numPix, numPix)) - kernel[int((numPix-1)/2), int((numPix-1)/2)] = 1 + kernel[int((numPix - 1) / 2), int((numPix - 1) / 2)] = 1 subgrid_res = 2 input_values = kernel x_in = np.linspace(0, 1, numPix) - x_out = np.linspace(0, 1, numPix*subgrid_res) + x_out = np.linspace(0, 1, numPix * subgrid_res) out_values = image_util.re_size_array(x_in, x_in, input_values, x_out, x_out) kernel_out = out_values - assert kernel_out[int((numPix*subgrid_res-1)/2), int((numPix*subgrid_res-1)/2)] == 0.58477508650519028 + assert ( + kernel_out[ + int((numPix * subgrid_res - 1) / 2), int((numPix * subgrid_res - 1) / 2) + ] + == 0.58477508650519028 + ) def test_symmetry_average(): - image = np.zeros((5,5)) + image = np.zeros((5, 5)) image[2, 3] = 1 symmetry = 2 img_sym = image_util.symmetry_average(image, symmetry) @@ -202,7 +292,7 @@ def test_symmetry_average(): def test_cut_edges(): - image = np.zeros((51,51)) + image = np.zeros((51, 51)) image[25][25] = 1 numPix = 21 resized = image_util.cut_edges(image, numPix) @@ -245,13 +335,15 @@ def test_re_size(): def test_stack_images(): numPix = 10 image1 = np.ones((numPix, numPix)) - image2 = np.ones((numPix, numPix)) / 10. 
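The cut_edges test above trims a 51 x 51 frame with a single central pixel down to 21 x 21; the invariant it relies on, that a centered cutout keeps the flux at the new center, can be written as plain numpy slicing (a sketch, not the image_util implementation):

import numpy as np

image = np.zeros((51, 51))
image[25, 25] = 1
num_pix = 21
start = (image.shape[0] - num_pix) // 2            # 15 for a 51 -> 21 cut
cutout = image[start:start + num_pix, start:start + num_pix]
assert cutout.shape == (num_pix, num_pix)
assert cutout[num_pix // 2, num_pix // 2] == 1     # central flux stays at the center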
+ image2 = np.ones((numPix, numPix)) / 10.0 image_list = [image1, image2] wht1 = np.ones((numPix, numPix)) wht2 = np.ones((numPix, numPix)) * 10 wht_list = [wht1, wht2] sigma_list = [0.1, 0.2] - image_stacked, wht_stacked, sigma_stacked = image_util.stack_images(image_list=image_list, wht_list=wht_list, sigma_list=sigma_list) + image_stacked, wht_stacked, sigma_stacked = image_util.stack_images( + image_list=image_list, wht_list=wht_list, sigma_list=sigma_list + ) assert sigma_stacked == 0.19306145983268458 assert image_stacked[0, 0] == 0.18181818181818182 assert wht_stacked[0, 0] == 5.5 @@ -267,7 +359,16 @@ def test_rebin_image(): ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05) ra_coords = util.array2image(ra_coords) dec_coords = util.array2image(dec_coords) - image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized = image_util.rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask) + ( + image_resized, + wht_map_resized, + sigma_bkg_resized, + ra_coords_resized, + dec_coords_resized, + idex_mask_resized, + ) = image_util.rebin_image( + bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask + ) assert image_resized[0, 0] == 4 assert wht_map_resized[0, 0] == wht_map[0, 0] assert sigma_bkg_resized == 0.2 @@ -282,8 +383,16 @@ def test_rebin_image(): ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05) ra_coords = util.array2image(ra_coords) dec_coords = util.array2image(dec_coords) - image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized = image_util.rebin_image( - bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask) + ( + image_resized, + wht_map_resized, + sigma_bkg_resized, + ra_coords_resized, + dec_coords_resized, + idex_mask_resized, + ) = image_util.rebin_image( + bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask + ) assert image_resized[0, 0] == 4 assert wht_map_resized[0, 0] == wht_map[0, 0] assert sigma_bkg_resized == 0.2 @@ -292,6 +401,7 @@ def test_rebin_image(): def test_radial_profile(): from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gauss = Gaussian() x, y = util.make_grid(11, 1) flux = gauss.function(x, y, sigma=10, amp=1) @@ -314,9 +424,7 @@ def test_gradient_map(): class TestRaise(unittest.TestCase): - def test_raise(self): - with self.assertRaises(ValueError): grid2d = np.zeros((7, 7)) x_pos, y_pos = 4, 1 @@ -339,5 +447,5 @@ def test_raise(self): image_util.cut_edges(image, num_pix=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_kernel_util.py b/test/test_Util/test_kernel_util.py index 9c7d5ebb1..c643c07c2 100644 --- a/test/test_Util/test_kernel_util.py +++ b/test/test_Util/test_kernel_util.py @@ -3,6 +3,7 @@ import lenstronomy.Util.image_util as image_util import lenstronomy.Util.util as util from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() import pytest import unittest @@ -20,7 +21,7 @@ def test_fwhm_kernel(): kernel = kernel_util.kernel_norm(kernel) fwhm_kernel = kernel_util.fwhm_kernel(kernel) fwhm = Util.sigma2fwhm(sigma) - npt.assert_almost_equal(fwhm/fwhm_kernel, 1, 2) + npt.assert_almost_equal(fwhm / fwhm_kernel, 1, 2) def test_center_kernel(): @@ -33,9 +34,9 @@ def test_center_kernel(): # kernel being centered kernel_new = kernel_util.center_kernel(kernel, iterations=20) kernel_new = kernel_util.kernel_norm(kernel_new) - 
npt.assert_almost_equal(kernel_new/kernel, 1, decimal=8) + npt.assert_almost_equal(kernel_new / kernel, 1, decimal=8) # kernel shifted in x - kernel_shifted = shift(kernel, shift=[-.1, 0], order=1) + kernel_shifted = shift(kernel, shift=[-0.1, 0], order=1) kernel_new = kernel_util.center_kernel(kernel_shifted, iterations=5) kernel_new = kernel_util.kernel_norm(kernel_new) npt.assert_almost_equal((kernel_new + 0.00001) / (kernel + 0.00001), 1, decimal=4) @@ -62,10 +63,7 @@ def test_pixelsize_change(): def test_cutout_source(): - """ - test whether a shifted psf can be reproduced sufficiently well - :return: - """ + """Test whether a shifted psf can be reproduced sufficiently well :return:""" kernel_size = 5 image = np.zeros((10, 10)) kernel = np.zeros((kernel_size, kernel_size)) @@ -75,10 +73,12 @@ def test_cutout_source(): x_c, y_c = 5, 5 x_pos = x_c + shift_x y_pos = y_c + shift_y - #kernel_shifted = interp.shift(kernel, [shift_y, shift_x], order=1) + # kernel_shifted = interp.shift(kernel, [shift_y, shift_x], order=1) image = image_util.add_layer2image(image, x_pos, y_pos, kernel, order=1) print(image) - kernel_new = kernel_util.cutout_source(x_pos=x_pos, y_pos=y_pos, image=image, kernelsize=kernel_size) + kernel_new = kernel_util.cutout_source( + x_pos=x_pos, y_pos=y_pos, image=image, kernelsize=kernel_size + ) npt.assert_almost_equal(kernel_new[2, 2], kernel[2, 2], decimal=2) @@ -92,9 +92,11 @@ def test_cutout_source_border(): x_c, y_c = 2, 5 x_pos = x_c + shift_x y_pos = y_c + shift_y - #kernel_shifted = interp.shift(kernel, [shift_y, shift_x], order=1) + # kernel_shifted = interp.shift(kernel, [shift_y, shift_x], order=1) image = image_util.add_layer2image(image, x_pos, y_pos, kernel, order=1) - kernel_new = kernel_util.cutout_source(x_pos=x_pos, y_pos=y_pos, image=image, kernelsize=kernel_size) + kernel_new = kernel_util.cutout_source( + x_pos=x_pos, y_pos=y_pos, image=image, kernelsize=kernel_size + ) nx_new, ny_new = np.shape(kernel_new) print(kernel_new) assert nx_new == kernel_size @@ -115,8 +117,10 @@ def test_de_shift(): shift_x = 0.48 shift_y = 0.2 kernel_shifted = shift(kernel, shift=[-shift_y, -shift_x], order=1) - kernel_de_shifted = kernel_util.de_shift_kernel(kernel_shifted, shift_x, shift_y, iterations=50) - delta_max = np.max(kernel- kernel_de_shifted) + kernel_de_shifted = kernel_util.de_shift_kernel( + kernel_shifted, shift_x, shift_y, iterations=50 + ) + delta_max = np.max(kernel - kernel_de_shifted) assert delta_max < 0.01 npt.assert_almost_equal(kernel_de_shifted[2, 2], kernel[2, 2], decimal=2) @@ -126,7 +130,9 @@ def test_de_shift(): shift_x = 1.48 shift_y = 0.2 kernel_shifted = shift(kernel, shift=[-shift_y, -shift_x], order=1) - kernel_de_shifted = kernel_util.de_shift_kernel(kernel_shifted, shift_x, shift_y, iterations=50) + kernel_de_shifted = kernel_util.de_shift_kernel( + kernel_shifted, shift_x, shift_y, iterations=50 + ) delta_max = np.max(kernel - kernel_de_shifted) assert delta_max < 0.01 npt.assert_almost_equal(kernel_de_shifted[2, 2], kernel[2, 2], decimal=2) @@ -138,7 +144,9 @@ def test_de_shift(): shift_x = 1.48 shift_y = 0.2 kernel_shifted = shift(kernel, shift=[-shift_y, -shift_x], order=1) - kernel_de_shifted = kernel_util.de_shift_kernel(kernel_shifted, shift_x, shift_y, iterations=50) + kernel_de_shifted = kernel_util.de_shift_kernel( + kernel_shifted, shift_x, shift_y, iterations=50 + ) delta_max = np.max(kernel - kernel_de_shifted) assert delta_max < 0.01 npt.assert_almost_equal(kernel_de_shifted[2, 2], kernel[2, 2], decimal=2) @@ -152,7 
+160,9 @@ def test_deshift_subgrid(): kernel_subgrid_size = kernel_size * subgrid kernel_subgrid = np.zeros((kernel_subgrid_size, kernel_subgrid_size)) kernel_subgrid[7, 7] = 2 - kernel_subgrid = kernel_util.kernel_gaussian(kernel_subgrid_size, 1./subgrid, fwhm=fwhm) + kernel_subgrid = kernel_util.kernel_gaussian( + kernel_subgrid_size, 1.0 / subgrid, fwhm=fwhm + ) kernel = util.averaging(kernel_subgrid, kernel_subgrid_size, kernel_size) @@ -160,23 +170,26 @@ def test_deshift_subgrid(): shift_y = 0.2 shift_x_subgird = shift_x * subgrid shift_y_subgrid = shift_y * subgrid - kernel_shifted_subgrid = shift(kernel_subgrid, shift=[-shift_y_subgrid, -shift_x_subgird], order=1) - kernel_shifted = util.averaging(kernel_shifted_subgrid, kernel_subgrid_size, kernel_size) - kernel_shifted_highres = kernel_util.subgrid_kernel(kernel_shifted, subgrid_res=subgrid, num_iter=1) - #npt.assert_almost_equal(kernel_shifted_highres[7, 7], kernel_shifted_subgrid[7, 7], decimal=10) + kernel_shifted_subgrid = shift( + kernel_subgrid, shift=[-shift_y_subgrid, -shift_x_subgird], order=1 + ) + kernel_shifted = util.averaging( + kernel_shifted_subgrid, kernel_subgrid_size, kernel_size + ) + kernel_shifted_highres = kernel_util.subgrid_kernel( + kernel_shifted, subgrid_res=subgrid, num_iter=1 + ) + # npt.assert_almost_equal(kernel_shifted_highres[7, 7], kernel_shifted_subgrid[7, 7], decimal=10) def test_shift_long_dist(): - """ - input is a shifted kernel by more than 1 pixel - :return: - """ + """Input is a shifted kernel by more than 1 pixel :return:""" kernel_size = 9 kernel = np.zeros((kernel_size, kernel_size)) - kernel[4, 4] = 2. - shift_x = 2. - shift_y = 1. + kernel[4, 4] = 2.0 + shift_x = 2.0 + shift_y = 1.0 input_kernel = shift(kernel, shift=[-shift_y, -shift_x], order=1) old_style_kernel = shift(input_kernel, shift=[shift_y, shift_x], order=1) shifted_new = kernel_util.de_shift_kernel(input_kernel, shift_x, shift_y) @@ -188,7 +201,7 @@ def test_pixel_kernel(): # point source kernel kernel_size = 9 kernel = np.zeros((kernel_size, kernel_size)) - kernel[4, 4] = 1. 
+ kernel[4, 4] = 1.0 pixel_kernel = kernel_util.pixel_kernel(point_source_kernel=kernel, subgrid_res=1) assert pixel_kernel[4, 4] == kernel[4, 4] @@ -200,20 +213,30 @@ def test_split_kernel(): kernel = np.zeros((9, 9)) kernel[4, 4] = 1 subgrid_res = 3 - subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True) + subgrid_kernel = kernel_util.subgrid_kernel( + kernel, subgrid_res=subgrid_res, odd=True + ) subsampling_size = 3 - kernel_hole, kernel_cutout = kernel_util.split_kernel(subgrid_kernel, supersampling_kernel_size=subsampling_size, - supersampling_factor=subgrid_res) + kernel_hole, kernel_cutout = kernel_util.split_kernel( + subgrid_kernel, + supersampling_kernel_size=subsampling_size, + supersampling_factor=subgrid_res, + ) assert kernel_hole[4, 4] == 0 - assert len(kernel_cutout) == subgrid_res*subsampling_size + assert len(kernel_cutout) == subgrid_res * subsampling_size npt.assert_almost_equal(np.sum(kernel_hole) + np.sum(kernel_cutout), 1, decimal=4) subgrid_res = 2 - subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True) + subgrid_kernel = kernel_util.subgrid_kernel( + kernel, subgrid_res=subgrid_res, odd=True + ) subsampling_size = 3 - kernel_hole, kernel_cutout = kernel_util.split_kernel(subgrid_kernel, supersampling_kernel_size=subsampling_size, - supersampling_factor=subgrid_res) + kernel_hole, kernel_cutout = kernel_util.split_kernel( + subgrid_kernel, + supersampling_kernel_size=subsampling_size, + supersampling_factor=subgrid_res, + ) assert kernel_hole[4, 4] == 0 assert len(kernel_cutout) == subgrid_res * subsampling_size + 1 @@ -223,25 +246,30 @@ def test_split_kernel(): def test_cutout_source2(): grid2d = np.zeros((20, 20)) grid2d[7:9, 7:9] = 1 - kernel = kernel_util.cutout_source(x_pos=7.5, y_pos=7.5, image=grid2d, kernelsize=5, shift=False) + kernel = kernel_util.cutout_source( + x_pos=7.5, y_pos=7.5, image=grid2d, kernelsize=5, shift=False + ) assert kernel[2, 2] == 1 def test_subgrid_kernel(): - kernel = np.zeros((9, 9)) kernel[4, 4] = 1 subgrid_res = 3 - subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True) - kernel_re_sized = image_util.re_size(subgrid_kernel, factor=subgrid_res) *subgrid_res**2 - #import matplotlib.pyplot as plt - #plt.matshow(kernel); plt.show() - #plt.matshow(subgrid_kernel); plt.show() - #plt.matshow(kernel_re_sized);plt.show() - #plt.matshow(kernel_re_sized- kernel);plt.show() + subgrid_kernel = kernel_util.subgrid_kernel( + kernel, subgrid_res=subgrid_res, odd=True + ) + kernel_re_sized = ( + image_util.re_size(subgrid_kernel, factor=subgrid_res) * subgrid_res**2 + ) + # import matplotlib.pyplot as plt + # plt.matshow(kernel); plt.show() + # plt.matshow(subgrid_kernel); plt.show() + # plt.matshow(kernel_re_sized);plt.show() + # plt.matshow(kernel_re_sized- kernel);plt.show() npt.assert_almost_equal(kernel_re_sized[4, 4], 1, decimal=2) assert np.max(subgrid_kernel) == subgrid_kernel[13, 13] - #assert kernel_re_sized[4, 4] == 1 + # assert kernel_re_sized[4, 4] == 1 def test_subgrid_rebin(): @@ -249,21 +277,28 @@ def test_subgrid_rebin(): subgrid_res = 3 sigma = 1 - x_grid, y_gird = Util.make_grid(kernel_size, 1./subgrid_res, subgrid_res) + x_grid, y_gird = Util.make_grid(kernel_size, 1.0 / subgrid_res, subgrid_res) flux = gaussian.function(x_grid, y_gird, amp=1, sigma=sigma) kernel = Util.array2image(flux) print(np.shape(kernel)) - kernel = util.averaging(kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size) + kernel = util.averaging( + 
kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size + ) kernel = kernel_util.kernel_norm(kernel) - subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True) - kernel_pixel = util.averaging(subgrid_kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size) + subgrid_kernel = kernel_util.subgrid_kernel( + kernel, subgrid_res=subgrid_res, odd=True + ) + kernel_pixel = util.averaging( + subgrid_kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size + ) kernel_pixel = kernel_util.kernel_norm(kernel_pixel) - assert np.sum((kernel_pixel - kernel)**2) < 0.1 + assert np.sum((kernel_pixel - kernel) ** 2) < 0.1 def test_mge_kernel(): from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian + mg = MultiGaussian() fraction_list = [0.2, 0.7, 0.1] sigmas_scaled = [5, 10, 15] @@ -275,64 +310,67 @@ def test_mge_kernel(): print(amps, sigmas, norm) kernel_new = mg.function(x, y, amp=amps, sigma=sigmas) kernel_new = util.array2image(kernel_new) - #npt.assert_almost_equal(sigmas_scaled, sigmas) - #npt.assert_almost_equal(amps, fraction_list) + # npt.assert_almost_equal(sigmas_scaled, sigmas) + # npt.assert_almost_equal(amps, fraction_list) npt.assert_almost_equal(kernel_new, kernel, decimal=3) def test_kernel_average_pixel(): gaussian = Gaussian() subgrid_res = 3 - x_grid, y_gird = Util.make_grid(9, 1., subgrid_res) + x_grid, y_gird = Util.make_grid(9, 1.0, subgrid_res) sigma = 2 flux = gaussian.function(x_grid, y_gird, amp=1, sigma=sigma) kernel_super = Util.array2image(flux) - kernel_pixel = kernel_util.kernel_average_pixel(kernel_super, supersampling_factor=subgrid_res) + kernel_pixel = kernel_util.kernel_average_pixel( + kernel_super, supersampling_factor=subgrid_res + ) npt.assert_almost_equal(np.sum(kernel_pixel), np.sum(kernel_super)) - kernel_pixel = kernel_util.kernel_average_pixel(kernel_super, supersampling_factor=2) + kernel_pixel = kernel_util.kernel_average_pixel( + kernel_super, supersampling_factor=2 + ) npt.assert_almost_equal(np.sum(kernel_pixel), np.sum(kernel_super)) def test_averaging_even_kernel(): - subgrid_res = 4 - x_grid, y_gird = Util.make_grid(19, 1., 1) + x_grid, y_gird = Util.make_grid(19, 1.0, 1) sigma = 1.5 flux = gaussian.function(x_grid, y_gird, amp=1, sigma=sigma) kernel_super = Util.array2image(flux) kernel_pixel = kernel_util.averaging_even_kernel(kernel_super, subgrid_res) - npt.assert_almost_equal(np.sum(kernel_pixel) * subgrid_res ** 2, 1, decimal=5) + npt.assert_almost_equal(np.sum(kernel_pixel) * subgrid_res**2, 1, decimal=5) assert len(kernel_pixel) == 5 - x_grid, y_gird = Util.make_grid(17, 1., 1) + x_grid, y_gird = Util.make_grid(17, 1.0, 1) sigma = 1.5 amp = 2 flux = gaussian.function(x_grid, y_gird, amp=amp, sigma=sigma) kernel_super = Util.array2image(flux) kernel_pixel = kernel_util.averaging_even_kernel(kernel_super, subgrid_res) - npt.assert_almost_equal(np.sum(kernel_pixel) * subgrid_res ** 2, amp, decimal=5) + npt.assert_almost_equal(np.sum(kernel_pixel) * subgrid_res**2, amp, decimal=5) assert len(kernel_pixel) == 5 def test_degrade_kernel(): - - x_grid, y_gird = Util.make_grid(19*5, 1., 1) + x_grid, y_gird = Util.make_grid(19 * 5, 1.0, 1) sigma = 1.5 amp = 2 flux = gaussian.function(x_grid, y_gird, amp=2, sigma=sigma) - kernel_super = Util.array2image(flux)/np.sum(flux) * amp + kernel_super = Util.array2image(flux) / np.sum(flux) * amp for degrading_factor in range(7): - kernel_degraded = kernel_util.degrade_kernel(kernel_super, degrading_factor=degrading_factor+1) + kernel_degraded = 
kernel_util.degrade_kernel( + kernel_super, degrading_factor=degrading_factor + 1 + ) npt.assert_almost_equal(np.sum(kernel_degraded), amp, decimal=8) def test_match_kernel_sixe(): - image = np.ones((21, 21)) size = 11 image_match = kernel_util.match_kernel_size(image, size) @@ -356,12 +394,13 @@ def test_match_kernel_sixe(): def test_kernel_moffat(): - kernel = kernel_util.kernel_moffat(num_pix=10, delta_pix=0.1, fwhm=0.5, moffat_beta=1) + kernel = kernel_util.kernel_moffat( + num_pix=10, delta_pix=0.1, fwhm=0.5, moffat_beta=1 + ) npt.assert_almost_equal(np.sum(kernel), 1, decimal=3) class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): kernel = np.zeros((2, 2)) @@ -369,9 +408,13 @@ def test_raise(self): with self.assertRaises(ValueError): kernel_super = np.ones((9, 9)) - kernel_util.split_kernel(kernel_super, supersampling_kernel_size=2, supersampling_factor=3) + kernel_util.split_kernel( + kernel_super, supersampling_kernel_size=2, supersampling_factor=3 + ) with self.assertRaises(ValueError): - kernel_util.split_kernel(kernel_super, supersampling_kernel_size=3, supersampling_factor=0) + kernel_util.split_kernel( + kernel_super, supersampling_kernel_size=3, supersampling_factor=0 + ) with self.assertRaises(ValueError): image = np.ones((10, 10)) kernel_util.cutout_source(x_pos=3, y_pos=2, image=image, kernelsize=2) @@ -381,5 +424,5 @@ def test_raise(self): kernel_util.fwhm_kernel(kernel=np.ones((5, 5))) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_magnification_finite_util.py b/test/test_Util/test_magnification_finite_util.py index d4e2dfbf6..8e9b968b8 100644 --- a/test/test_Util/test_magnification_finite_util.py +++ b/test/test_Util/test_magnification_finite_util.py @@ -1,78 +1,120 @@ -from lenstronomy.Util.magnification_finite_util import auto_raytracing_grid_size, \ - auto_raytracing_grid_resolution, setup_mag_finite +from lenstronomy.Util.magnification_finite_util import ( + auto_raytracing_grid_size, + auto_raytracing_grid_resolution, + setup_mag_finite, +) from lenstronomy.LensModel.lens_model import LensModel import numpy.testing as npt import pytest class TestMagnificationFiniteUtil(object): - def test_grid_resolution(self): - - source_size = 1. + source_size = 1.0 res_scale = 0.03 - ref = 15. + ref = 15.0 power = 1.2 resolution = auto_raytracing_grid_resolution(source_size, res_scale, ref, power) - npt.assert_almost_equal(resolution, res_scale * (source_size/ref) ** power) + npt.assert_almost_equal(resolution, res_scale * (source_size / ref) ** power) def test_grid_size(self): - - source_size = 20. + source_size = 20.0 size = auto_raytracing_grid_size(source_size, grid_size_scale=0.04, power=0.9) - npt.assert_almost_equal(size, 0.04 * source_size ** 0.9) + npt.assert_almost_equal(size, 0.04 * source_size**0.9) def test_setup(self): - cosmo = None - lens_model = LensModel(['EPL']) + lens_model = LensModel(["EPL"]) grid_radius_arcsec = None grid_resolution = None - source_fwhm_parsec = 30. - source_light_model = 'SINGLE_GAUSSIAN' - z_source = 2. - source_x, source_y = 0., 0. 
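The two grid scaling tests above compare the returned values against reference expressions written directly in the asserts. Evaluating those expressions with the tests' own numbers gives the expected magnitudes:

# reference expressions from the asserts above, with the tests' own numbers
res_scale, ref, power = 0.03, 15.0, 1.2
source_size = 1.0
resolution = res_scale * (source_size / ref) ** power      # ~0.00116
grid_size_scale, size_power = 0.04, 0.9
source_size = 20.0
grid_size = grid_size_scale * source_size ** size_power    # ~0.593
print(resolution, grid_size)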
+ source_fwhm_parsec = 30.0 + source_light_model = "SINGLE_GAUSSIAN" + z_source = 2.0 + source_x, source_y = 0.0, 0.0 dx, dy, amp_scale, size_scale = None, None, None, None - gridx, gridy, source_model, kwargs_source, grid_resolution, grid_radius_arcsec = setup_mag_finite(cosmo, lens_model, grid_radius_arcsec, grid_resolution, - source_fwhm_parsec, source_light_model, - z_source, source_x, source_y, dx, dy, - amp_scale, size_scale) - npt.assert_equal(True, len(source_model.func_list)==1) + ( + gridx, + gridy, + source_model, + kwargs_source, + grid_resolution, + grid_radius_arcsec, + ) = setup_mag_finite( + cosmo, + lens_model, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, + ) + npt.assert_equal(True, len(source_model.func_list) == 1) npt.assert_equal(True, grid_resolution is not None) npt.assert_equal(True, grid_radius_arcsec is not None) grid_resolution = 0.001 grid_radius_arcsec = 0.05 - dx, dy, amp_scale, size_scale = 0., 0.1, 1., 1. - source_light_model = 'DOUBLE_GAUSSIAN' - gridx, gridy, source_model, kwargs_source, grid_resolution, grid_radius_arcsec = setup_mag_finite(cosmo, lens_model, grid_radius_arcsec, grid_resolution, - source_fwhm_parsec, source_light_model, - z_source, source_x, source_y, dx, dy, - amp_scale, size_scale) + dx, dy, amp_scale, size_scale = 0.0, 0.1, 1.0, 1.0 + source_light_model = "DOUBLE_GAUSSIAN" + ( + gridx, + gridy, + source_model, + kwargs_source, + grid_resolution, + grid_radius_arcsec, + ) = setup_mag_finite( + cosmo, + lens_model, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, + ) npt.assert_equal(True, len(source_model.func_list) == 2) - npt.assert_equal(kwargs_source[1]['center_y'], kwargs_source[0]['center_y'] + dy) - npt.assert_equal(kwargs_source[1]['center_x'], kwargs_source[0]['center_x'] + dx) + npt.assert_equal( + kwargs_source[1]["center_y"], kwargs_source[0]["center_y"] + dy + ) + npt.assert_equal( + kwargs_source[1]["center_x"], kwargs_source[0]["center_x"] + dx + ) npt.assert_equal(True, grid_resolution == 0.001) npt.assert_equal(True, grid_radius_arcsec == 0.05) - source_light_model = 'trash' - npt.assert_raises(Exception, setup_mag_finite, - cosmo, - lens_model, - grid_radius_arcsec, - grid_resolution, - source_fwhm_parsec, - source_light_model, - z_source, - source_x, - source_y, dx, - dy, - amp_scale, - size_scale - ) + source_light_model = "trash" + npt.assert_raises( + Exception, + setup_mag_finite, + cosmo, + lens_model, + grid_radius_arcsec, + grid_resolution, + source_fwhm_parsec, + source_light_model, + z_source, + source_x, + source_y, + dx, + dy, + amp_scale, + size_scale, + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_mask_util.py b/test/test_Util/test_mask_util.py index 3d4d01512..b4b6a854e 100644 --- a/test/test_Util/test_mask_util.py +++ b/test/test_Util/test_mask_util.py @@ -17,10 +17,14 @@ def test_get_mask(): def test_mask_half_moon(): x, y = util.make_grid(numPix=100, deltapix=1) - mask = mask_util.mask_half_moon(x, y, center_x=0, center_y=0, r_in=5, r_out=10, phi0=0, delta_phi=np.pi) + mask = mask_util.mask_half_moon( + x, y, center_x=0, center_y=0, r_in=5, r_out=10, phi0=0, delta_phi=np.pi + ) assert mask[0] == 0 - mask = mask_util.mask_half_moon(x, y, center_x=0, center_y=0, r_in=5, r_out=10, phi0=0, delta_phi=-np.pi) + mask = 
mask_util.mask_half_moon( + x, y, center_x=0, center_y=0, r_in=5, r_out=10, phi0=0, delta_phi=-np.pi + ) assert mask[0] == 0 @@ -43,5 +47,5 @@ def test_mask_shell(): assert np.sum(mask) == 948 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_multi_gauss_expansion.py b/test/test_Util/test_multi_gauss_expansion.py index b9a36fc54..e20a1683d 100644 --- a/test/test_Util/test_multi_gauss_expansion.py +++ b/test/test_Util/test_multi_gauss_expansion.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.multi_gauss_expansion as mge @@ -9,28 +9,32 @@ from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian import pytest + class TestMGE(object): - """ - tests the Gaussian methods - """ + """Tests the Gaussian methods.""" + def setup_method(self): self.sersic = Sersic() self.multiGaussian = MultiGaussian() def test_mge_1d_sersic(self): n_comp = 30 - r_sersic = 1. + r_sersic = 1.0 n_sersic = 3.7 - I0_sersic = 1. - rs = np.logspace(-2., 1., 50) * r_sersic - ss = self.sersic.function(rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic) + I0_sersic = 1.0 + rs = np.logspace(-2.0, 1.0, 50) * r_sersic + ss = self.sersic.function( + rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic + ) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) - ss_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - #print((ss - ss_mge)/ss) - for i in range(10, len(ss)-10): - #print(rs[i]) - npt.assert_almost_equal((ss_mge[i]-ss[i])/ss[i], 0, decimal=1) + ss_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + # print((ss - ss_mge)/ss) + for i in range(10, len(ss) - 10): + # print(rs[i]) + npt.assert_almost_equal((ss_mge[i] - ss[i]) / ss[i], 0, decimal=1) amplitudes, sigmas, norm = mge.mge_1d(rs, np.zeros_like(rs), N=n_comp) assert amplitudes[0] == 0 @@ -38,174 +42,381 @@ def test_mge_1d_sersic(self): amplitudes, sigmas, norm = mge.mge_1d(rs, np.zeros_like(rs), N=0) assert amplitudes[0] == 0 - def test_mge_sersic_radius(self): n_comp = 30 - r_sersic = .5 + r_sersic = 0.5 n_sersic = 3.7 - I0_sersic = 1. - rs = np.logspace(-2., 1., 50) * r_sersic - ss = self.sersic.function(rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic) + I0_sersic = 1.0 + rs = np.logspace(-2.0, 1.0, 50) * r_sersic + ss = self.sersic.function( + rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic + ) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) - ss_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - print((ss - ss_mge)/(ss+ ss_mge)) - for i in range(10, len(ss)-10): - #print(rs[i]) - npt.assert_almost_equal((ss_mge[i]-ss[i])/(ss[i]), 0, decimal=1) + ss_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + print((ss - ss_mge) / (ss + ss_mge)) + for i in range(10, len(ss) - 10): + # print(rs[i]) + npt.assert_almost_equal((ss_mge[i] - ss[i]) / (ss[i]), 0, decimal=1) def test_mge_sersic_n_sersic(self): n_comp = 20 r_sersic = 1.5 - n_sersic = .5 - I0_sersic = 1. 
- rs = np.logspace(-2., 1., 50) * r_sersic - ss = self.sersic.function(rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic) + n_sersic = 0.5 + I0_sersic = 1.0 + rs = np.logspace(-2.0, 1.0, 50) * r_sersic + ss = self.sersic.function( + rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic + ) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) - ss_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - for i in range(10, len(ss)-10): - npt.assert_almost_equal((ss_mge[i]-ss[i])/(ss[i]+ss_mge[i]), 0, decimal=1) + ss_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + for i in range(10, len(ss) - 10): + npt.assert_almost_equal( + (ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=1 + ) n_comp = 20 r_sersic = 1.5 - n_sersic =3.5 - I0_sersic = 1. - rs = np.logspace(-2., 1., 50) * r_sersic - ss = self.sersic.function(rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic) + n_sersic = 3.5 + I0_sersic = 1.0 + rs = np.logspace(-2.0, 1.0, 50) * r_sersic + ss = self.sersic.function( + rs, np.zeros_like(rs), amp=I0_sersic, n_sersic=n_sersic, R_sersic=r_sersic + ) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) - ss_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - for i in range(10, len(ss)-10): - npt.assert_almost_equal((ss_mge[i]-ss[i])/(ss[i]+ss_mge[i]), 0, decimal=1) + ss_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + for i in range(10, len(ss) - 10): + npt.assert_almost_equal( + (ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=1 + ) def test_hernquist(self): hernquist = Hernquist() n_comp = 20 sigma0 = 1 r_eff = 1.5 - rs = np.logspace(-2., 1., 50) * r_eff * 0.5 + rs = np.logspace(-2.0, 1.0, 50) * r_eff * 0.5 ss = hernquist.function(rs, np.zeros_like(rs), sigma0, Rs=r_eff) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) - ss_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - for i in range(10, len(ss)-10): - npt.assert_almost_equal((ss_mge[i]-ss[i])/(ss[i]+ss_mge[i]), 0, decimal=2) + ss_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + for i in range(10, len(ss) - 10): + npt.assert_almost_equal( + (ss_mge[i] - ss[i]) / (ss[i] + ss_mge[i]), 0, decimal=2 + ) def test_hernquist_deprojection(self): hernquist = Hernquist() n_comp = 20 sigma0 = 1 r_eff = 1.5 - rs = np.logspace(-2., 1., 50) * r_eff * 0.5 + rs = np.logspace(-2.0, 1.0, 50) * r_eff * 0.5 ss = hernquist.function(rs, np.zeros_like(rs), sigma0, Rs=r_eff) amplitudes, sigmas, norm = mge.mge_1d(rs, ss, N=n_comp) amplitudes_3d, sigmas_3d = mge.de_projection_3d(amplitudes, sigmas) - ss_3d_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes_3d, sigma=sigmas_3d) + ss_3d_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes_3d, sigma=sigmas_3d + ) ss_3d_mulit = self.multiGaussian.light_3d(rs, amp=amplitudes, sigma=sigmas) for i in range(10, len(ss_3d_mge)): - npt.assert_almost_equal((ss_3d_mge[i] - ss_3d_mulit[i]) / (ss_3d_mulit[i] + ss_3d_mge[i]), 0, decimal=1) + npt.assert_almost_equal( + (ss_3d_mge[i] - ss_3d_mulit[i]) / (ss_3d_mulit[i] + ss_3d_mge[i]), + 0, + decimal=1, + ) ss_3d = hernquist.light_3d(rs, sigma0, Rs=r_eff) - for i in range(10, len(ss_3d) -10): - npt.assert_almost_equal((ss_3d_mge[i] - ss_3d[i]) / (ss_3d[i] + ss_3d_mge[i]), 0, decimal=1) + for i in range(10, 
len(ss_3d) - 10): + npt.assert_almost_equal( + (ss_3d_mge[i] - ss_3d[i]) / (ss_3d[i] + ss_3d_mge[i]), 0, decimal=1 + ) def test_spemd(self): from lenstronomy.LensModel.Profiles.spep import SPEP - from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import MultiGaussianKappa + from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import ( + MultiGaussianKappa, + ) + spep = SPEP() mge_kappa = MultiGaussianKappa() n_comp = 8 theta_E = 1.41 - kwargs = {'theta_E': theta_E, 'e1': 0, - 'e2': 0, 'gamma': 1.61} - rs = np.logspace(-2., 1., 100) * theta_E + kwargs = {"theta_E": theta_E, "e1": 0, "e2": 0, "gamma": 1.61} + rs = np.logspace(-2.0, 1.0, 100) * theta_E f_xx, f_xy, f_yx, f_yy = spep.hessian(rs, 0, **kwargs) - kappa = 1/2. * (f_xx + f_yy) + kappa = 1 / 2.0 * (f_xx + f_yy) amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp) - kappa_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) + kappa_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) for i in range(0, 80): - npt.assert_almost_equal(kappa_mge[i], 1./2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1) - npt.assert_almost_equal((kappa[i] - kappa_mge[i])/kappa[i], 0, decimal=1) + npt.assert_almost_equal( + kappa_mge[i], 1.0 / 2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1 + ) + npt.assert_almost_equal((kappa[i] - kappa_mge[i]) / kappa[i], 0, decimal=1) f_ = spep.function(theta_E, 0, **kwargs) f_mge = mge_kappa.function(theta_E, 0, sigma=sigmas, amp=amplitudes) - npt.assert_almost_equal(f_mge/f_, 1, decimal=2) + npt.assert_almost_equal(f_mge / f_, 1, decimal=2) def test_example(self): n_comp = 10 - rs = np.array([0.01589126, 0.01703967, 0.01827108, 0.01959148, - 0.0210073 , 0.02252544, 0.02415329, 0.02589879, - 0.02777042, 0.02977731, 0.03192923, 0.03423667, - 0.03671086, 0.03936385, 0.04220857, 0.04525886, - 0.0485296 , 0.0520367 , 0.05579724, 0.05982956, - 0.06415327, 0.06878945, 0.07376067, 0.07909115, - 0.08480685, 0.09093561, 0.09750727, 0.10455385, - 0.11210966, 0.12021152, 0.12889887, 0.13821403, - 0.14820238, 0.15891255, 0.17039672, 0.18271082, - 0.19591482, 0.21007304, 0.22525444, 0.24153295, - 0.25898787, 0.2777042 , 0.29777311, 0.31929235, - 0.34236672, 0.36710861, 0.39363853, 0.42208569, - 0.45258865, 0.48529597, 0.52036697, 0.55797244, - 0.59829556, 0.64153272, 0.6878945 , 0.73760673, - 0.79091152, 0.8480685 , 0.90935605, 0.97507269, - 1.04553848, 1.12109664, 1.20211518, 1.28898871, - 1.38214034, 1.48202378, 1.58912553, 1.70396721, - 1.82710819, 1.95914822, 2.10073042, 2.25254437, - 2.4153295 , 2.58987865, 2.77704199, 2.9777311 , - 3.19292345, 3.42366716, 3.67108607, 3.93638527, - 4.22085689, 4.5258865 , 4.85295974, 5.20366966, - 5.57972441, 5.98295559, 6.41532717, 6.87894505, - 7.37606729, 7.90911519, 8.48068497, 9.09356051, - 9.75072687, 10.45538481, 11.21096643, 12.02115183, - 12.88988708, 13.82140341, 14.82023784, 15.89125526]) - kappa = np.array([ 12.13776067, 11.60484966, 11.09533396, 10.60818686, - 10.14242668, 9.69711473, 9.27135349, 8.86428482, - 8.47508818, 8.10297905, 7.7472073 , 7.40705574, - 7.08183863, 6.77090034, 6.47361399, 6.18938022, - 5.917626 , 5.65780342, 5.40938864, 5.1718808 , - 4.94480104, 4.72769151, 4.52011448, 4.3216514 , - 4.13190214, 3.9504841 , 3.77703149, 3.61119459, - 3.45263901, 
3.30104507, 3.1561071 , 3.01753287, - 2.88504297, 2.75837025, 2.63725931, 2.52146595, - 2.41075668, 2.30490829, 2.20370736, 2.10694982, - 2.01444058, 1.92599312, 1.84142909, 1.76057799, - 1.6832768 , 1.60936965, 1.53870751, 1.47114792, - 1.40655465, 1.34479745, 1.28575181, 1.22929867, - 1.17532421, 1.12371958, 1.07438074, 1.02720821, - 0.98210687, 0.93898578, 0.897758 , 0.85834039, - 0.82065349, 0.78462129, 0.75017114, 0.71723359, - 0.68574222, 0.65563353, 0.62684681, 0.59932403, - 0.57300967, 0.5478507 , 0.52379638, 0.5007982 , - 0.47880979, 0.45778683, 0.43768691, 0.41846951, - 0.40009589, 0.38252899, 0.3657334 , 0.34967525, - 0.33432216, 0.31964317, 0.30560868, 0.29219041, - 0.27936129, 0.26709545, 0.25536817, 0.24415579, - 0.23343571, 0.22318631, 0.21338694, 0.20401782, - 0.19506006, 0.18649562, 0.17830721, 0.17047832, - 0.16299318, 0.15583668, 0.14899441, 0.14245255]) + rs = np.array( + [ + 0.01589126, + 0.01703967, + 0.01827108, + 0.01959148, + 0.0210073, + 0.02252544, + 0.02415329, + 0.02589879, + 0.02777042, + 0.02977731, + 0.03192923, + 0.03423667, + 0.03671086, + 0.03936385, + 0.04220857, + 0.04525886, + 0.0485296, + 0.0520367, + 0.05579724, + 0.05982956, + 0.06415327, + 0.06878945, + 0.07376067, + 0.07909115, + 0.08480685, + 0.09093561, + 0.09750727, + 0.10455385, + 0.11210966, + 0.12021152, + 0.12889887, + 0.13821403, + 0.14820238, + 0.15891255, + 0.17039672, + 0.18271082, + 0.19591482, + 0.21007304, + 0.22525444, + 0.24153295, + 0.25898787, + 0.2777042, + 0.29777311, + 0.31929235, + 0.34236672, + 0.36710861, + 0.39363853, + 0.42208569, + 0.45258865, + 0.48529597, + 0.52036697, + 0.55797244, + 0.59829556, + 0.64153272, + 0.6878945, + 0.73760673, + 0.79091152, + 0.8480685, + 0.90935605, + 0.97507269, + 1.04553848, + 1.12109664, + 1.20211518, + 1.28898871, + 1.38214034, + 1.48202378, + 1.58912553, + 1.70396721, + 1.82710819, + 1.95914822, + 2.10073042, + 2.25254437, + 2.4153295, + 2.58987865, + 2.77704199, + 2.9777311, + 3.19292345, + 3.42366716, + 3.67108607, + 3.93638527, + 4.22085689, + 4.5258865, + 4.85295974, + 5.20366966, + 5.57972441, + 5.98295559, + 6.41532717, + 6.87894505, + 7.37606729, + 7.90911519, + 8.48068497, + 9.09356051, + 9.75072687, + 10.45538481, + 11.21096643, + 12.02115183, + 12.88988708, + 13.82140341, + 14.82023784, + 15.89125526, + ] + ) + kappa = np.array( + [ + 12.13776067, + 11.60484966, + 11.09533396, + 10.60818686, + 10.14242668, + 9.69711473, + 9.27135349, + 8.86428482, + 8.47508818, + 8.10297905, + 7.7472073, + 7.40705574, + 7.08183863, + 6.77090034, + 6.47361399, + 6.18938022, + 5.917626, + 5.65780342, + 5.40938864, + 5.1718808, + 4.94480104, + 4.72769151, + 4.52011448, + 4.3216514, + 4.13190214, + 3.9504841, + 3.77703149, + 3.61119459, + 3.45263901, + 3.30104507, + 3.1561071, + 3.01753287, + 2.88504297, + 2.75837025, + 2.63725931, + 2.52146595, + 2.41075668, + 2.30490829, + 2.20370736, + 2.10694982, + 2.01444058, + 1.92599312, + 1.84142909, + 1.76057799, + 1.6832768, + 1.60936965, + 1.53870751, + 1.47114792, + 1.40655465, + 1.34479745, + 1.28575181, + 1.22929867, + 1.17532421, + 1.12371958, + 1.07438074, + 1.02720821, + 0.98210687, + 0.93898578, + 0.897758, + 0.85834039, + 0.82065349, + 0.78462129, + 0.75017114, + 0.71723359, + 0.68574222, + 0.65563353, + 0.62684681, + 0.59932403, + 0.57300967, + 0.5478507, + 0.52379638, + 0.5007982, + 0.47880979, + 0.45778683, + 0.43768691, + 0.41846951, + 0.40009589, + 0.38252899, + 0.3657334, + 0.34967525, + 0.33432216, + 0.31964317, + 0.30560868, + 0.29219041, + 0.27936129, + 0.26709545, + 0.25536817, + 
0.24415579, + 0.23343571, + 0.22318631, + 0.21338694, + 0.20401782, + 0.19506006, + 0.18649562, + 0.17830721, + 0.17047832, + 0.16299318, + 0.15583668, + 0.14899441, + 0.14245255, + ] + ) amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp) def test_nfw_sersic(self): - kwargs_lens_nfw = {'alpha_Rs': 1.4129647849966354, 'Rs': 7.0991113634274736} - kwargs_lens_sersic = {'k_eff': 0.24100561407593576, 'n_sersic': 1.8058507329346063, 'R_sersic': 1.0371803141813705} + kwargs_lens_nfw = {"alpha_Rs": 1.4129647849966354, "Rs": 7.0991113634274736} + kwargs_lens_sersic = { + "k_eff": 0.24100561407593576, + "n_sersic": 1.8058507329346063, + "R_sersic": 1.0371803141813705, + } from lenstronomy.LensModel.Profiles.nfw import NFW from lenstronomy.LensModel.Profiles.sersic import Sersic + nfw = NFW() sersic = Sersic() theta_E = 1.5 n_comp = 10 - rs = np.logspace(-2., 1., 100) * theta_E + rs = np.logspace(-2.0, 1.0, 100) * theta_E f_xx_nfw, f_xy_nfw, f_yx_nfw, f_yy_nfw = nfw.hessian(rs, 0, **kwargs_lens_nfw) f_xx_s, f_xy_s, f_yx_s, f_yy_s = sersic.hessian(rs, 0, **kwargs_lens_sersic) - kappa = 1 / 2. * (f_xx_nfw + f_xx_s + f_yy_nfw + f_yy_s) + kappa = 1 / 2.0 * (f_xx_nfw + f_xx_s + f_yy_nfw + f_yy_s) amplitudes, sigmas, norm = mge.mge_1d(rs, kappa, N=n_comp) - kappa_mge = self.multiGaussian.function(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) - from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import MultiGaussianKappa + kappa_mge = self.multiGaussian.function( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) + from lenstronomy.LensModel.Profiles.multi_gaussian_kappa import ( + MultiGaussianKappa, + ) + mge_kappa = MultiGaussianKappa() - f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian(rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas) + f_xx_mge, f_xy_mge, f_yx_mge, f_yy_mge = mge_kappa.hessian( + rs, np.zeros_like(rs), amp=amplitudes, sigma=sigmas + ) for i in range(0, 80): - npt.assert_almost_equal(kappa_mge[i], 1. / 2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1) + npt.assert_almost_equal( + kappa_mge[i], 1.0 / 2 * (f_xx_mge[i] + f_yy_mge[i]), decimal=1 + ) npt.assert_almost_equal((kappa[i] - kappa_mge[i]) / kappa[i], 0, decimal=1) f_nfw = nfw.function(theta_E, 0, **kwargs_lens_nfw) @@ -214,5 +425,5 @@ def test_nfw_sersic(self): npt.assert_almost_equal(f_mge / (f_nfw + f_s), 1, decimal=2) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_package_util.py b/test/test_Util/test_package_util.py index 2bf466616..21a6e1871 100644 --- a/test/test_Util/test_package_util.py +++ b/test/test_Util/test_package_util.py @@ -7,44 +7,44 @@ def test_short_and_laconic(): ls.short() # We can access submodules as symbols... - assert hasattr(ls, 'LensModel') + assert hasattr(ls, "LensModel") assert isinstance(ls.LensModel, types.ModuleType) # ... also recursively. 
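The ls.short() call above exposes lenstronomy submodules as attributes; what the assertions check can also be reproduced with an explicit importlib import. This is a usage sketch that assumes lenstronomy is installed and imported as ls, as in the test module; it is not the package_util implementation:

import importlib

import lenstronomy as ls

ls.short()
# the attribute created by short() behaves like a real module, same as an explicit import
assert hasattr(ls.LensModel.lens_model, "LensModel")
explicit = importlib.import_module("lenstronomy.LensModel.lens_model")
assert hasattr(explicit, "LensModel")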
- assert hasattr(ls.LensModel, 'lens_model') + assert hasattr(ls.LensModel, "lens_model") assert isinstance(ls.LensModel.lens_model, types.ModuleType) # Symbols inside the module are available - assert 'LensModel' in ls.LensModel.lens_model.__all__ - assert hasattr(ls.LensModel.lens_model, 'LensModel') + assert "LensModel" in ls.LensModel.lens_model.__all__ + assert hasattr(ls.LensModel.lens_model, "LensModel") assert isinstance(ls.LensModel.lens_model.LensModel, type) ls.laconic() # We can access lenstronomy symbols directy - assert hasattr(ls, 'MultiBandImageReconstruction') + assert hasattr(ls, "MultiBandImageReconstruction") assert isinstance(ls.MultiBandImageReconstruction, type) # Non-lenstronomy symbols are not accessible - assert not hasattr(ls, 'np') - assert not hasattr(ls, 'numpy') + assert not hasattr(ls, "np") + assert not hasattr(ls, "numpy") # Clashing symbols are not accessible - assert not hasattr(ls, 'PSF') + assert not hasattr(ls, "PSF") # 'short' type access still works - assert hasattr(ls, 'Analysis') + assert hasattr(ls, "Analysis") assert isinstance(ls.Analysis, types.ModuleType) - assert hasattr(ls.Analysis, 'image_reconstruction') + assert hasattr(ls.Analysis, "image_reconstruction") assert isinstance(ls.Analysis.image_reconstruction, types.ModuleType) - assert hasattr(ls.Analysis.image_reconstruction, - 'MultiBandImageReconstruction') - assert isinstance(ls.Analysis.image_reconstruction.MultiBandImageReconstruction, - type) + assert hasattr(ls.Analysis.image_reconstruction, "MultiBandImageReconstruction") + assert isinstance( + ls.Analysis.image_reconstruction.MultiBandImageReconstruction, type + ) # Key classes clashing with submodule names are # accessible with an extra underscore - assert hasattr(ls, 'LensModel') + assert hasattr(ls, "LensModel") assert isinstance(ls.LensModel, types.ModuleType) - assert hasattr(ls, 'LensModel_') + assert hasattr(ls, "LensModel_") assert isinstance(ls.LensModel_, type) diff --git a/test/test_Util/test_param_util.py b/test/test_Util/test_param_util.py index 525216f21..fac985d05 100644 --- a/test/test_Util/test_param_util.py +++ b/test/test_Util/test_param_util.py @@ -43,7 +43,7 @@ def test_phi_q2_ellipticity(): assert e1 == 0 assert e2 == 0 - phi, q = 2., 0.95 + phi, q = 2.0, 0.95 e1, e2 = param_util.phi_q2_ellipticity(phi, q) npt.assert_almost_equal(e1, -0.016760092842656733, decimal=8) npt.assert_almost_equal(e2, -0.019405192187382792, decimal=8) @@ -64,8 +64,8 @@ def test_ellipticity2phi_q(): e1 = np.array([0.3, 0.9]) e2 = np.array([0.0, 0.9]) phi, q = param_util.ellipticity2phi_q(e1, e2) - assert np.allclose(phi, [0.0, 0.39269908], atol=1.e-08) - assert np.allclose(q, [0.53846153, 5.00025001e-05], atol=1.e-08) + assert np.allclose(phi, [0.0, 0.39269908], atol=1.0e-08) + assert np.allclose(q, [0.53846153, 5.00025001e-05], atol=1.0e-08) def test_ellipticity2phi_q_symmetry(): @@ -100,22 +100,24 @@ def test_ellipticity2phi_q_symmetry(): def test_transform_e1e2(): e1 = 0.01 - e2 = 0. - x = 0. - y = 1. 
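The constants asserted in test_phi_q2_ellipticity above are consistent with the usual parametrization in which the ellipticity modulus is (1 - q) / (1 + q) rotated by twice the position angle. A quick numerical cross-check; the formula is inferred from the asserted values, not quoted from the implementation:

import numpy as np

phi, q = 2.0, 0.95
modulus = (1.0 - q) / (1.0 + q)
e1 = modulus * np.cos(2 * phi)
e2 = modulus * np.sin(2 * phi)
print(e1, e2)   # approx -0.016760 and -0.019405, the values asserted above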
- x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=0, center_y=0) - x_new = (1-e1) * x - e2 * y + e2 = 0.0 + x = 0.0 + y = 1.0 + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=0, center_y=0 + ) + x_new = (1 - e1) * x - e2 * y y_new = -e2 * x + (1 + e1) * y - det = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2) + det = np.sqrt((1 - e1) * (1 + e1) + e2**2) npt.assert_almost_equal(x_, x_new / det, decimal=5) npt.assert_almost_equal(y_, y_new / det, decimal=5) def test_phi_gamma_ellipticity(): - phi = -1. + phi = -1.0 gamma = 0.1 e1, e2 = param_util.shear_polar2cartesian(phi, gamma) - print(e1, e2, 'e1, e2') + print(e1, e2, "e1, e2") phi_out, gamma_out = param_util.shear_cartesian2polar(e1, e2) npt.assert_almost_equal(phi_out, phi, decimal=8) npt.assert_almost_equal(gamma_out, gamma_out, decimal=8) @@ -135,7 +137,9 @@ def test_displace_eccentricity(): e1 = 0.1 e2 = -0 center_x, center_y = 0, 0 - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=center_x, center_y=center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=center_x, center_y=center_y + ) phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x @@ -156,7 +160,9 @@ def test_displace_eccentricity(): e1 = 0.1 e2 = 0 center_x, center_y = 0, 0 - x_, y_ = param_util.transform_e1e2_product_average(x, y, e1, e2, center_x=center_x, center_y=center_y) + x_, y_ = param_util.transform_e1e2_product_average( + x, y, e1, e2, center_x=center_x, center_y=center_y + ) phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x @@ -180,9 +186,13 @@ def test_transform_e1e2_square_average(): e2 = 0 center_x, center_y = 0, 0 - x_, y_ = param_util.transform_e1e2_square_average(x, y, e1, e2, center_x=center_x, center_y=center_y) - npt.assert_almost_equal(np.sum(x**2 + y**2), np.sum(x_**2+y_**2), decimal=8) + x_, y_ = param_util.transform_e1e2_square_average( + x, y, e1, e2, center_x=center_x, center_y=center_y + ) + npt.assert_almost_equal( + np.sum(x**2 + y**2), np.sum(x_**2 + y_**2), decimal=8 + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_prob_density.py b/test/test_Util/test_prob_density.py index dd5a2b638..20c14330a 100644 --- a/test/test_Util/test_prob_density.py +++ b/test/test_Util/test_prob_density.py @@ -1,5 +1,4 @@ -__author__ = 'sibirrer' - +__author__ = "sibirrer" """ Tests for `prob_density` module. """ @@ -14,17 +13,16 @@ class TestSkewGaussian(object): - def setup_method(self): self.skewGassian = SkewGaussian() np.random.seed(seed=42) def test_pdf(self): x = 1 - y = self.skewGassian.pdf(x, e=0., w=1., a=0.) + y = self.skewGassian.pdf(x, e=0.0, w=1.0, a=0.0) assert y == 0.24197072451914337 x = np.array([0, 1]) - y = self.skewGassian.pdf(x, e=0., w=1., a=0.) + y = self.skewGassian.pdf(x, e=0.0, w=1.0, a=0.0) assert y[0] == 0.3989422804014327 assert y[1] == 0.24197072451914337 @@ -38,17 +36,16 @@ def test_pdf_skew(self): class TestKDE1D(object): - def setup_method(self): np.random.seed(seed=42) def gauss(self, x, mean, simga): - return np.exp(-((x-mean)/(simga))**2/2) / np.sqrt(2*np.pi) / simga + return np.exp(-(((x - mean) / (simga)) ** 2) / 2) / np.sqrt(2 * np.pi) / simga def test_likelihood(self): x_array = np.linspace(0.5, 1.5, 3000) - sigma = .1 - mean = 1. 
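test_transform_e1e2 above writes the reference mapping out explicitly: a shear-like distortion of (x, y) normalized by det = sqrt((1 - e1)(1 + e1) + e2^2). Evaluating that reference with the same inputs shows what transform_e1e2_product_average is being compared against:

import numpy as np

e1, e2 = 0.01, 0.0
x, y = 0.0, 1.0
det = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2)
x_new = ((1 - e1) * x - e2 * y) / det    # reference expressions from the test above
y_new = (-e2 * x + (1 + e1) * y) / det
print(x_new, y_new)                      # 0.0 and ~1.01005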
+ sigma = 0.1 + mean = 1.0 sample = np.random.normal(loc=mean, scale=sigma, size=50000) kde = KDE1D(values=sample) @@ -68,16 +65,24 @@ def test_compute_lower_upper_errors(): median, _ = prob_density.compute_lower_upper_errors(sample, num_sigma=0) npt.assert_almost_equal(median, 0, decimal=2) - median, [[lower_sigma1, upper_sigma1]] = prob_density.compute_lower_upper_errors(sample, num_sigma=1) + median, [[lower_sigma1, upper_sigma1]] = prob_density.compute_lower_upper_errors( + sample, num_sigma=1 + ) npt.assert_almost_equal(lower_sigma1, 1, decimal=2) npt.assert_almost_equal(upper_sigma1, 1, decimal=2) - median, [[lower_sigma1, upper_sigma1], [lower_sigma2, upper_sigma2]] = prob_density.compute_lower_upper_errors(sample, num_sigma=2) + median, [ + [lower_sigma1, upper_sigma1], + [lower_sigma2, upper_sigma2], + ] = prob_density.compute_lower_upper_errors(sample, num_sigma=2) npt.assert_almost_equal(lower_sigma2, 2, decimal=2) npt.assert_almost_equal(upper_sigma2, 2, decimal=2) - median, [[lower_sigma1, upper_sigma1], [lower_sigma2, upper_sigma2], - [lower_sigma3, upper_sigma3]] = prob_density.compute_lower_upper_errors(sample, num_sigma=3) + median, [ + [lower_sigma1, upper_sigma1], + [lower_sigma2, upper_sigma2], + [lower_sigma3, upper_sigma3], + ] = prob_density.compute_lower_upper_errors(sample, num_sigma=3) npt.assert_almost_equal(lower_sigma2, 2, decimal=2) npt.assert_almost_equal(upper_sigma2, 2, decimal=2) @@ -86,7 +91,6 @@ def test_compute_lower_upper_errors(): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): skewGassian = SkewGaussian() @@ -95,5 +99,5 @@ def test_raise(self): prob_density.compute_lower_upper_errors(sample=None, num_sigma=4) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_sampling_util.py b/test/test_Util/test_sampling_util.py index 98ba67680..5c77ee7bc 100644 --- a/test/test_Util/test_sampling_util.py +++ b/test/test_Util/test_sampling_util.py @@ -9,7 +9,7 @@ def test_unit2uniform(): lower, upper = -5, 15 cube = np.linspace(0, 1, 3) cube = sampling_util.unit2uniform(cube, lower, upper) - npt.assert_equal(cube, [lower, (lower+upper)/2., upper]) + npt.assert_equal(cube, [lower, (lower + upper) / 2.0, upper]) def test_uniform2unit(): @@ -21,9 +21,9 @@ def test_uniform2unit(): def test_cube2args_uniform(): n_dims = 3 - l, u = -5., 15. + l, u = -5.0, 15.0 lowers, uppers = l * np.ones(n_dims), u * np.ones(n_dims) - truth = [l, (l+u)/2., u] + truth = [l, (l + u) / 2.0, u] cube = [0, 0.5, 1] sampling_util.cube2args_uniform(cube, lowers, uppers, n_dims, copy=False) @@ -41,25 +41,28 @@ def test_cube2args_uniform(): def test_cube2args_gaussian(): n_dims = 3 - l, u = -5., 15. - m, s = 5., 1. 
+ l, u = -5.0, 15.0 + m, s = 5.0, 1.0 lowers, uppers = [l] * n_dims, [u] * n_dims - means, sigmas = [m] * n_dims, [s] * n_dims + means, sigmas = [m] * n_dims, [s] * n_dims truth = [l, m, u] cube = [0, 0.5, 1] - sampling_util.cube2args_gaussian(cube, lowers, uppers, - means, sigmas, n_dims, copy=False) + sampling_util.cube2args_gaussian( + cube, lowers, uppers, means, sigmas, n_dims, copy=False + ) npt.assert_equal(cube, truth) cube = [0, 0.5, 1] - sampling_util.cube2args_gaussian(cube, lowers, uppers, - means, sigmas, n_dims, copy=True) + sampling_util.cube2args_gaussian( + cube, lowers, uppers, means, sigmas, n_dims, copy=True + ) # they should NOT be equal because cube was not modified in-place npt.assert_equal(np.any(np.not_equal(cube, truth)), True) - cube = sampling_util.cube2args_gaussian(cube, lowers, uppers, - means, sigmas, n_dims, copy=True) + cube = sampling_util.cube2args_gaussian( + cube, lowers, uppers, means, sigmas, n_dims, copy=True + ) # here they should npt.assert_equal(cube, truth) @@ -69,26 +72,28 @@ def test_scale_limits(): lowers, uppers = np.array(lowers_list), np.array(uppers_list) widths = uppers - lowers scale_factor = 0.5 - lowers_s, uppers_s = sampling_util.scale_limits(lowers_list, uppers_list, scale_factor) + lowers_s, uppers_s = sampling_util.scale_limits( + lowers_list, uppers_list, scale_factor + ) npt.assert_equal(lowers_s, np.array([2.5, 1.5, 7.5])) npt.assert_equal(uppers_s, np.array([7.5, 6.5, 12.5])) - npt.assert_equal(widths*scale_factor, (uppers_s - lowers_s)) + npt.assert_equal(widths * scale_factor, (uppers_s - lowers_s)) def test_sample_ball(): p0 = np.ones(10) std = np.ones(10) - sample = sampling_util.sample_ball(p0, std, size=10000, dist='normal') + sample = sampling_util.sample_ball(p0, std, size=10000, dist="normal") mean = np.mean(sample, axis=0) npt.assert_almost_equal(mean, p0, decimal=1) sigma = np.std(sample, axis=0) npt.assert_almost_equal(sigma, std, decimal=1) - sample = sampling_util.sample_ball(p0, std, size=10000, dist='uniform') + sample = sampling_util.sample_ball(p0, std, size=10000, dist="uniform") mean = np.mean(sample, axis=0) npt.assert_almost_equal(mean, p0, decimal=1) sigma = np.std(sample, axis=0) - npt.assert_almost_equal(sigma, std*0.607, decimal=1) + npt.assert_almost_equal(sigma, std * 0.607, decimal=1) def test_sample_ball_truncated(): @@ -97,7 +102,9 @@ def test_sample_ball_truncated(): lower_limit = np.array([0, 0]) upper_limit = np.array([3, 4]) - samples = sampling_util.sample_ball_truncated(mean, sigma, lower_limit, upper_limit, size=1000) + samples = sampling_util.sample_ball_truncated( + mean, sigma, lower_limit, upper_limit, size=1000 + ) assert len(samples) == 1000 @@ -110,12 +117,12 @@ def test_sample_ball_truncated(): class TestRaise(unittest.TestCase): - def test_raise(self): - with self.assertRaises(ValueError): - sampling_util.sample_ball(p0=np.ones(10), std=np.ones(10), size=1000, dist='BAD') + sampling_util.sample_ball( + p0=np.ones(10), std=np.ones(10), size=1000, dist="BAD" + ) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Util/test_util.py b/test/test_Util/test_util.py index 37e6dc7cc..308c27ecd 100644 --- a/test/test_Util/test_util.py +++ b/test/test_Util/test_util.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import lenstronomy.Util.util as util @@ -9,7 +9,6 @@ def test_estimate_theta_E(): - x = np.array([-0.45328229, 0.57461556, 0.53757501, -0.42312438]) y = np.array([0.69582971, -0.51226356, 0.37577509, -0.40245467]) @@ 
-18,10 +17,9 @@ def test_estimate_theta_E(): def test_sort_img_index(): + ximg, yimg = np.array([1, 2, 3, 4]), np.array([0, 0, 1, 2]) - ximg,yimg = np.array([1,2,3,4]),np.array([0,0,1,2]) - - xref,yref = np.array([2,3,1,4]),np.array([0,1,0,2]) + xref, yref = np.array([2, 3, 1, 4]), np.array([0, 1, 0, 2]) indexes = util.sort_image_index(ximg, yimg, xref, yref) @@ -32,7 +30,7 @@ def test_sort_img_index(): indexes = util.sort_image_index(xref, yref, xref, yref) - npt.assert_allclose(np.array(indexes),[0,1,2,3]) + npt.assert_allclose(np.array(indexes), [0, 1, 2, 3]) def test_map_coord2pix(): @@ -60,28 +58,28 @@ def test_map_coord2pix(): def test_make_grid(): numPix = 11 - deltapix = 1. + deltapix = 1.0 grid = util.make_grid(numPix, deltapix) assert grid[0][0] == -5 - assert np.sum(grid[0]) == 0. + assert np.sum(grid[0]) == 0.0 - x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.) - assert np.sum(x_grid) == 0. + x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.0) + assert np.sum(x_grid) == 0.0 assert x_grid[0] == -5.25 x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=1, left_lower=True) - assert x_grid[0] == 0. - assert y_grid[0] == 0. + assert x_grid[0] == 0.0 + assert y_grid[0] == 0.0 # Similar tests for a non-rectangular grid x_grid, y_grid = util.make_grid((numPix, numPix - 1), deltapix) - assert x_grid[0] == -5. + assert x_grid[0] == -5.0 assert y_grid[0] == -4.5 assert np.sum(x_grid) == np.sum(y_grid) == 0 - x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.) + x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.0) assert np.sum(x_grid) == np.sum(y_grid) == 0 assert x_grid[0] == -5.25 @@ -94,7 +92,10 @@ def test_make_grid_transform(): numPix = 11 theta = np.pi / 2 deltaPix = 0.05 - Mpix2coord = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) * deltaPix + Mpix2coord = ( + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + * deltaPix + ) ra_coord, dec_coord = util.make_grid_transformed(numPix, Mpix2coord) ra2d = util.array2image(ra_coord) assert ra2d[5, 5] == 0 @@ -104,8 +105,19 @@ def test_make_grid_transform(): def test_grid_with_coords(): numPix = 11 - deltaPix = 1. - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix, deltaPix, subgrid_res=1, left_lower=False) + deltaPix = 1.0 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=False + ) ra = 0 dec = 0 x, y = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix) @@ -113,8 +125,19 @@ def test_grid_with_coords(): assert y == 5 numPix = 11 - deltaPix = .1 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix, deltaPix, subgrid_res=1, left_lower=False) + deltaPix = 0.1 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=False + ) ra = 0 dec = 0 x, y = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix) @@ -122,47 +145,100 @@ def test_grid_with_coords(): assert y == 5 numPix = 11 - deltaPix = 1. 
- x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=True) + deltaPix = 1.0 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=True + ) x_, y_ = 0, 0 ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord) assert ra == 5 assert dec == -5 numPix = 11 - deltaPix = 1. - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=False) + deltaPix = 1.0 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=False + ) x_, y_ = 0, 0 ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord) assert ra == -5 assert dec == -5 numPix = 11 - deltaPix = .1 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix, deltaPix, subgrid_res=1, left_lower=False) + deltaPix = 0.1 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=False + ) x_, y_ = 0, 0 ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord) - assert ra == .5 - assert dec == -.5 + assert ra == 0.5 + assert dec == -0.5 x__, y__ = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix) assert x__ == x_ assert y__ == y_ numPix = 11 - deltaPix = .1 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix, deltaPix, subgrid_res=1, left_lower=True) + deltaPix = 0.1 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=True + ) assert ra_at_xy_0 == 0 assert dec_at_xy_0 == 0 numPix = 11 - deltaPix = .1 - x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( - numPix, deltaPix, subgrid_res=1, left_lower=True, center_ra=2, center_dec=3) + deltaPix = 0.1 + ( + x_grid, + y_grid, + ra_at_xy_0, + dec_at_xy_0, + x_at_radec_0, + y_at_radec_0, + Mpix2coord, + Mcoord2pix, + ) = util.make_grid_with_coordtransform( + numPix, deltaPix, subgrid_res=1, left_lower=True, center_ra=2, center_dec=3 + ) assert ra_at_xy_0 == 2 assert dec_at_xy_0 == 3 + def test_array2image(): array = np.linspace(1, 100, 100) image = util.array2image(array) @@ -171,8 +247,8 @@ def test_array2image(): def test_image2array(): - image = np.zeros((10,10)) - image[1,2] = 1 + image = np.zeros((10, 10)) + image[1, 2] = 1 array = util.image2array(image) assert array[12] == 1 @@ -204,7 +280,7 @@ def test_cube2array2cube(): cube = np.zeros((2, 10, 10)) ns, nx, ny = np.shape(cube) assert nx == ny # condition required - nxy = nx*ny + nxy = nx * ny cube[1, 2, 2] = 1 array = util.cube2array(cube) cube_new = util.array2cube(array, ns, nxy) @@ -227,15 +303,15 @@ def test_get_axes(): def test_symmetry(): - array = 
np.linspace(0,10,100) + array = np.linspace(0, 10, 100) image = util.array2image(array) array_new = util.image2array(image) assert array_new[42] == array[42] def test_displaceAbs(): - x = np.array([0,1,2]) - y = np.array([3,2,1]) + x = np.array([0, 1, 2]) + y = np.array([3, 2, 1]) sourcePos_x = 1 sourcePos_y = 2 result = util.displaceAbs(x, y, sourcePos_x, sourcePos_y) @@ -244,38 +320,38 @@ def test_displaceAbs(): def test_get_distance(): - x_mins = np.array([1.]) - y_mins = np.array([1.]) - x_true = np.array([0.]) - y_true = np.array([0.]) + x_mins = np.array([1.0]) + y_mins = np.array([1.0]) + x_true = np.array([0.0]) + y_true = np.array([0.0]) dist = util.get_distance(x_mins, y_mins, x_true, y_true) assert dist == 2 - x_mins = np.array([1.,2]) - y_mins = np.array([1.,1]) - x_true = np.array([0.]) - y_true = np.array([0.]) + x_mins = np.array([1.0, 2]) + y_mins = np.array([1.0, 1]) + x_true = np.array([0.0]) + y_true = np.array([0.0]) dist = util.get_distance(x_mins, y_mins, x_true, y_true) assert dist == 10000000000 - x_mins = np.array([1.,2]) - y_mins = np.array([1.,1]) - x_true = np.array([0.,1]) - y_true = np.array([0.,2]) + x_mins = np.array([1.0, 2]) + y_mins = np.array([1.0, 1]) + x_true = np.array([0.0, 1]) + y_true = np.array([0.0, 2]) dist = util.get_distance(x_mins, y_mins, x_true, y_true) assert dist == 6 - x_mins = np.array([1.,2,0]) - y_mins = np.array([1.,1,0]) - x_true = np.array([0.,1,1]) - y_true = np.array([0.,2,1]) + x_mins = np.array([1.0, 2, 0]) + y_mins = np.array([1.0, 1, 0]) + x_true = np.array([0.0, 1, 1]) + y_true = np.array([0.0, 2, 1]) dist = util.get_distance(x_mins, y_mins, x_true, y_true) assert dist == 2 def test_selectBest(): - array = np.array([4,3,6,1,3]) - select = np.array([2,4,7,3,3]) + array = np.array([4, 3, 6, 1, 3]) + select = np.array([2, 4, 7, 3, 3]) numSelect = 4 array_select = util.selectBest(array, select, numSelect, highest=True) assert array_select[0] == 6 @@ -307,8 +383,8 @@ def test_select_best(): def test_compare_distance(): - x_mapped = np.array([4,3,6,1,3]) - y_mapped = np.array([2,4,7,3,3]) + x_mapped = np.array([4, 3, 6, 1, 3]) + y_mapped = np.array([2, 4, 7, 3, 3]) X2 = util.compare_distance(x_mapped, y_mapped) assert X2 == 140 @@ -351,7 +427,7 @@ def test_make_subgrid(): x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=1) x_sub_grid, y_sub_grid = util.make_subgrid(x_grid, y_grid, subgrid_res=2) assert np.sum(x_grid) == 0 - assert len(x_grid) == 101*101 + assert len(x_grid) == 101 * 101 assert x_sub_grid[0] == -50.25 assert y_sub_grid[17] == -50.25 @@ -362,7 +438,7 @@ def test_make_subgrid(): def test_fwhm2sigma(): fwhm = 0.5 sigma = util.fwhm2sigma(fwhm) - assert sigma == fwhm/ (2 * np.sqrt(2 * np.log(2))) + assert sigma == fwhm / (2 * np.sqrt(2 * np.log(2))) def test_points_on_circle(): @@ -372,7 +448,7 @@ def test_points_on_circle(): assert ra[0] == 1 assert dec[0] == 0 - ra_, dec_ = util.points_on_circle(radius, points-1, connect_ends=False) + ra_, dec_ = util.points_on_circle(radius, points - 1, connect_ends=False) npt.assert_almost_equal(ra[:-1], ra_, decimal=8) npt.assert_almost_equal(dec[:-1], dec_, decimal=8) @@ -413,7 +489,6 @@ def test_area(): class TestRaise(unittest.TestCase): - def test_raise(self): with self.assertRaises(ValueError): array = np.ones(5) @@ -425,9 +500,13 @@ def test_raise(self): x, y = np.ones(6), np.ones(6) util.get_axes(x, y) with self.assertRaises(ValueError): - util.selectBest(array=np.ones(6), criteria=np.ones(5), numSelect=1, highest=True) + util.selectBest( + array=np.ones(6), 
criteria=np.ones(5), numSelect=1, highest=True + ) with self.assertRaises(ValueError): - util.select_best(array=np.ones(6), criteria=np.ones(5), num_select=1, highest=True) + util.select_best( + array=np.ones(6), criteria=np.ones(5), num_select=1, highest=True + ) with self.assertRaises(ValueError): util.convert_bool_list(n=2, k=[3, 7]) with self.assertRaises(ValueError): @@ -442,5 +521,5 @@ def test_raise_make_grid(self): util.make_grid(numPix=[1.1, 1], deltapix=1) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Workflow/test_alignment_matching.py b/test/test_Workflow/test_alignment_matching.py index 573a79fc1..f5d2a6fdc 100644 --- a/test/test_Workflow/test_alignment_matching.py +++ b/test/test_Workflow/test_alignment_matching.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import copy @@ -13,7 +13,6 @@ class TestAlignmentMatching(object): - def setup(self): np.random.seed(41) # data specifics @@ -25,65 +24,115 @@ def setup(self): # PSF specification - self.kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) - transform_pix2angle = self.kwargs_data['transform_pix2angle'] + self.kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) + transform_pix2angle = self.kwargs_data["transform_pix2angle"] self.kwargs_data2 = copy.deepcopy(self.kwargs_data) self.delta_x_offset = 0.2 - self.kwargs_data2['ra_at_xy_0'] += self.delta_x_offset + self.kwargs_data2["ra_at_xy_0"] += self.delta_x_offset self.phi_rot = 0.1 cos_phi, sin_phi = np.cos(self.phi_rot), np.sin(self.phi_rot) rot_matrix = np.array([[cos_phi, -sin_phi], [sin_phi, cos_phi]]) transform_pix2angle_rot = np.dot(transform_pix2angle, rot_matrix) - self.kwargs_data2['transform_pix2angle'] = transform_pix2angle_rot + self.kwargs_data2["transform_pix2angle"] = transform_pix2angle_rot data_class = ImageData(**self.kwargs_data) data_class2 = ImageData(**self.kwargs_data2) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + "truncation": 3, + } psf_class = PSF(**kwargs_psf) - kwargs_numerics = {'supersampling_factor': 1} - - self.kwargs_lens_light = [{'amp': 100., 'R_sersic': 0.5, 'n_sersic': 2, 'e1': 0.3, 'e2': -0.2, - 'center_x': 0, 'center_y': 0}] - lens_light_model_list = ['SERSIC_ELLIPSE'] + kwargs_numerics = {"supersampling_factor": 1} + + self.kwargs_lens_light = [ + { + "amp": 100.0, + "R_sersic": 0.5, + "n_sersic": 2, + "e1": 0.3, + "e2": -0.2, + "center_x": 0, + "center_y": 0, + } + ] + lens_light_model_list = ["SERSIC_ELLIPSE"] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - imageModel = ImageModel(data_class, psf_class, lens_light_model_class=lens_light_model_class) - imageModel2 = ImageModel(data_class2, psf_class, lens_light_model_class=lens_light_model_class) - image_sim = sim_util.simulate_simple(imageModel, kwargs_lens_light=self.kwargs_lens_light) - image_sim2 = sim_util.simulate_simple(imageModel2, kwargs_lens_light=self.kwargs_lens_light) - self.kwargs_data['image_data'] = image_sim + imageModel = ImageModel( + data_class, psf_class, lens_light_model_class=lens_light_model_class + ) + imageModel2 = ImageModel( + data_class2, psf_class, lens_light_model_class=lens_light_model_class + ) + image_sim = sim_util.simulate_simple( + imageModel, kwargs_lens_light=self.kwargs_lens_light + ) + image_sim2 = sim_util.simulate_simple( + imageModel2, 
kwargs_lens_light=self.kwargs_lens_light + ) + self.kwargs_data["image_data"] = image_sim self.kwargs_data2_offset = copy.deepcopy(self.kwargs_data) - self.kwargs_data2_offset['image_data'] = image_sim2 + self.kwargs_data2_offset["image_data"] = image_sim2 image_band1 = [self.kwargs_data, kwargs_psf, kwargs_numerics] image_band2 = [self.kwargs_data2_offset, kwargs_psf, kwargs_numerics] multi_band_list = [image_band1, image_band2] - self.kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'joint-linear'} + self.kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "joint-linear", + } - self.kwargs_model = {'lens_light_model_list': lens_light_model_list} + self.kwargs_model = {"lens_light_model_list": lens_light_model_list} self.kwargs_constraints = {} self.kwargs_likelihood = {} - lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1}] - lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2}] - lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2}] - lens_light_param = self.kwargs_lens_light, lens_light_sigma, [{}], lens_light_lower, lens_light_upper - self.kwargs_params = {'lens_light_model': lens_light_param} + lens_light_sigma = [ + {"R_sersic": 0.05, "n_sersic": 0.5, "center_x": 0.1, "center_y": 0.1} + ] + lens_light_lower = [ + {"R_sersic": 0.01, "n_sersic": 0.5, "center_x": -2, "center_y": -2} + ] + lens_light_upper = [ + {"R_sersic": 10, "n_sersic": 5.5, "center_x": 2, "center_y": 2} + ] + lens_light_param = ( + self.kwargs_lens_light, + lens_light_sigma, + [{}], + lens_light_lower, + lens_light_upper, + ) + self.kwargs_params = {"lens_light_model": lens_light_param} def test_flux_calibration(self): - - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, self.kwargs_params) + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + self.kwargs_params, + ) fitting_list = [] - kwargs_align = {'n_particles': 20, 'n_iterations': 40, 'compute_bands': [False, True], - 'align_offset': True, 'align_rotation': True, 'delta_shift': 0.3, 'delta_rot': 0.5} - - fitting_list.append(['align_images', kwargs_align]) + kwargs_align = { + "n_particles": 20, + "n_iterations": 40, + "compute_bands": [False, True], + "align_offset": True, + "align_rotation": True, + "delta_shift": 0.3, + "delta_rot": 0.5, + } + + fitting_list.append(["align_images", kwargs_align]) chain_list = fittingSequence.fit_sequence(fitting_list) multi_band_list_new = fittingSequence.multi_band_list kwargs_data2_new = multi_band_list_new[1][0] - ra_shift = kwargs_data2_new['ra_shift'] + ra_shift = kwargs_data2_new["ra_shift"] npt.assert_almost_equal(ra_shift, self.delta_x_offset, decimal=1) - phi_rot = kwargs_data2_new['phi_rot'] + phi_rot = kwargs_data2_new["phi_rot"] npt.assert_almost_equal(phi_rot, self.phi_rot, decimal=1) diff --git a/test/test_Workflow/test_fitting_sequence.py b/test/test_Workflow/test_fitting_sequence.py index ff6affec4..774f6cef4 100644 --- a/test/test_Workflow/test_fitting_sequence.py +++ b/test/test_Workflow/test_fitting_sequence.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import copy @@ -16,12 +16,9 @@ class TestFittingSequence(object): - """ - test the fitting sequences - """ + """Test the fitting sequences.""" def setup_method(self): - # data specifics sigma_bkg = 0.05 # 
background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -31,178 +28,345 @@ def setup_method(self): # PSF specification - self.kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + self.kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**self.kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + "truncation": 3, + } psf_gaussian = PSF(**kwargs_psf_gaussian) - self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf_gaussian.kernel_point_source, 'psf_error_map': np.zeros_like(psf_gaussian.kernel_point_source)} + self.kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": psf_gaussian.kernel_point_source, + "psf_error_map": np.zeros_like(psf_gaussian.kernel_point_source), + } psf_class = PSF(**self.kwargs_psf) # 'EXTERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angle (in radian) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': 0.1, 'e2': 0.1} + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angle (in radian) + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_model_list = ['SPEP', 'SHEAR'] + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 3, 'center_x': 0, 'center_y': 0, - 'e1': 0.1, 'e2': 0.1} + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0, + "center_y": 0, + "e1": 0.1, + "e2": 0.1, + } - lens_light_model_list = ['SERSIC'] + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0, 'dec_source': 0.0, - 'source_amp': 1.}] # quasar point source position in the source plane and intrinsic brightness - point_source_list = ['SOURCE_POSITION'] - point_source_class = PointSource(point_source_type_list=point_source_list, fixed_magnification_list=[True]) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False, 'compute_mode': 'regular', - 'point_source_supersampling_factor': 1} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 1.0} + ] # quasar point source position in the source plane and intrinsic brightness + 
point_source_list = ["SOURCE_POSITION"] + point_source_class = PointSource( + point_source_type_list=point_source_list, fixed_magnification_list=[True] + ) + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + "compute_mode": "regular", + "point_source_supersampling_factor": 1, + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) self.data_class = data_class self.psf_class = psf_class - self.kwargs_data['image_data'] = image_sim - self.kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list, - 'lens_light_model_list': lens_light_model_list, - 'point_source_model_list': point_source_list, - 'fixed_magnification_list': [False], - 'index_lens_model_list': [[0, 1]], - 'point_source_frame_list': [[0]] - } + self.kwargs_data["image_data"] = image_sim + self.kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + "lens_light_model_list": lens_light_model_list, + "point_source_model_list": point_source_list, + "fixed_magnification_list": [False], + "index_lens_model_list": [[0, 1]], + "point_source_frame_list": [[0]], + } self.kwargs_numerics = kwargs_numerics num_source_model = len(source_model_list) self.kwargs_constraints = { - 'num_point_source_list': [4], - 'image_plane_source_list': [False] * num_source_model, - 'solver_type': 'NONE', # 'PROFILE', 'PROFILE_SHEAR', 'ELLIPSE', 'CENTER' - } - - self.kwargs_likelihood = {'force_no_add_image': True, - 'source_marg': True, - 'linear_prior': [1], - 'image_position_uncertainty': 0.004, - 'check_matched_source_position': False, - 'source_position_tolerance': 0.001, - 'source_position_sigma': 0.001, - 'check_positive_flux': True, - } - - lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1}, - {'gamma1': 0.1, 'gamma2': 0.1}] - lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}, - {'gamma1': -0.3, 'gamma2': -0.3}] - lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}, - {'gamma1': 0.3, 'gamma2': 0.3}] - source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}] - source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}] - source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}] - - lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1}] - lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2}] - lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2}] - ps_sigma = [{'ra_source': 1, 'dec_source': 1, 'point_amp': 1}] - - lens_param = self.kwargs_lens, lens_sigma, [{}, {'ra_0': 0, 'dec_0': 0}], lens_lower, lens_upper - source_param = self.kwargs_source, source_sigma, [{}], source_lower, source_upper - lens_light_param = self.kwargs_lens_light, lens_light_sigma, [{'center_x': 0}], lens_light_lower, lens_light_upper + "num_point_source_list": [4], + "image_plane_source_list": [False] * num_source_model, + "solver_type": "NONE", # 'PROFILE', 
'PROFILE_SHEAR', 'ELLIPSE', 'CENTER' + } + + self.kwargs_likelihood = { + "force_no_add_image": True, + "source_marg": True, + "linear_prior": [1], + "image_position_uncertainty": 0.004, + "check_matched_source_position": False, + "source_position_tolerance": 0.001, + "source_position_sigma": 0.001, + "check_positive_flux": True, + } + + lens_sigma = [ + { + "theta_E": 0.1, + "gamma": 0.1, + "e1": 0.1, + "e2": 0.1, + "center_x": 0.1, + "center_y": 0.1, + }, + {"gamma1": 0.1, "gamma2": 0.1}, + ] + lens_lower = [ + { + "theta_E": 0.0, + "gamma": 1.5, + "center_x": -2, + "center_y": -2, + "e1": -0.4, + "e2": -0.4, + }, + {"gamma1": -0.3, "gamma2": -0.3}, + ] + lens_upper = [ + { + "theta_E": 10.0, + "gamma": 2.5, + "center_x": 2, + "center_y": 2, + "e1": 0.4, + "e2": 0.4, + }, + {"gamma1": 0.3, "gamma2": 0.3}, + ] + source_sigma = [ + { + "R_sersic": 0.05, + "n_sersic": 0.5, + "center_x": 0.1, + "center_y": 0.1, + "e1": 0.1, + "e2": 0.1, + } + ] + source_lower = [ + { + "R_sersic": 0.01, + "n_sersic": 0.5, + "center_x": -2, + "center_y": -2, + "e1": -0.4, + "e2": -0.4, + } + ] + source_upper = [ + { + "R_sersic": 10, + "n_sersic": 5.5, + "center_x": 2, + "center_y": 2, + "e1": 0.4, + "e2": 0.4, + } + ] + + lens_light_sigma = [ + {"R_sersic": 0.05, "n_sersic": 0.5, "center_x": 0.1, "center_y": 0.1} + ] + lens_light_lower = [ + {"R_sersic": 0.01, "n_sersic": 0.5, "center_x": -2, "center_y": -2} + ] + lens_light_upper = [ + {"R_sersic": 10, "n_sersic": 5.5, "center_x": 2, "center_y": 2} + ] + ps_sigma = [{"ra_source": 1, "dec_source": 1, "point_amp": 1}] + + lens_param = ( + self.kwargs_lens, + lens_sigma, + [{}, {"ra_0": 0, "dec_0": 0}], + lens_lower, + lens_upper, + ) + source_param = ( + self.kwargs_source, + source_sigma, + [{}], + source_lower, + source_upper, + ) + lens_light_param = ( + self.kwargs_lens_light, + lens_light_sigma, + [{"center_x": 0}], + lens_light_lower, + lens_light_upper, + ) ps_param = self.kwargs_ps, ps_sigma, [{}], self.kwargs_ps, self.kwargs_ps - self.kwargs_params = {'lens_model': lens_param, - 'source_model': source_param, - 'lens_light_model': lens_light_param, - 'point_source_model': ps_param, - # 'special': special_param - } + self.kwargs_params = { + "lens_model": lens_param, + "source_model": source_param, + "lens_light_model": lens_light_param, + "point_source_model": ps_param, + # 'special': special_param + } image_band = [self.kwargs_data, self.kwargs_psf, self.kwargs_numerics] multi_band_list = [image_band] - self.kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear'} + self.kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "multi-linear", + } def test_simulationAPI_image(self): npt.assert_almost_equal(self.data_class.data[4, 4], 0.1, decimal=0) def test_simulationAPI_psf(self): - npt.assert_almost_equal(np.sum(self.psf_class.kernel_point_source), 1, decimal=6) + npt.assert_almost_equal( + np.sum(self.psf_class.kernel_point_source), 1, decimal=6 + ) def test_fitting_sequence(self): - - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, self.kwargs_params) + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + self.kwargs_params, + ) kwargs_result = fittingSequence.best_fit(bijective=False) - lens_temp = kwargs_result['kwargs_lens'] - npt.assert_almost_equal(lens_temp[0]['theta_E'], self.kwargs_lens[0]['theta_E'], decimal=2) + 
lens_temp = kwargs_result["kwargs_lens"] + npt.assert_almost_equal( + lens_temp[0]["theta_E"], self.kwargs_lens[0]["theta_E"], decimal=2 + ) logL = fittingSequence.best_fit_likelihood - print(logL, 'test') - #print(lens_temp, source_temp, lens_light_temp, ps_temp, special_temp) + print(logL, "test") + # print(lens_temp, source_temp, lens_light_temp, ps_temp, special_temp) assert logL < 0 bic = fittingSequence.bic assert bic > 0 - #npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4) + # npt.assert_almost_equal(bic, 20000000220.29376, decimal=-4) - #npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4) + # npt.assert_almost_equal(logL, -10000000061.792593, decimal=-4) n_p = 2 n_i = 2 fitting_list = [] - kwargs_pso = {'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i} - fitting_list.append(['PSO', kwargs_pso]) - kwargs_align = {'delta_shift':0.2, 'n_particles': 2, 'n_iterations': 2} - fitting_list.append(['align_images', kwargs_align]) - kwargs_psf_iter = {'num_iter': 2, 'psf_iter_factor': 0.5, 'stacking_method': 'mean', 'new_procedure': False} - fitting_list.append(['psf_iteration', kwargs_psf_iter]) - fitting_list.append(['restart', None]) - fitting_list.append(['fix_not_computed', {'free_bands': [True]}]) + kwargs_pso = {"sigma_scale": 1, "n_particles": n_p, "n_iterations": n_i} + fitting_list.append(["PSO", kwargs_pso]) + kwargs_align = {"delta_shift": 0.2, "n_particles": 2, "n_iterations": 2} + fitting_list.append(["align_images", kwargs_align]) + kwargs_psf_iter = { + "num_iter": 2, + "psf_iter_factor": 0.5, + "stacking_method": "mean", + "new_procedure": False, + } + fitting_list.append(["psf_iteration", kwargs_psf_iter]) + fitting_list.append(["restart", None]) + fitting_list.append(["fix_not_computed", {"free_bands": [True]}]) n_sersic_overwrite = 4 - kwargs_update = {'lens_light_add_fixed': [[0, ['n_sersic'], [n_sersic_overwrite]]], - 'lens_light_remove_fixed': [[0, ['center_x']]], - 'change_source_lower_limit': [[0, ['n_sersic'], [0.1]]], - 'change_source_upper_limit': [[0, ['n_sersic'], [10]]]} - fitting_list.append(['update_settings', kwargs_update]) + kwargs_update = { + "lens_light_add_fixed": [[0, ["n_sersic"], [n_sersic_overwrite]]], + "lens_light_remove_fixed": [[0, ["center_x"]]], + "change_source_lower_limit": [[0, ["n_sersic"], [0.1]]], + "change_source_upper_limit": [[0, ["n_sersic"], [10]]], + } + fitting_list.append(["update_settings", kwargs_update]) chain_list = fittingSequence.fit_sequence(fitting_list) - lens_fixed, source_fixed, lens_light_fixed, ps_fixed, special_fixed, extinction_fixed = fittingSequence._updateManager.fixed_kwargs + ( + lens_fixed, + source_fixed, + lens_light_fixed, + ps_fixed, + special_fixed, + extinction_fixed, + ) = fittingSequence._updateManager.fixed_kwargs kwargs_result = fittingSequence.best_fit(bijective=False) - npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'], self.kwargs_lens[0]['theta_E'], decimal=1) - npt.assert_almost_equal(fittingSequence._updateManager._lens_light_fixed[0]['n_sersic'], n_sersic_overwrite, decimal=8) - npt.assert_almost_equal(lens_light_fixed[0]['n_sersic'], 4, decimal=-1) - assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 0.1 - assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 10 + npt.assert_almost_equal( + kwargs_result["kwargs_lens"][0]["theta_E"], + self.kwargs_lens[0]["theta_E"], + decimal=1, + ) + npt.assert_almost_equal( + fittingSequence._updateManager._lens_light_fixed[0]["n_sersic"], + n_sersic_overwrite, + 
decimal=8, + ) + npt.assert_almost_equal(lens_light_fixed[0]["n_sersic"], 4, decimal=-1) + assert fittingSequence._updateManager._lower_kwargs[1][0]["n_sersic"] == 0.1 + assert fittingSequence._updateManager._upper_kwargs[1][0]["n_sersic"] == 10 # test 'set_param_value' fitting sequence fitting_list = [ - ['set_param_value', {'lens': [[1, ['gamma1'], [0.013]]]}], - ['set_param_value', {'lens_light': [[0, ['center_x'], [0.009]]]}], - ['set_param_value', {'source': [[0, ['n_sersic'], [2.993]]]}], - ['set_param_value', {'ps': [[0, ['ra_source'], [0.007]]]}] + ["set_param_value", {"lens": [[1, ["gamma1"], [0.013]]]}], + ["set_param_value", {"lens_light": [[0, ["center_x"], [0.009]]]}], + ["set_param_value", {"source": [[0, ["n_sersic"], [2.993]]]}], + ["set_param_value", {"ps": [[0, ["ra_source"], [0.007]]]}], ] fittingSequence.fit_sequence(fitting_list) kwargs_set = fittingSequence._updateManager.parameter_state - assert kwargs_set['kwargs_lens'][1]['gamma1'] == 0.013 - assert kwargs_set['kwargs_lens_light'][0]['center_x'] == 0.009 - assert kwargs_set['kwargs_source'][0]['n_sersic'] == 2.993 - assert kwargs_set['kwargs_ps'][0]['ra_source'] == 0.007 + assert kwargs_set["kwargs_lens"][1]["gamma1"] == 0.013 + assert kwargs_set["kwargs_lens_light"][0]["center_x"] == 0.009 + assert kwargs_set["kwargs_source"][0]["n_sersic"] == 2.993 + assert kwargs_set["kwargs_ps"][0]["ra_source"] == 0.007 from unittest import TestCase + t = TestCase() with t.assertRaises(ValueError): fitting_list_two = [] - fitting_list_two.append(['fake_mcmc_method', kwargs_pso]) + fitting_list_two.append(["fake_mcmc_method", kwargs_pso]) fittingSequence.fit_sequence(fitting_list_two) def test_cobaya(self): @@ -217,66 +381,114 @@ def test_cobaya(self): fwhm = 0.5 # full width half max of PSF # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + "truncation": 3, + } psf_gaussian = PSF(**kwargs_psf_gaussian) # make a lens - lens_model_list = ['SIS'] - kwargs_lens = [{'theta_E': 1.5, 'center_x': 0.0, 'center_y': 0.0}] + lens_model_list = ["SIS"] + kwargs_lens = [{"theta_E": 1.5, "center_x": 0.0, "center_y": 0.0}] lens_model_class = LensModel(lens_model_list=lens_model_list) # make a source - source_model_list = ['SERSIC'] - kwargs_source = [{'amp': 1., 'R_sersic': 0.3, 'n_sersic': 3.0, 'center_x': 0.1, 'center_y': 0.1}] + source_model_list = ["SERSIC"] + kwargs_source = [ + { + "amp": 1.0, + "R_sersic": 0.3, + "n_sersic": 3.0, + "center_x": 0.1, + "center_y": 0.1, + } + ] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + } - imageModel = ImageModel(data_class, psf_gaussian, lens_model_class, source_model_class, kwargs_numerics=kwargs_numerics) + imageModel = ImageModel( + data_class, + psf_gaussian, + lens_model_class, + source_model_class, + kwargs_numerics=kwargs_numerics, + ) image_sim = sim_util.simulate_simple(imageModel, kwargs_lens, kwargs_source) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim - 
kwargs_model = {'lens_model_list': lens_model_list, 'source_light_model_list': source_model_list} + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + } - lens_fixed = [{'center_x': 0.0, 'center_y': 0.0}] - lens_sigma = [{'theta_E': 0.01}] - lens_lower = [{'theta_E': 0.1}] - lens_upper = [{'theta_E': 3.0}] + lens_fixed = [{"center_x": 0.0, "center_y": 0.0}] + lens_sigma = [{"theta_E": 0.01}] + lens_lower = [{"theta_E": 0.1}] + lens_upper = [{"theta_E": 3.0}] source_fixed = [{}] - source_sigma = [{'R_sersic': 0.01, 'n_sersic': 0.01, 'center_x': 0.01, 'center_y': 0.01}] - source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -1, 'center_y': -1}] - source_upper = [{'R_sersic': 1.0, 'n_sersic': 6.0, 'center_x': 1, 'center_y': 1}] + source_sigma = [ + {"R_sersic": 0.01, "n_sersic": 0.01, "center_x": 0.01, "center_y": 0.01} + ] + source_lower = [ + {"R_sersic": 0.01, "n_sersic": 0.5, "center_x": -1, "center_y": -1} + ] + source_upper = [ + {"R_sersic": 1.0, "n_sersic": 6.0, "center_x": 1, "center_y": 1} + ] lens_param = [kwargs_lens, lens_sigma, lens_fixed, lens_lower, lens_upper] - source_param = [kwargs_source, source_sigma, source_fixed, source_lower, source_upper] + source_param = [ + kwargs_source, + source_sigma, + source_fixed, + source_lower, + source_upper, + ] - kwargs_params = {'lens_model': lens_param, - 'source_model': source_param} + kwargs_params = {"lens_model": lens_param, "source_model": source_param} kwargs_constraints = {} multi_band_list = [[kwargs_data, kwargs_psf_gaussian, kwargs_numerics]] - kwargs_data_joint = {'multi_band_list': multi_band_list, - 'multi_band_type': 'multi-linear'} - - kwargs_likelihood = {'source_marg': False} + kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "multi-linear", + } - fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model, - kwargs_constraints, kwargs_likelihood, - kwargs_params) + kwargs_likelihood = {"source_marg": False} - kwargs_cobaya = {'proposal_widths': [0.001, 0.001, 0.001, 0.001, 0.001], - 'Rminus1_stop': 100, # does this need to be large? can we run in test mode? - 'force_overwrite': True} + fittingSequence = FittingSequence( + kwargs_data_joint, + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + ) + + kwargs_cobaya = { + "proposal_widths": [0.001, 0.001, 0.001, 0.001, 0.001], + "Rminus1_stop": 100, # does this need to be large? can we run in test mode? 
+ "force_overwrite": True, + } - chain_list = fittingSequence.fit_sequence([['metropolis_hastings', kwargs_cobaya]]) + chain_list = fittingSequence.fit_sequence( + [["metropolis_hastings", kwargs_cobaya]] + ) def test_zeus(self): np.random.seed(42) @@ -292,69 +504,172 @@ def test_zeus(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) - kwargs_psf_gaussian = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3} + kwargs_psf_gaussian = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + "truncation": 3, + } psf_gaussian = PSF(**kwargs_psf_gaussian) # make a lens - lens_model_list = ['EPL'] - kwargs_epl = {'theta_E': 0.6, 'gamma': 2.6, 'center_x': 0.0, 'center_y': 0.0, 'e1': 0.1, 'e2': 0.1} + lens_model_list = ["EPL"] + kwargs_epl = { + "theta_E": 0.6, + "gamma": 2.6, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.1, + "e2": 0.1, + } kwargs_lens = [kwargs_epl] lens_model_class = LensModel(lens_model_list=lens_model_list) # make a source - source_model_list = ['SERSIC_ELLIPSE'] - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': 0.6, 'n_sersic': 3, 'center_x': 0.0, 'center_y': 0.0, - 'e1': 0.1, 'e2': 0.1} + source_model_list = ["SERSIC_ELLIPSE"] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 3, + "center_x": 0.0, + "center_y": 0.0, + "e1": 0.1, + "e2": 0.1, + } kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - kwargs_numerics = {'supersampling_factor': 1, 'supersampling_convolution': False} + kwargs_numerics = { + "supersampling_factor": 1, + "supersampling_convolution": False, + } - imageModel = ImageModel(data_class, psf_gaussian, lens_model_class, source_model_class, kwargs_numerics=kwargs_numerics) + imageModel = ImageModel( + data_class, + psf_gaussian, + lens_model_class, + source_model_class, + kwargs_numerics=kwargs_numerics, + ) image_sim = sim_util.simulate_simple(imageModel, kwargs_lens, kwargs_source) data_class.update_data(image_sim) - kwargs_data['image_data'] = image_sim + kwargs_data["image_data"] = image_sim - kwargs_model = {'lens_model_list': lens_model_list, - 'source_light_model_list': source_model_list} + kwargs_model = { + "lens_model_list": lens_model_list, + "source_light_model_list": source_model_list, + } lens_fixed = [{}] - lens_sigma = [{'theta_E': 0.1, 'gamma': 0.1, 'e1': 0.1, 'e2': 0.1, 'center_x': 0.1, 'center_y': 0.1}] - lens_lower = [{'theta_E': 0., 'gamma': 1.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}] - lens_upper = [{'theta_E': 10., 'gamma': 2.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}] + lens_sigma = [ + { + "theta_E": 0.1, + "gamma": 0.1, + "e1": 0.1, + "e2": 0.1, + "center_x": 0.1, + "center_y": 0.1, + } + ] + lens_lower = [ + { + "theta_E": 0.0, + "gamma": 1.5, + "center_x": -2, + "center_y": -2, + "e1": -0.4, + "e2": -0.4, + } + ] + lens_upper = [ + { + "theta_E": 10.0, + "gamma": 2.5, + "center_x": 2, + "center_y": 2, + "e1": 0.4, + "e2": 0.4, + } + ] source_fixed = [{}] - source_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1, 'e1': 0.1, 'e2': 0.1}] - source_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2, 'e1': -0.4, 'e2': -0.4}] - source_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2, 'e1': 0.4, 'e2': 0.4}] + 
source_sigma = [ + { + "R_sersic": 0.05, + "n_sersic": 0.5, + "center_x": 0.1, + "center_y": 0.1, + "e1": 0.1, + "e2": 0.1, + } + ] + source_lower = [ + { + "R_sersic": 0.01, + "n_sersic": 0.5, + "center_x": -2, + "center_y": -2, + "e1": -0.4, + "e2": -0.4, + } + ] + source_upper = [ + { + "R_sersic": 10, + "n_sersic": 5.5, + "center_x": 2, + "center_y": 2, + "e1": 0.4, + "e2": 0.4, + } + ] lens_param = [kwargs_lens, lens_sigma, lens_fixed, lens_lower, lens_upper] - source_param = [kwargs_source, source_sigma, source_fixed, source_lower, source_upper] + source_param = [ + kwargs_source, + source_sigma, + source_fixed, + source_lower, + source_upper, + ] - kwargs_params = {'lens_model': lens_param, - 'source_model': source_param} + kwargs_params = {"lens_model": lens_param, "source_model": source_param} kwargs_constraints = {} multi_band_list = [[kwargs_data, kwargs_psf_gaussian, kwargs_numerics]] - kwargs_data_joint = {'multi_band_list': multi_band_list, - 'multi_band_type': 'multi-linear'} + kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "multi-linear", + } - kwargs_likelihood = {'source_marg': False} + kwargs_likelihood = {"source_marg": False} - fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model, - kwargs_constraints, kwargs_likelihood, - kwargs_params) + fittingSequence = FittingSequence( + kwargs_data_joint, + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + ) fitting_list = [] - kwargs_zeus = {'sampler_type': 'ZEUS', 'n_burn': 2, 'n_run': 2, 'walkerRatio': 4, 'backend_filename': 'test_mcmc_zeus.h5'} + kwargs_zeus = { + "sampler_type": "ZEUS", + "n_burn": 2, + "n_run": 2, + "walkerRatio": 4, + "backend_filename": "test_mcmc_zeus.h5", + } - fitting_list.append(['MCMC', kwargs_zeus]) + fitting_list.append(["MCMC", kwargs_zeus]) chain_list = fittingSequence.fit_sequence(fitting_list) @@ -362,106 +677,140 @@ def test_multinest(self): # Nested sampler tests # further decrease the parameter space for nested samplers to run faster - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, self.kwargs_params) + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + self.kwargs_params, + ) fitting_list = [] - kwargs_update = {'ps_add_fixed': [[0, ['ra_source', 'dec_source'], [0, 0]]], - 'lens_light_add_fixed': [[0, ['n_sersic', 'R_sersic', 'center_x', 'center_y'], [4, .1, 0, 0]]], - 'source_add_fixed': [[0, ['R_sersic', 'e1', 'e2', 'center_x', 'center_y'], [.6, .1, .1, 0, 0]]], - 'lens_add_fixed': [[0, ['gamma', 'theta_E', 'e1', 'e2', 'center_x', 'center_y'], [1.8, 1., .1, .1, 0, 0]], - [1, ['gamma1', 'gamma2'], [0.01, 0.01]]], - 'change_source_lower_limit': [[0, ['n_sersic'], [2.9]]], - 'change_source_upper_limit': [[0, ['n_sersic'], [3.1]]] + kwargs_update = { + "ps_add_fixed": [[0, ["ra_source", "dec_source"], [0, 0]]], + "lens_light_add_fixed": [ + [0, ["n_sersic", "R_sersic", "center_x", "center_y"], [4, 0.1, 0, 0]] + ], + "source_add_fixed": [ + [ + 0, + ["R_sersic", "e1", "e2", "center_x", "center_y"], + [0.6, 0.1, 0.1, 0, 0], + ] + ], + "lens_add_fixed": [ + [ + 0, + ["gamma", "theta_E", "e1", "e2", "center_x", "center_y"], + [1.8, 1.0, 0.1, 0.1, 0, 0], + ], + [1, ["gamma1", "gamma2"], [0.01, 0.01]], + ], + "change_source_lower_limit": [[0, ["n_sersic"], [2.9]]], + "change_source_upper_limit": [[0, ["n_sersic"], [3.1]]], } - 
fitting_list.append(['update_settings', kwargs_update]) + fitting_list.append(["update_settings", kwargs_update]) kwargs_multinest = { - 'sampler_type': 'MULTINEST', - 'kwargs_run': { - 'n_live_points': 10, - 'evidence_tolerance': 0.5, - 'sampling_efficiency': 0.8, # 1 for posterior-only, 0 for evidence-only - 'importance_nested_sampling': False, - 'multimodal': True, - 'const_efficiency_mode': False, # reduce sampling_efficiency to 5% when True + "sampler_type": "MULTINEST", + "kwargs_run": { + "n_live_points": 10, + "evidence_tolerance": 0.5, + "sampling_efficiency": 0.8, # 1 for posterior-only, 0 for evidence-only + "importance_nested_sampling": False, + "multimodal": True, + "const_efficiency_mode": False, # reduce sampling_efficiency to 5% when True }, - 'remove_output_dir': True, + "remove_output_dir": True, } - fitting_list.append(['nested_sampling', kwargs_multinest]) + fitting_list.append(["nested_sampling", kwargs_multinest]) chain_list2 = fittingSequence.fit_sequence(fitting_list) kwargs_fixed = fittingSequence._updateManager.fixed_kwargs - npt.assert_almost_equal(kwargs_fixed[0][1]['gamma1'], 0.01, decimal=2) - assert fittingSequence._updateManager._lower_kwargs[1][0]['n_sersic'] == 2.9 - assert fittingSequence._updateManager._upper_kwargs[1][0]['n_sersic'] == 3.1 + npt.assert_almost_equal(kwargs_fixed[0][1]["gamma1"], 0.01, decimal=2) + assert fittingSequence._updateManager._lower_kwargs[1][0]["n_sersic"] == 2.9 + assert fittingSequence._updateManager._upper_kwargs[1][0]["n_sersic"] == 3.1 - kwargs_test = {'kwargs_lens': 1} + kwargs_test = {"kwargs_lens": 1} fittingSequence.update_state(kwargs_test) kwargs_out = fittingSequence.best_fit(bijective=True) - assert kwargs_out['kwargs_lens'] == 1 + assert kwargs_out["kwargs_lens"] == 1 def test_dynesty(self): np.random.seed(42) kwargs_params = copy.deepcopy(self.kwargs_params) - kwargs_params['lens_model'][0][0]['theta_E'] += 0.01 - kwargs_params['lens_model'][0][0]['gamma'] += 0.01 - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, kwargs_params) + kwargs_params["lens_model"][0][0]["theta_E"] += 0.01 + kwargs_params["lens_model"][0][0]["gamma"] += 0.01 + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + kwargs_params, + ) fitting_list = [] kwargs_dynesty = { - 'sampler_type': 'DYNESTY', - 'kwargs_run': { - 'dlogz_init': 0.01, - 'nlive_init': 20, - 'nlive_batch': 20, - 'maxbatch': 1, + "sampler_type": "DYNESTY", + "kwargs_run": { + "dlogz_init": 0.01, + "nlive_init": 20, + "nlive_batch": 20, + "maxbatch": 1, }, } - fitting_list.append(['nested_sampling', kwargs_dynesty]) + fitting_list.append(["nested_sampling", kwargs_dynesty]) chain_list = fittingSequence.fit_sequence(fitting_list) def test_nautilus(self): np.random.seed(42) kwargs_params = copy.deepcopy(self.kwargs_params) fittingSequence = FittingSequence( - self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, kwargs_params) + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + kwargs_params, + ) fitting_list = [] kwargs_nautilus = { - 'prior_type': 'uniform', - 'verbose': True, - 'f_live': 1.0, - 'n_eff': 0.0, - 'n_live': 2, - 'seed': 42 + "prior_type": "uniform", + "verbose": True, + "f_live": 1.0, + "n_eff": 0.0, + "n_live": 2, + "seed": 42, } - fitting_list.append(['Nautilus', kwargs_nautilus]) + 
fitting_list.append(["Nautilus", kwargs_nautilus]) chain_list = fittingSequence.fit_sequence(fitting_list) def test_dypolychord(self): - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, self.kwargs_params) + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + self.kwargs_params, + ) fitting_list = [] kwargs_dypolychord = { - 'sampler_type': 'DYPOLYCHORD', - 'kwargs_run': { - 'ninit': 8, - 'nlive_const': 10, + "sampler_type": "DYPOLYCHORD", + "kwargs_run": { + "ninit": 8, + "nlive_const": 10, #'seed_increment': 1, - 'resume_dyn_run': False, + "resume_dyn_run": False, #'init_step': 10, }, - 'polychord_settings': { - 'seed': 1, + "polychord_settings": { + "seed": 1, #'num_repeats': 20 }, - 'dypolychord_dynamic_goal': 0.8, # 1 for posterior-only, 0 for evidence-only - 'remove_output_dir': True, + "dypolychord_dynamic_goal": 0.8, # 1 for posterior-only, 0 for evidence-only + "remove_output_dir": True, } - fitting_list.append(['nested_sampling', kwargs_dypolychord]) + fitting_list.append(["nested_sampling", kwargs_dypolychord]) chain_list = fittingSequence.fit_sequence(fitting_list) def test_minimizer(self): @@ -469,39 +818,70 @@ def test_minimizer(self): n_i = 2 fitting_list = [] - kwargs_simplex = {'n_iterations': n_i, 'method': 'Nelder-Mead'} - fitting_list.append(['SIMPLEX', kwargs_simplex]) - kwargs_simplex = {'n_iterations': n_i, 'method': 'Powell'} - fitting_list.append(['SIMPLEX', kwargs_simplex]) - kwargs_pso = {'sigma_scale': 1, 'n_particles': n_p, 'n_iterations': n_i} - fitting_list.append(['PSO', kwargs_pso]) - kwargs_mcmc = {'sigma_scale': 1, 'n_burn': 1, 'n_run': 1, 'n_walkers': 10, 'sampler_type': 'EMCEE'} - fitting_list.append(['MCMC', kwargs_mcmc]) - kwargs_mcmc['re_use_samples'] = True - kwargs_mcmc['init_samples'] = np.array([[np.random.normal(1, 0.001)] for i in range(100)]) - fitting_list.append(['MCMC', kwargs_mcmc]) - - def custom_likelihood(kwargs_lens, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, - kwargs_special=None, kwargs_extinction=None): - theta_E = kwargs_lens[0]['theta_E'] - return -(theta_E - 1.)**2 / 0.1**2 / 2 - kwargs_likelihood = {'custom_logL_addition': custom_likelihood} - - kwargs_data_joint = {'multi_band_list': []} - kwargs_model = {'lens_model_list': ['SIS']} + kwargs_simplex = {"n_iterations": n_i, "method": "Nelder-Mead"} + fitting_list.append(["SIMPLEX", kwargs_simplex]) + kwargs_simplex = {"n_iterations": n_i, "method": "Powell"} + fitting_list.append(["SIMPLEX", kwargs_simplex]) + kwargs_pso = {"sigma_scale": 1, "n_particles": n_p, "n_iterations": n_i} + fitting_list.append(["PSO", kwargs_pso]) + kwargs_mcmc = { + "sigma_scale": 1, + "n_burn": 1, + "n_run": 1, + "n_walkers": 10, + "sampler_type": "EMCEE", + } + fitting_list.append(["MCMC", kwargs_mcmc]) + kwargs_mcmc["re_use_samples"] = True + kwargs_mcmc["init_samples"] = np.array( + [[np.random.normal(1, 0.001)] for i in range(100)] + ) + fitting_list.append(["MCMC", kwargs_mcmc]) + + def custom_likelihood( + kwargs_lens, + kwargs_source=None, + kwargs_lens_light=None, + kwargs_ps=None, + kwargs_special=None, + kwargs_extinction=None, + ): + theta_E = kwargs_lens[0]["theta_E"] + return -((theta_E - 1.0) ** 2) / 0.1**2 / 2 + + kwargs_likelihood = {"custom_logL_addition": custom_likelihood} + + kwargs_data_joint = {"multi_band_list": []} + kwargs_model = {"lens_model_list": ["SIS"]} kwargs_constraints = {} - lens_param = 
[{'theta_E': 1, 'center_x': 0, 'center_y': 0}], [{'theta_E': 0.1, 'center_x': 0.1, 'center_y': 0.1}], [{'center_x': 0, 'center_y': 0}], [{'theta_E': 0, 'center_x': -10, 'center_y': -10}], [{'theta_E': 10, 'center_x': 10, 'center_y': 10}] - - kwargs_params = {'lens_model': lens_param} - fittingSequence = FittingSequence(kwargs_data_joint, kwargs_model, kwargs_constraints, - kwargs_likelihood, kwargs_params) - args = fittingSequence.param_class.kwargs2args(kwargs_lens=[{'theta_E': 1, 'center_x': 0, 'center_y': 0}]) + lens_param = ( + [{"theta_E": 1, "center_x": 0, "center_y": 0}], + [{"theta_E": 0.1, "center_x": 0.1, "center_y": 0.1}], + [{"center_x": 0, "center_y": 0}], + [{"theta_E": 0, "center_x": -10, "center_y": -10}], + [{"theta_E": 10, "center_x": 10, "center_y": 10}], + ) + + kwargs_params = {"lens_model": lens_param} + fittingSequence = FittingSequence( + kwargs_data_joint, + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + ) + args = fittingSequence.param_class.kwargs2args( + kwargs_lens=[{"theta_E": 1, "center_x": 0, "center_y": 0}] + ) kwargs_result = fittingSequence.param_class.args2kwargs(args) print(kwargs_result) - print(args, 'test args') + print(args, "test args") chain_list = fittingSequence.fit_sequence(fitting_list) kwargs_result = fittingSequence.best_fit(bijective=False) - npt.assert_almost_equal(kwargs_result['kwargs_lens'][0]['theta_E'], 1, decimal=2) + npt.assert_almost_equal( + kwargs_result["kwargs_lens"][0]["theta_E"], 1, decimal=2 + ) + -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Workflow/test_flux_calibration.py b/test/test_Workflow/test_flux_calibration.py index f91c96980..7b42546bd 100644 --- a/test/test_Workflow/test_flux_calibration.py +++ b/test/test_Workflow/test_flux_calibration.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import copy @@ -13,7 +13,6 @@ class TestImageCalibration(object): - def setup(self): np.random.seed(41) # data specifics @@ -25,50 +24,96 @@ def setup(self): # PSF specification - self.kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + self.kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**self.kwargs_data) - kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': fwhm, 'pixel_size': deltaPix, 'truncation': 3} + kwargs_psf = { + "psf_type": "GAUSSIAN", + "fwhm": fwhm, + "pixel_size": deltaPix, + "truncation": 3, + } psf_class = PSF(**kwargs_psf) - kwargs_numerics = {'supersampling_factor': 1} + kwargs_numerics = {"supersampling_factor": 1} self.flux_scale_factor = 0.1 - self.kwargs_lens_light = [{'amp': 100., 'R_sersic': 0.5, 'n_sersic': 2, 'center_x': 0, 'center_y': 0}] - self.kwargs_lens_light2 = [{'amp': 100 * self.flux_scale_factor, 'R_sersic': 0.5, 'n_sersic': 2, 'center_x': 0, 'center_y': 0}] - lens_light_model_list = ['SERSIC'] + self.kwargs_lens_light = [ + {"amp": 100.0, "R_sersic": 0.5, "n_sersic": 2, "center_x": 0, "center_y": 0} + ] + self.kwargs_lens_light2 = [ + { + "amp": 100 * self.flux_scale_factor, + "R_sersic": 0.5, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } + ] + lens_light_model_list = ["SERSIC"] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - imageModel = ImageModel(data_class, psf_class, lens_light_model_class=lens_light_model_class) - image_sim = sim_util.simulate_simple(imageModel, kwargs_lens_light=self.kwargs_lens_light) - image_sim2 = sim_util.simulate_simple(imageModel, 
kwargs_lens_light=self.kwargs_lens_light2) - self.kwargs_data['image_data'] = image_sim + imageModel = ImageModel( + data_class, psf_class, lens_light_model_class=lens_light_model_class + ) + image_sim = sim_util.simulate_simple( + imageModel, kwargs_lens_light=self.kwargs_lens_light + ) + image_sim2 = sim_util.simulate_simple( + imageModel, kwargs_lens_light=self.kwargs_lens_light2 + ) + self.kwargs_data["image_data"] = image_sim self.kwargs_data2 = copy.deepcopy(self.kwargs_data) - self.kwargs_data2['image_data'] = image_sim2 + self.kwargs_data2["image_data"] = image_sim2 image_band1 = [self.kwargs_data, kwargs_psf, kwargs_numerics] image_band2 = [self.kwargs_data2, kwargs_psf, kwargs_numerics] multi_band_list = [image_band1, image_band2] - self.kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'joint-linear'} + self.kwargs_data_joint = { + "multi_band_list": multi_band_list, + "multi_band_type": "joint-linear", + } - self.kwargs_model = {'lens_light_model_list': lens_light_model_list} + self.kwargs_model = {"lens_light_model_list": lens_light_model_list} self.kwargs_constraints = {} self.kwargs_likelihood = {} - lens_light_sigma = [{'R_sersic': 0.05, 'n_sersic': 0.5, 'center_x': 0.1, 'center_y': 0.1}] - lens_light_lower = [{'R_sersic': 0.01, 'n_sersic': 0.5, 'center_x': -2, 'center_y': -2}] - lens_light_upper = [{'R_sersic': 10, 'n_sersic': 5.5, 'center_x': 2, 'center_y': 2}] - lens_light_param = self.kwargs_lens_light, lens_light_sigma, [{}], lens_light_lower, lens_light_upper - self.kwargs_params = {'lens_light_model': lens_light_param} + lens_light_sigma = [ + {"R_sersic": 0.05, "n_sersic": 0.5, "center_x": 0.1, "center_y": 0.1} + ] + lens_light_lower = [ + {"R_sersic": 0.01, "n_sersic": 0.5, "center_x": -2, "center_y": -2} + ] + lens_light_upper = [ + {"R_sersic": 10, "n_sersic": 5.5, "center_x": 2, "center_y": 2} + ] + lens_light_param = ( + self.kwargs_lens_light, + lens_light_sigma, + [{}], + lens_light_lower, + lens_light_upper, + ) + self.kwargs_params = {"lens_light_model": lens_light_param} def test_flux_calibration(self): - - fittingSequence = FittingSequence(self.kwargs_data_joint, self.kwargs_model, self.kwargs_constraints, - self.kwargs_likelihood, self.kwargs_params) + fittingSequence = FittingSequence( + self.kwargs_data_joint, + self.kwargs_model, + self.kwargs_constraints, + self.kwargs_likelihood, + self.kwargs_params, + ) fitting_list = [] - kwargs_calibrate = {'n_particles': 20, 'n_iterations': 40, 'calibrate_bands': [False, True]} - fitting_list.append(['calibrate_images', kwargs_calibrate]) + kwargs_calibrate = { + "n_particles": 20, + "n_iterations": 40, + "calibrate_bands": [False, True], + } + fitting_list.append(["calibrate_images", kwargs_calibrate]) chain_list = fittingSequence.fit_sequence(fitting_list) multi_band_list_new = fittingSequence.multi_band_list kwargs_data2_new = multi_band_list_new[1][0] - flux_scaling = kwargs_data2_new['flux_scaling'] + flux_scaling = kwargs_data2_new["flux_scaling"] npt.assert_almost_equal(flux_scaling, self.flux_scale_factor, decimal=1) diff --git a/test/test_Workflow/test_multiband_update_manager.py b/test/test_Workflow/test_multiband_update_manager.py index 43fb9dad9..6707f1d33 100644 --- a/test/test_Workflow/test_multiband_update_manager.py +++ b/test/test_Workflow/test_multiband_update_manager.py @@ -3,45 +3,74 @@ class TestUpdateManager(object): - def setup_method(self): - kwargs_model = {'lens_model_list': ['SHEAR', 'SHEAR'], 'source_light_model_list': ['UNIFORM'], 'index_lens_model_list': 
[[0], [1]]} - kwargs_constraints ={} + kwargs_model = { + "lens_model_list": ["SHEAR", "SHEAR"], + "source_light_model_list": ["UNIFORM"], + "index_lens_model_list": [[0], [1]], + } + kwargs_constraints = {} kwargs_likelihood = {} kwargs_params = {} - lens_init = [{'e1': 0, 'e2': 0, 'ra_0': 0, 'dec_0': 0}, {'e1': 0, 'e2': 0, 'ra_0': 0, 'dec_0': 0}] - lens_sigma = [{'e1': 0.1, 'e2': 0.1}, {'e1': 0.1, 'e2': 0.1}] - lens_fixed = [{'ra_0': 0, 'dec_0': 0}, {'ra_0': 0, 'dec_0': 0}] - lens_lower = [{'e1': -1, 'e2': -1}, {'e1': -1, 'e2': -1}] - lens_upper = [{'e1': 1, 'e2': 1}, {'e1': 1, 'e2': 1}] - kwargs_params['lens_model'] = [lens_init, lens_sigma, lens_fixed, lens_lower, lens_upper] - kwargs_params['source_model'] = [[{}], [{}], [{}], [{}], [{}]] - kwargs_params['special'] = [{'special1': 1}, {'special1': 1}, {'special1': 0.1}, {'special1': 0}, {'special1': 1}] - self.manager = MultiBandUpdateManager(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params, num_bands=2) + lens_init = [ + {"e1": 0, "e2": 0, "ra_0": 0, "dec_0": 0}, + {"e1": 0, "e2": 0, "ra_0": 0, "dec_0": 0}, + ] + lens_sigma = [{"e1": 0.1, "e2": 0.1}, {"e1": 0.1, "e2": 0.1}] + lens_fixed = [{"ra_0": 0, "dec_0": 0}, {"ra_0": 0, "dec_0": 0}] + lens_lower = [{"e1": -1, "e2": -1}, {"e1": -1, "e2": -1}] + lens_upper = [{"e1": 1, "e2": 1}, {"e1": 1, "e2": 1}] + kwargs_params["lens_model"] = [ + lens_init, + lens_sigma, + lens_fixed, + lens_lower, + lens_upper, + ] + kwargs_params["source_model"] = [[{}], [{}], [{}], [{}], [{}]] + kwargs_params["special"] = [ + {"special1": 1}, + {"special1": 1}, + {"special1": 0.1}, + {"special1": 0}, + {"special1": 1}, + ] + self.manager = MultiBandUpdateManager( + kwargs_model, + kwargs_constraints, + kwargs_likelihood, + kwargs_params, + num_bands=2, + ) def test_none_mamager(self): - manager = MultiBandUpdateManager(kwargs_model={}, kwargs_constraints={}, kwargs_likelihood={}, kwargs_params={}, num_bands=0) + manager = MultiBandUpdateManager( + kwargs_model={}, + kwargs_constraints={}, + kwargs_likelihood={}, + kwargs_params={}, + num_bands=0, + ) results = manager.best_fit() - assert len(results['kwargs_lens']) == 0 + assert len(results["kwargs_lens"]) == 0 def test_keep_frame_fixed(self): frame_list_fixed = [0] - assert 'e1' not in self.manager._lens_fixed[0] + assert "e1" not in self.manager._lens_fixed[0] self.manager.keep_frame_fixed(frame_list_fixed) - assert 'e1' in self.manager._lens_fixed[0] + assert "e1" in self.manager._lens_fixed[0] self.manager.undo_frame_fixed(frame_list=[0]) - assert 'e1' not in self.manager._lens_fixed[0] - assert 'ra_0' in self.manager._lens_fixed[0] + assert "e1" not in self.manager._lens_fixed[0] + assert "ra_0" in self.manager._lens_fixed[0] def test_fix_not_computed(self): - self.manager.fix_not_computed(free_bands=[False, True]) print(self.manager._lens_fixed) - assert 'e1' in self.manager._lens_fixed[0] - assert 'ra_0' in self.manager._lens_fixed[0] - assert 'e1' not in self.manager._lens_fixed[1] + assert "e1" in self.manager._lens_fixed[0] + assert "ra_0" in self.manager._lens_fixed[0] + assert "e1" not in self.manager._lens_fixed[1] -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Workflow/test_psf_fitting.py b/test/test_Workflow/test_psf_fitting.py index 2895427f5..0d98f0256 100644 --- a/test/test_Workflow/test_psf_fitting.py +++ b/test/test_Workflow/test_psf_fitting.py @@ -1,4 +1,4 @@ -__author__ = 'sibirrer' +__author__ = "sibirrer" import pytest import numpy as np @@ -17,12 +17,9 @@ class 
TestPSFIteration(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" def setup_method(self): - # data specifics sigma_bkg = 0.01 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -32,89 +29,157 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) psf_error_map = np.zeros_like(kernel_point_source) - self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source, - 'psf_error_map': psf_error_map} + self.kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source, + "psf_error_map": psf_error_map, + } psf_class = PSF(**self.kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q = 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} - - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0, 'dec_source': 0.0, - 'source_amp': 10.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - - kwargs_numerics = {'supersampling_factor': 3, 'supersampling_convolution': False, 
'compute_mode': 'regular', - 'point_source_supersampling_factor': 3} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 10.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + + kwargs_numerics = { + "supersampling_factor": 3, + "supersampling_convolution": False, + "compute_mode": "regular", + "point_source_supersampling_factor": 3, + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - self.imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) + self.imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) self.psf_fitting = PsfFitting(self.imageModel) - self.kwargs_params = {'kwargs_lens': self.kwargs_lens, 'kwargs_source': self.kwargs_source, - 'kwargs_lens_light': self.kwargs_lens_light, 'kwargs_ps': self.kwargs_ps} + self.kwargs_params = { + "kwargs_lens": self.kwargs_lens, + "kwargs_source": self.kwargs_source, + "kwargs_lens_light": self.kwargs_lens_light, + "kwargs_ps": self.kwargs_ps, + } def test_update_psf(self): fwhm = 0.5 sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} - kwargs_psf_iter = {'stacking_method': 'median', 'error_map_radius': 0.5, 'new_procedure': True} + kwargs_psf_iter = { + "stacking_method": "median", + "error_map_radius": 0.5, + "new_procedure": True, + } - kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf(kwargs_psf, self.kwargs_params, **kwargs_psf_iter) + kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf( + kwargs_psf, self.kwargs_params, **kwargs_psf_iter + ) assert improved_bool - kernel_new = kwargs_psf_return['kernel_point_source'] - kernel_true = self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + kernel_new = kwargs_psf_return["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) 
assert diff_old > diff_new def test_calc_corner_mask(self): - kernel_old = np.ones((101,101)) + kernel_old = np.ones((101, 101)) nsymmetry = 4 corner_mask = self.psf_fitting.calc_cornermask(len(kernel_old), nsymmetry) assert corner_mask[corner_mask].size == 0 @@ -126,18 +191,37 @@ def test_calc_corner_mask(self): def test_combine_psf_corner(self): ## start kernel - kernel_old = np.ones((101,101)) + kernel_old = np.ones((101, 101)) test_updated_kernel = copy.deepcopy(kernel_old) ##allow the residuals to have different normaliztions - kernel_list_new = [test_updated_kernel*2, test_updated_kernel, test_updated_kernel*4, test_updated_kernel] + kernel_list_new = [ + test_updated_kernel * 2, + test_updated_kernel, + test_updated_kernel * 4, + test_updated_kernel, + ] nsymmetry = 6 corner_mask = self.psf_fitting.calc_cornermask(len(kernel_old), nsymmetry) - updated_psf = self.psf_fitting.combine_psf(kernel_list_new, kernel_old, factor=1., stacking_option='median', - symmetry=nsymmetry, corner_symmetry=1, corner_mask = corner_mask) + updated_psf = self.psf_fitting.combine_psf( + kernel_list_new, + kernel_old, + factor=1.0, + stacking_option="median", + symmetry=nsymmetry, + corner_symmetry=1, + corner_mask=corner_mask, + ) ##maybe a better criteria here for floats? - assert abs(updated_psf.max()-updated_psf.min())<1e-10 - updated_psf = self.psf_fitting.combine_psf(kernel_list_new, kernel_old, factor=1., stacking_option='median', - symmetry=nsymmetry, corner_symmetry=2,corner_mask = corner_mask) + assert abs(updated_psf.max() - updated_psf.min()) < 1e-10 + updated_psf = self.psf_fitting.combine_psf( + kernel_list_new, + kernel_old, + factor=1.0, + stacking_option="median", + symmetry=nsymmetry, + corner_symmetry=2, + corner_mask=corner_mask, + ) assert abs(updated_psf.max() - updated_psf.min()) < 1e-10 def test_update_iterative(self): @@ -145,57 +229,73 @@ def test_update_iterative(self): sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source} - kwargs_psf_iter = {'stacking_method': 'median', 'psf_symmetry': 2, 'psf_iter_factor': 0.2, - 'block_center_neighbour': 0.1, 'error_map_radius': 0.5, 'new_procedure': True} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} + kwargs_psf_iter = { + "stacking_method": "median", + "psf_symmetry": 2, + "psf_iter_factor": 0.2, + "block_center_neighbour": 0.1, + "error_map_radius": 0.5, + "new_procedure": True, + } kwargs_params = copy.deepcopy(self.kwargs_params) - kwargs_ps = kwargs_params['kwargs_ps'] - del kwargs_ps[0]['source_amp'] - print(kwargs_params['kwargs_ps']) - kwargs_psf_new = self.psf_fitting.update_iterative(kwargs_psf, kwargs_params, - **kwargs_psf_iter) - kernel_new = kwargs_psf_new['kernel_point_source'] - kernel_true = self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + kwargs_ps = kwargs_params["kwargs_ps"] + del kwargs_ps[0]["source_amp"] + print(kwargs_params["kwargs_ps"]) + kwargs_psf_new = self.psf_fitting.update_iterative( + kwargs_psf, kwargs_params, 
**kwargs_psf_iter + ) + kernel_new = kwargs_psf_new["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) assert diff_old > diff_new assert diff_new < 0.01 - assert 'psf_error_map' in kwargs_psf_new - - kwargs_psf_new = self.psf_fitting.update_iterative(kwargs_psf, kwargs_params, num_iter=3, - no_break=True, keep_psf_error_map=True) - kernel_new = kwargs_psf_new['kernel_point_source'] - kernel_true = self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + assert "psf_error_map" in kwargs_psf_new + + kwargs_psf_new = self.psf_fitting.update_iterative( + kwargs_psf, + kwargs_params, + num_iter=3, + no_break=True, + keep_psf_error_map=True, + ) + kernel_new = kwargs_psf_new["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) assert diff_old > diff_new assert diff_new < 0.01 def test_mask_point_source(self): - ra_image, dec_image, amp = self.imageModel.PointSource.point_source_list(self.kwargs_ps, self.kwargs_lens) + ra_image, dec_image, amp = self.imageModel.PointSource.point_source_list( + self.kwargs_ps, self.kwargs_lens + ) print(ra_image, dec_image, amp) x_grid, y_grid = self.imageModel.Data.pixel_coordinates x_grid = util.image2array(x_grid) y_grid = util.image2array(y_grid) radius = 0.5 - mask_point_source = self.psf_fitting.mask_point_source(ra_image, dec_image, x_grid, y_grid, radius, i=0) + mask_point_source = self.psf_fitting.mask_point_source( + ra_image, dec_image, x_grid, y_grid, radius, i=0 + ) assert mask_point_source[10, 10] == 1 class TestPSFIterationOld(object): - """ - tests the source model routines - """ + """Tests the source model routines.""" def setup_method(self): - # data specifics sigma_bkg = 0.01 # background noise per pixel exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit) @@ -205,83 +305,151 @@ def setup_method(self): # PSF specification - kwargs_data = sim_util.data_configure_simple(numPix, deltaPix, exp_time, sigma_bkg) + kwargs_data = sim_util.data_configure_simple( + numPix, deltaPix, exp_time, sigma_bkg + ) data_class = ImageData(**kwargs_data) sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) psf_error_map = np.zeros_like(kernel_point_source) - self.kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source, - 'psf_error_map': psf_error_map} + self.kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source, + "psf_error_map": psf_error_map, + } psf_class = PSF(**self.kwargs_psf) # 'EXERNAL_SHEAR': external shear - kwargs_shear = {'gamma1': 0.01, 'gamma2': 0.01} # gamma_ext: shear strength, psi_ext: shear angel (in radian) + kwargs_shear = { + "gamma1": 0.01, + "gamma2": 0.01, + } # gamma_ext: shear strength, psi_ext: shear angel (in radian) phi, q 
= 0.2, 0.8 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_spemd = {'theta_E': 1., 'gamma': 1.8, 'center_x': 0, 'center_y': 0, 'e1': e1, 'e2': e2} - - lens_model_list = ['SPEP', 'SHEAR'] + kwargs_spemd = { + "theta_E": 1.0, + "gamma": 1.8, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_model_list = ["SPEP", "SHEAR"] self.kwargs_lens = [kwargs_spemd, kwargs_shear] lens_model_class = LensModel(lens_model_list=lens_model_list) # list of light profiles (for lens and source) # 'SERSIC': spherical Sersic profile - kwargs_sersic = {'amp': 1., 'R_sersic': 0.1, 'n_sersic': 2, 'center_x': 0, 'center_y': 0} + kwargs_sersic = { + "amp": 1.0, + "R_sersic": 0.1, + "n_sersic": 2, + "center_x": 0, + "center_y": 0, + } # 'SERSIC_ELLIPSE': elliptical Sersic profile phi, q = 0.2, 0.9 e1, e2 = param_util.phi_q2_ellipticity(phi, q) - kwargs_sersic_ellipse = {'amp': 1., 'R_sersic': .6, 'n_sersic': 7, 'center_x': 0, 'center_y': 0, - 'e1': e1, 'e2': e2} - - lens_light_model_list = ['SERSIC'] + kwargs_sersic_ellipse = { + "amp": 1.0, + "R_sersic": 0.6, + "n_sersic": 7, + "center_x": 0, + "center_y": 0, + "e1": e1, + "e2": e2, + } + + lens_light_model_list = ["SERSIC"] self.kwargs_lens_light = [kwargs_sersic] lens_light_model_class = LightModel(light_model_list=lens_light_model_list) - source_model_list = ['SERSIC_ELLIPSE'] + source_model_list = ["SERSIC_ELLIPSE"] self.kwargs_source = [kwargs_sersic_ellipse] source_model_class = LightModel(light_model_list=source_model_list) - self.kwargs_ps = [{'ra_source': 0.0, 'dec_source': 0.0, - 'source_amp': 10.}] # quasar point source position in the source plane and intrinsic brightness - point_source_class = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[True]) - - kwargs_numerics = {'supersampling_factor': 3, 'supersampling_convolution': False, 'compute_mode': 'regular', - 'point_source_supersampling_factor': 3} - imageModel = ImageModel(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) - image_sim = sim_util.simulate_simple(imageModel, self.kwargs_lens, self.kwargs_source, - self.kwargs_lens_light, self.kwargs_ps) + self.kwargs_ps = [ + {"ra_source": 0.0, "dec_source": 0.0, "source_amp": 10.0} + ] # quasar point source position in the source plane and intrinsic brightness + point_source_class = PointSource( + point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True] + ) + + kwargs_numerics = { + "supersampling_factor": 3, + "supersampling_convolution": False, + "compute_mode": "regular", + "point_source_supersampling_factor": 3, + } + imageModel = ImageModel( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) + image_sim = sim_util.simulate_simple( + imageModel, + self.kwargs_lens, + self.kwargs_source, + self.kwargs_lens_light, + self.kwargs_ps, + ) data_class.update_data(image_sim) - self.imageModel = ImageLinearFit(data_class, psf_class, lens_model_class, source_model_class, - lens_light_model_class, - point_source_class, kwargs_numerics=kwargs_numerics) + self.imageModel = ImageLinearFit( + data_class, + psf_class, + lens_model_class, + source_model_class, + lens_light_model_class, + point_source_class, + kwargs_numerics=kwargs_numerics, + ) self.psf_fitting = PsfFitting(self.imageModel) - self.kwargs_params = {'kwargs_lens': self.kwargs_lens, 'kwargs_source': self.kwargs_source, - 
'kwargs_lens_light': self.kwargs_lens_light, 'kwargs_ps': self.kwargs_ps} + self.kwargs_params = { + "kwargs_lens": self.kwargs_lens, + "kwargs_source": self.kwargs_source, + "kwargs_lens_light": self.kwargs_lens_light, + "kwargs_ps": self.kwargs_ps, + } def test_update_psf(self): fwhm = 0.5 sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source} + kwargs_psf = {"psf_type": "PIXEL", "kernel_point_source": kernel_point_source} - kwargs_psf_iter = {'stacking_method': 'median', 'error_map_radius': 0.5, 'new_procedure': False} + kwargs_psf_iter = { + "stacking_method": "median", + "error_map_radius": 0.5, + "new_procedure": False, + } - kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf(kwargs_psf, self.kwargs_params, **kwargs_psf_iter) + kwargs_psf_return, improved_bool, error_map = self.psf_fitting.update_psf( + kwargs_psf, self.kwargs_params, **kwargs_psf_iter + ) assert improved_bool - kernel_new = kwargs_psf_return['kernel_point_source'] - kernel_true = self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + kernel_new = kwargs_psf_return["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) assert diff_old > diff_new @@ -291,42 +459,61 @@ def test_update_iterative(self): sigma = util.fwhm2sigma(fwhm) x_grid, y_grid = util.make_grid(numPix=31, deltapix=0.05) from lenstronomy.LightModel.Profiles.gaussian import Gaussian + gaussian = Gaussian() - kernel_point_source = gaussian.function(x_grid, y_grid, amp=1., sigma=sigma, center_x=0, center_y=0) + kernel_point_source = gaussian.function( + x_grid, y_grid, amp=1.0, sigma=sigma, center_x=0, center_y=0 + ) kernel_point_source /= np.sum(kernel_point_source) kernel_point_source = util.array2image(kernel_point_source) - kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': kernel_point_source, - 'kernel_point_source_init': kernel_point_source} - kwargs_psf_iter = {'stacking_method': 'median', 'psf_symmetry': 2, 'psf_iter_factor': 0.2, - 'block_center_neighbour': 0.1, 'error_map_radius': 0.5, 'new_procedure': False, - 'no_break': False, 'verbose': True, 'keep_psf_error_map': False} + kwargs_psf = { + "psf_type": "PIXEL", + "kernel_point_source": kernel_point_source, + "kernel_point_source_init": kernel_point_source, + } + kwargs_psf_iter = { + "stacking_method": "median", + "psf_symmetry": 2, + "psf_iter_factor": 0.2, + "block_center_neighbour": 0.1, + "error_map_radius": 0.5, + "new_procedure": False, + "no_break": False, + "verbose": True, + "keep_psf_error_map": False, + } kwargs_params = copy.deepcopy(self.kwargs_params) - kwargs_ps = kwargs_params['kwargs_ps'] - del kwargs_ps[0]['source_amp'] - print(kwargs_params['kwargs_ps']) - kwargs_psf_new = self.psf_fitting.update_iterative(kwargs_psf, kwargs_params, - **kwargs_psf_iter) - kernel_new = kwargs_psf_new['kernel_point_source'] - kernel_true = 
self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + kwargs_ps = kwargs_params["kwargs_ps"] + del kwargs_ps[0]["source_amp"] + print(kwargs_params["kwargs_ps"]) + kwargs_psf_new = self.psf_fitting.update_iterative( + kwargs_psf, kwargs_params, **kwargs_psf_iter + ) + kernel_new = kwargs_psf_new["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) assert diff_old > diff_new assert diff_new < 0.01 - assert 'psf_error_map' in kwargs_psf_new - - kwargs_psf_new = self.psf_fitting.update_iterative(kwargs_psf, kwargs_params, num_iter=3, - no_break=True, keep_psf_error_map=True) - kernel_new = kwargs_psf_new['kernel_point_source'] - kernel_true = self.kwargs_psf['kernel_point_source'] - kernel_old = kwargs_psf['kernel_point_source'] + assert "psf_error_map" in kwargs_psf_new + + kwargs_psf_new = self.psf_fitting.update_iterative( + kwargs_psf, + kwargs_params, + num_iter=3, + no_break=True, + keep_psf_error_map=True, + ) + kernel_new = kwargs_psf_new["kernel_point_source"] + kernel_true = self.kwargs_psf["kernel_point_source"] + kernel_old = kwargs_psf["kernel_point_source"] diff_old = np.sum((kernel_old - kernel_true) ** 2) diff_new = np.sum((kernel_new - kernel_true) ** 2) assert diff_old > diff_new assert diff_new < 0.01 - -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/test/test_Workflow/test_update_manager.py b/test/test_Workflow/test_update_manager.py index 3cb81390a..d60985342 100644 --- a/test/test_Workflow/test_update_manager.py +++ b/test/test_Workflow/test_update_manager.py @@ -4,86 +4,125 @@ class TestUpdateManager(object): - def setup_method(self): - kwargs_model = {'lens_model_list': ['SHEAR', 'SHEAR'], 'source_light_model_list': ['UNIFORM'], - 'lens_light_model_list': ['UNIFORM'], - 'optical_depth_model_list': []} - kwargs_constraints ={} + kwargs_model = { + "lens_model_list": ["SHEAR", "SHEAR"], + "source_light_model_list": ["UNIFORM"], + "lens_light_model_list": ["UNIFORM"], + "optical_depth_model_list": [], + } + kwargs_constraints = {} kwargs_likelihood = {} kwargs_params = {} - lens_init = [{'e1': 0, 'e2': 0}, {'e1': 0, 'e2': 0}] - lens_sigma = [{'e1': 0.1, 'e2': 0.1}, {'e1': 0.1, 'e2': 0.1}] - lens_fixed = [{'ra_0': 0, 'dec_0': 0}, {'ra_0': 0, 'dec_0': 0}] - lens_lower = [{'e1': -1, 'e2': -1}, {'e1': -1, 'e2': -1}] - lens_upper = [{'e1': 1, 'e2': 1}, {'e1': 1, 'e2': 1}] - kwargs_params['lens_model'] = [lens_init, lens_sigma, lens_fixed, lens_lower, lens_upper] - kwargs_params['source_model'] = [[{}], [{}], [{}], [{}], [{}]] - kwargs_params['lens_light_model'] = [[{}], [{}], [{}], [{}], [{}]] - kwargs_params['special'] = [{'special1': 1}, {'special1': 1}, {'special1': 0.1}, {'special1': 0}, {'special1': 1}] - kwargs_params['extinction_model'] = [[], [], [], [], []] - self.manager = UpdateManager(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) + lens_init = [{"e1": 0, "e2": 0}, {"e1": 0, "e2": 0}] + lens_sigma = [{"e1": 0.1, "e2": 0.1}, {"e1": 0.1, "e2": 0.1}] + lens_fixed = [{"ra_0": 0, "dec_0": 0}, {"ra_0": 0, "dec_0": 0}] + lens_lower = [{"e1": -1, "e2": -1}, {"e1": -1, "e2": -1}] + lens_upper = [{"e1": 1, "e2": 1}, {"e1": 1, "e2": 1}] + kwargs_params["lens_model"] = [ + lens_init, + lens_sigma, + lens_fixed, + lens_lower, + lens_upper, + ] + kwargs_params["source_model"] = [[{}], [{}], [{}], [{}], [{}]] + 
kwargs_params["lens_light_model"] = [[{}], [{}], [{}], [{}], [{}]] + kwargs_params["special"] = [ + {"special1": 1}, + {"special1": 1}, + {"special1": 0.1}, + {"special1": 0}, + {"special1": 1}, + ] + kwargs_params["extinction_model"] = [[], [], [], [], []] + self.manager = UpdateManager( + kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params + ) def test_none_mamager(self): - manager = MultiBandUpdateManager(kwargs_model={}, kwargs_constraints={}, kwargs_likelihood={}, kwargs_params={}, num_bands=0) + manager = MultiBandUpdateManager( + kwargs_model={}, + kwargs_constraints={}, + kwargs_likelihood={}, + kwargs_params={}, + num_bands=0, + ) results = manager.best_fit() - assert len(results['kwargs_lens']) == 0 + assert len(results["kwargs_lens"]) == 0 def test_init_kwargs(self): kwargs_init = self.manager.init_kwargs - assert kwargs_init['kwargs_lens'][0]['e1'] == 0 + assert kwargs_init["kwargs_lens"][0]["e1"] == 0 def test_sigma_kwargs(self): kwargs_sigma = self.manager.sigma_kwargs - assert kwargs_sigma['kwargs_lens'][0]['e1'] == 0.1 + assert kwargs_sigma["kwargs_lens"][0]["e1"] == 0.1 def test_update_kwargs_model(self): - kwargs_model = {'lens_model_list': ['SHEAR', 'SHEAR'], - 'source_light_model_list': ['UNIFORM'], - 'lens_light_model_list': ['UNIFORM'], - 'optical_depth_model_list': [], - 'multi_plane': True, - 'z_source': 2., - 'lens_redshift_list': [0.5, 0.6], - 'source_redshift_list': [2.], - } - kwargs_constraints = {'lens_redshift_sampling_indexes': [-1, 1], - 'source_redshift_sampling_indexes': [0] - } + kwargs_model = { + "lens_model_list": ["SHEAR", "SHEAR"], + "source_light_model_list": ["UNIFORM"], + "lens_light_model_list": ["UNIFORM"], + "optical_depth_model_list": [], + "multi_plane": True, + "z_source": 2.0, + "lens_redshift_list": [0.5, 0.6], + "source_redshift_list": [2.0], + } + kwargs_constraints = { + "lens_redshift_sampling_indexes": [-1, 1], + "source_redshift_sampling_indexes": [0], + } kwargs_likelihood = {} kwargs_params = {} - lens_init = [{'e1': 0, 'e2': 0}, {'e1': 0, 'e2': 0}] - lens_sigma = [{'e1': 0.1, 'e2': 0.1}, {'e1': 0.1, 'e2': 0.1}] - lens_fixed = [{'ra_0': 0, 'dec_0': 0}, {'ra_0': 0, 'dec_0': 0}] - lens_lower = [{'e1': -1, 'e2': -1}, {'e1': -1, 'e2': -1}] - lens_upper = [{'e1': 1, 'e2': 1}, {'e1': 1, 'e2': 1}] - kwargs_params['lens_model'] = [lens_init, lens_sigma, lens_fixed, lens_lower, lens_upper] - kwargs_params['source_model'] = [[{}], [{}], [{}], [{}], [{}]] - kwargs_params['lens_light_model'] = [[{}], [{}], [{}], [{}], [{}]] - kwargs_params['special'] = [{'special1': 1}, {'special1': 1}, {'special1': 0.1}, {'special1': 0}, - {'special1': 1}] - kwargs_params['extinction_model'] = [[], [], [], [], []] - - manager = UpdateManager(kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params) - - kwargs_special = {'z_sampling': [1.9, 0.7]} + lens_init = [{"e1": 0, "e2": 0}, {"e1": 0, "e2": 0}] + lens_sigma = [{"e1": 0.1, "e2": 0.1}, {"e1": 0.1, "e2": 0.1}] + lens_fixed = [{"ra_0": 0, "dec_0": 0}, {"ra_0": 0, "dec_0": 0}] + lens_lower = [{"e1": -1, "e2": -1}, {"e1": -1, "e2": -1}] + lens_upper = [{"e1": 1, "e2": 1}, {"e1": 1, "e2": 1}] + kwargs_params["lens_model"] = [ + lens_init, + lens_sigma, + lens_fixed, + lens_lower, + lens_upper, + ] + kwargs_params["source_model"] = [[{}], [{}], [{}], [{}], [{}]] + kwargs_params["lens_light_model"] = [[{}], [{}], [{}], [{}], [{}]] + kwargs_params["special"] = [ + {"special1": 1}, + {"special1": 1}, + {"special1": 0.1}, + {"special1": 0}, + {"special1": 1}, + ] + 
kwargs_params["extinction_model"] = [[], [], [], [], []] + + manager = UpdateManager( + kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params + ) + + kwargs_special = {"z_sampling": [1.9, 0.7]} manager.update_kwargs_model(kwargs_special) - assert manager.kwargs_model['lens_redshift_list'] == [0.5, 0.7] - assert manager.kwargs_model['source_redshift_list'] == [1.9] + assert manager.kwargs_model["lens_redshift_list"] == [0.5, 0.7] + assert manager.kwargs_model["source_redshift_list"] == [1.9] def test_update_parameter_state(self): - self.manager.update_param_state(kwargs_lens=[{'e1': -2, 'e2': 0}, {'e1': 2, 'e2': 0}]) + self.manager.update_param_state( + kwargs_lens=[{"e1": -2, "e2": 0}, {"e1": 2, "e2": 0}] + ) kwargs_temp = self.manager.parameter_state - assert kwargs_temp['kwargs_lens'][0]['e1'] == -2 + assert kwargs_temp["kwargs_lens"][0]["e1"] == -2 self.manager.set_init_state() kwargs_temp = self.manager.parameter_state - assert kwargs_temp['kwargs_lens'][0]['e1'] == 0 + assert kwargs_temp["kwargs_lens"][0]["e1"] == 0 def test_update_param_value(self): - self.manager.update_param_value(lens=[[1, ['e1'], [0.029]]]) + self.manager.update_param_value(lens=[[1, ["e1"], [0.029]]]) kwargs_temp = self.manager.parameter_state - assert kwargs_temp['kwargs_lens'][1]['e1'] == 0.029 + assert kwargs_temp["kwargs_lens"][1]["e1"] == 0.029 def test_param_class(self): param_class = self.manager.param_class @@ -92,62 +131,84 @@ def test_param_class(self): def test_best_fit(self): kwargs_result = self.manager.best_fit(bijective=True) - assert kwargs_result['kwargs_lens'][0]['e1'] == 0 + assert kwargs_result["kwargs_lens"][0]["e1"] == 0 def test_update_options(self): - self.manager.update_options(kwargs_model=None, kwargs_constraints={'test': 'test'}, kwargs_likelihood=None) - assert self.manager.kwargs_constraints['test'] == 'test' - - self.manager.update_options(kwargs_model={'test': 'test'}, kwargs_constraints=None, kwargs_likelihood=None) - assert self.manager.kwargs_model['test'] == 'test' + self.manager.update_options( + kwargs_model=None, + kwargs_constraints={"test": "test"}, + kwargs_likelihood=None, + ) + assert self.manager.kwargs_constraints["test"] == "test" + + self.manager.update_options( + kwargs_model={"test": "test"}, + kwargs_constraints=None, + kwargs_likelihood=None, + ) + assert self.manager.kwargs_model["test"] == "test" def test_update_limits(self): - self.manager.update_limits(change_source_lower_limit=[[0, ['test'], [-1]]], change_source_upper_limit=[[0, ['test'], [1]]]) - self.manager.update_limits(change_lens_lower_limit=[[0, ['e1'], [-0.9]]], change_lens_upper_limit=[[0, ['e1'], [0.9]]]) + self.manager.update_limits( + change_source_lower_limit=[[0, ["test"], [-1]]], + change_source_upper_limit=[[0, ["test"], [1]]], + ) + self.manager.update_limits( + change_lens_lower_limit=[[0, ["e1"], [-0.9]]], + change_lens_upper_limit=[[0, ["e1"], [0.9]]], + ) upper_lens, upper_source, _, _, _, _ = self.manager._upper_kwargs - assert upper_source[0]['test'] == 1 - assert upper_lens[0]['e1'] == 0.9 + assert upper_source[0]["test"] == 1 + assert upper_lens[0]["e1"] == 0.9 def test_update_sigmas(self): - self.manager.update_sigmas(change_sigma_source=[[0, ['test'], [1]]], - change_sigma_lens=[[0, ['test'], [2]]]) - self.manager.update_sigmas(change_sigma_lens_light=[[0, ['e1'], [-0.9]]], - change_sigma_lens=[[0, ['e1'], [0.9]]]) + self.manager.update_sigmas( + change_sigma_source=[[0, ["test"], [1]]], + change_sigma_lens=[[0, ["test"], [2]]], + ) + self.manager.update_sigmas( + 
+            change_sigma_lens_light=[[0, ["e1"], [-0.9]]],
+            change_sigma_lens=[[0, ["e1"], [0.9]]],
+        )
         upper_lens, upper_source, _, _, _, _ = self.manager._upper_kwargs
-        assert self.manager._lens_sigma[0]['test'] == 2
-        assert self.manager._lens_sigma[0]['e1'] == 0.9
+        assert self.manager._lens_sigma[0]["test"] == 2
+        assert self.manager._lens_sigma[0]["e1"] == 0.9
 
     def test_update_fixed(self):
-        lens_add_fixed = [[0, ['e1'], [-1]]]
+        lens_add_fixed = [[0, ["e1"], [-1]]]
         self.manager.update_fixed(lens_add_fixed=lens_add_fixed)
-        assert self.manager._lens_fixed[0]['e1'] == -1
+        assert self.manager._lens_fixed[0]["e1"] == -1
 
-        lens_add_fixed = [[0, ['e2']]]
+        lens_add_fixed = [[0, ["e2"]]]
         self.manager.update_fixed(lens_add_fixed=lens_add_fixed)
-        assert self.manager._lens_fixed[0]['e2'] == 0
+        assert self.manager._lens_fixed[0]["e2"] == 0
 
-        lens_remove_fixed = [[0, ['e1']]]
+        lens_remove_fixed = [[0, ["e1"]]]
         self.manager.update_fixed(lens_remove_fixed=lens_remove_fixed)
-        assert 'e1' not in self.manager._lens_fixed[0]
+        assert "e1" not in self.manager._lens_fixed[0]
 
-        assert 'special1' in self.manager._special_fixed
-        self.manager.update_fixed(special_remove_fixed=['special1'])
-        assert 'special1' not in self.manager._special_fixed
+        assert "special1" in self.manager._special_fixed
+        self.manager.update_fixed(special_remove_fixed=["special1"])
+        assert "special1" not in self.manager._special_fixed
 
-        self.manager.update_fixed(special_add_fixed=['special1'])
-        assert self.manager._special_fixed['special1'] == 1
+        self.manager.update_fixed(special_add_fixed=["special1"])
+        assert self.manager._special_fixed["special1"] == 1
 
-        self.manager.update_fixed(special_add_fixed=['special1'])
-        assert self.manager._special_fixed['special1'] == 1
+        self.manager.update_fixed(special_add_fixed=["special1"])
+        assert self.manager._special_fixed["special1"] == 1
 
     def test_update_logsampling(self):
-        self.manager.update_options(kwargs_model={}, kwargs_constraints={'log_sampling_lens': [[0, ['e1']]]}, kwargs_likelihood = {})
-        assert self.manager.param_class.lensParams.kwargs_logsampling[0] == ['e1']
+        self.manager.update_options(
+            kwargs_model={},
+            kwargs_constraints={"log_sampling_lens": [[0, ["e1"]]]},
+            kwargs_likelihood={},
+        )
+        assert self.manager.param_class.lensParams.kwargs_logsampling[0] == ["e1"]
 
     def test_fix_image_parameters(self):
         self.manager.fix_image_parameters(image_index=0)
         assert 1 == 1
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     pytest.main()