diff --git a/flows/__init__.py b/flows/__init__.py
index e8fc4ed..ad350f3 100644
--- a/flows/__init__.py
+++ b/flows/__init__.py
@@ -1,7 +1,7 @@
 """
 FLOWS pipeline package
 """
-from .photometry import photometry
+from .photometry import do_phot as photometry
 from .catalogs import download_catalog
 from .visibility import visibility
 from .version import get_version
diff --git a/flows/epsfbuilder/__init__.py b/flows/epsfbuilder/__init__.py
index dcd2755..4369e44 100644
--- a/flows/epsfbuilder/__init__.py
+++ b/flows/epsfbuilder/__init__.py
@@ -2,4 +2,4 @@
 # -*- coding: utf-8 -*-
 # flake8: noqa
 
-from .epsfbuilder import FlowsEPSFBuilder
+from .epsfbuilder import FlowsEPSFBuilder, verify_epsf
diff --git a/flows/epsfbuilder/epsfbuilder.py b/flows/epsfbuilder/epsfbuilder.py
index d8f8ebd..e49bb27 100644
--- a/flows/epsfbuilder/epsfbuilder.py
+++ b/flows/epsfbuilder/epsfbuilder.py
@@ -7,8 +7,12 @@
 """
 import time
 import numpy as np
-from scipy.interpolate import griddata
+from scipy.interpolate import griddata, UnivariateSpline
 import photutils.psf
+from typing import List, Tuple
+import logging
+
+logger = logging.getLogger(__name__)
 
 
 class FlowsEPSFBuilder(photutils.psf.EPSFBuilder):
@@ -51,3 +55,31 @@ def __call__(self, *args, **kwargs):
         epsf.fit_info = dict(n_iter=len(self._epsf), max_iters=self.maxiters, time=time.time() - t0, )
         return epsf, stars
+
+
+def verify_epsf(epsf: photutils.psf.EPSFModel) -> Tuple[bool, List[float]]:
+    """
+    Verify that the built ePSF is a single gaussian-like peak along both axes,
+    returning (epsf_ok, fwhms) with one FWHM estimate per axis.
+    """
+    fwhms = []
+    epsf_ok = True
+    for a in (0, 1):
+        # Collapse the ePSF along this axis:
+        profile = epsf.data.sum(axis=a)
+        itop = profile.argmax()
+        poffset = profile[itop] / 2
+
+        # Run a spline through the points, but subtract half of the peak value, and find the roots:
+        # We have to use a cubic spline, since roots() is not supported for other splines
+        # for some reason
+        profile_intp = UnivariateSpline(np.arange(0, len(profile)), profile - poffset, k=3, s=0, ext=3)
+        lr = profile_intp.roots()
+
+        # Do some sanity checks on the ePSF:
+        # It should pass 50% exactly twice and have the maximum inside that region.
+        # I.e. it should be a single gaussian-like peak.
+        if len(lr) != 2 or itop < lr[0] or itop > lr[1]:
+            logger.error(f"EPSF is not a single gaussian-like peak along axis {a}")
+            epsf_ok = False
+        else:
+            axis_fwhm = lr[1] - lr[0]
+            fwhms.append(axis_fwhm)
+
+    return epsf_ok, fwhms
diff --git a/flows/fileio.py b/flows/fileio.py
new file mode 100644
index 0000000..f24dc02
--- /dev/null
+++ b/flows/fileio.py
@@ -0,0 +1,80 @@
+import os
+import logging
+from typing import Optional, Protocol
+from configparser import ConfigParser
+
+logger = logging.getLogger(__name__)
+
+
+class DirectoryProtocol(Protocol):
+    archive_local: str
+    output_folder: str
+
+    def set_output_dirs(self, target_name: str, fileid: int) -> None:
+        ...
+
+    def image_path(self, image_path: str) -> str:
+        ...
+
+    @property
+    def photometry_path(self) -> str:
+        ...
+
+
+class Directories:
+    """
+    Class for creating input and output directories, given a configparser instance.
+    Overwrite archive_local or output_folder to manually place elsewhere.
+    """
+    archive_local: Optional[str] = None
+    output_folder: Optional[str] = None
+
+    def __init__(self, config: ConfigParser):
+        self.config = config
+
+    def set_output_dirs(self, target_name: str, fileid: int) -> None:
+        """
+        This method is meant to be called from within a context where a
+        target_name and fileid are defined, so that the output_folder
+        can be created appropriately.
+
+        Parameters:
+            target_name (str): Target name.
+ fileid (int): The fileid of the file being processed + """ + + # Checking for None allows manual declarations to not be overwritten. + if self.archive_local is None: + self.archive_local = self._set_archive() + + self.output_folder = self._set_output(target_name, fileid) + + # Create output folder if necessary. + os.makedirs(self.output_folder, exist_ok=True) + logger.info("Placing output in '%s'", self.output_folder) + + def _set_archive(self) -> str: + archive_local = self.config.get('photometry', 'archive_local', fallback=None) + if archive_local is not None and not os.path.isdir(archive_local): + raise FileNotFoundError("ARCHIVE is not available: " + archive_local) + logger.info(f"Using data from: {archive_local}.") + return archive_local + + def _set_output(self, target_name: str, fileid: int) -> str: + """ + Directory for output, defaults to current + directory if config is invalid or empty. + """ + output_folder_root = self.config.get('photometry', 'output', fallback='.') + output_folder = os.path.join(output_folder_root, target_name, f'{fileid:05d}') + return output_folder + + def image_path(self, image_path: str) -> str: + return os.path.join(self.archive_local, image_path) + + @property + def photometry_path(self) -> str: + return os.path.join(self.output_folder, 'photometry.ecsv') + + def save_as(self, filename: str) -> str: + return os.path.join(self.output_folder, filename) diff --git a/flows/filters.py b/flows/filters.py new file mode 100644 index 0000000..b900792 --- /dev/null +++ b/flows/filters.py @@ -0,0 +1,33 @@ +import logging + +FILTERS = { + 'up': 'u_mag', + 'gp': 'g_mag', + 'rp': 'r_mag', + 'ip': 'i_mag', + 'zp': 'z_mag', + 'B': 'B_mag', + 'V': 'V_mag', + 'J': 'J_mag', + 'H': 'H_mag', + 'K': 'K_mag', +} + +FALLBACK_FILTER = 'gp' +logger = logging.getLogger(__name__) + + +def get_reference_filter(photfilter: str) -> str: + """ + Translate photometric filter into table column. + + Parameters: + photfilter (str): photometric filter corresponding to key of FILTERS + """ + + _ref_filter = FILTERS.get(photfilter, None) + if _ref_filter is None: + logger.warning(f"Could not find filter {photfilter} in catalogs. " + f"Using default {FALLBACK_FILTER} filter.") + _ref_filter = FILTERS[FALLBACK_FILTER] + return _ref_filter diff --git a/flows/load_image.py b/flows/load_image.py index bc72a25..cd43dac 100644 --- a/flows/load_image.py +++ b/flows/load_image.py @@ -10,7 +10,7 @@ from astropy.io import fits from astropy.time import Time from astropy.wcs import WCS, FITSFixedWarning -from typing import Tuple +from typing import Tuple, Union, Dict, Any, Optional from tendrils import api from dataclasses import dataclass # , field @@ -20,21 +20,39 @@ logger = logging.getLogger(__name__) # Singleton logger instance -# -------------------------------------------------------------------------------------------------- +@dataclass +class InstrumentDefaults: + """ + Default radius and FWHM for an instrument in arcseconds. 
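+    These replace the values previously hard-coded in photometry.py (see the removed radius/fwhm block below).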
+ """ + radius: float = 10 + fwhm: float = 6.0 # Best initial guess + fwhm_min: float = 3.5 + fwhm_max: float = 18.0 + + @dataclass class FlowsImage: image: np.ndarray header: typing.Dict - mask: np.ndarray = None - peakmax: float = None - exptime: float = None - clean: np.ma.MaskedArray = None + mask: Optional[np.ndarray] = None + peakmax: Optional[float] = None + exptime: Optional[float] = None + instrument_defaults: Optional[InstrumentDefaults] = None + site: Optional[Dict[str, Any]] = None + obstime: Optional[Time] = None + photfilter: Optional[str] = None + wcs: Optional[WCS] = None + + clean: Optional[np.ma.MaskedArray] = None + subclean: Optional[np.ma.MaskedArray] = None + error: Optional[np.ma.MaskedArray] = None def __post_init__(self): self.shape = self.image.shape self.wcs = self.create_wcs() # Make empty mask - if not self.mask: + if self.mask is None: self.mask = np.zeros_like(self.image, dtype='bool') self.check_finite() @@ -169,23 +187,29 @@ class Instrument(AbstractInstrument): def __init__(self, image: FlowsImage = None): self.image = image - def get_site(self): + def get_site(self) -> Dict[str, Any]: if self.siteid is not None: return api.get_site(self.siteid) - def get_exptime(self): + def get_exptime(self) -> Union[float, int, str]: exptime = self.image.header.get('EXPTIME', None) if exptime is None: raise ValueError("Image exposure time could not be extracted") return exptime - def get_obstime(self): + def get_obstime(self) -> Time: """Default for JD, jd, utc.""" return Time(self.image.header['JD'], format='jd', scale='utc', location=self.image.site['EarthLocation']) def get_photfilter(self): return self.image.header['FILTER'] + def set_instrument_defaults(self): + """ + Set default values for instrument. + """ + self.image.instrument_defaults = InstrumentDefaults() + def _get_clean_image(self): self.image.peakmax = self.peakmax self.image.site = self.get_site() @@ -194,7 +218,7 @@ def _get_clean_image(self): self.image.photfilter = self.get_photfilter() self.image.create_masked_image() - def process_image(self, image: FlowsImage = None): + def process_image(self, image: FlowsImage = None) -> FlowsImage: """Process existing or new image.""" if image is not None: self.image = image @@ -202,6 +226,8 @@ def process_image(self, image: FlowsImage = None): raise AttributeError('No FlowsImage to be processed. 
Self.image was None') self._get_clean_image() + self.set_instrument_defaults() + return self.image class LCOGT(Instrument): @@ -373,6 +399,14 @@ def get_photfilter(self): self.image.header['FILTER']) return photfilter +class Swope_newheader(Swope): + + def get_obstime(self): + obstime = Time(self.image.header['MJD-OBS'], format='mjd', scale='utc', + location=self.image.site['EarthLocation']) + obstime += 0.5 * self.image.exptime * u.second + return obstime + class Dupont(Instrument): siteid = 14 @@ -522,10 +556,35 @@ def get_obstime(self): return obstime +class RATIR(Instrument): + siteid = 23 + + def get_obstime(self): + obstime = Time(self.image.header['DATE-OBS'], format='isot', scale='utc', + location=self.image.site['EarthLocation']) + return obstime + + instruments = {'LCOGT': LCOGT, 'HAWKI': HAWKI, 'ALFOSC': ALFOSC, 'NOTCAM': NOTCAM, 'PS1': PS1, 'Liverpool': Liverpool, - 'Omega2000': Omega2000, 'Swope': Swope, 'Dupont': Dupont, 'Retrocam': RetroCam, 'Baade': Baade, + 'Omega2000': Omega2000, 'Swope': Swope, 'Swope_newheader':Swope_newheader, 'Dupont': Dupont, 'Retrocam': + RetroCam, 'Baade': Baade, 'Sofi': Sofi, 'EFOSC': EFOSC, 'AstroNIRCam': AstroNIRCam, 'OmegaCam': OmegaCam, 'AndiCam': AndiCam, - 'PairTel': PairTel, 'TJO': TJO} + 'PairTel': PairTel, 'TJO': TJO, 'RATIR': RATIR, } + + +def correct_barycentric(obstime: Time, target_coord: coords.SkyCoord) -> Time: + """ + BARYCENTRIC CORRECTION OF TIME + + Parameters: + obstime (astropy.time.Time): Midpoint observed image time. + target_coord (astropy.coords.SkyCoord): Coordinates of target in image. + + Returns: + obstime (astropy.time.Time): Time corrected to barycenter with jpl ephemeris. + """ + ltt_bary = obstime.light_travel_time(target_coord, ephemeris='jpl') + return obstime.tdb + ltt_bary def load_image(filename: str, target_coord: typing.Union[coords.SkyCoord, typing.Tuple[float, float]] = None): @@ -602,10 +661,14 @@ def load_image(filename: str, target_coord: typing.Union[coords.SkyCoord, typing instrument_name = Omega2000() break - elif telescope == 'SWO' and hdr.get('SITENAME') == 'LCO': + elif telescope.upper().startswith('SWO') and hdr.get('SITENAME') == 'LCO': instrument_name = Swope() break + elif telescope.upper().startswith('SWO') and origin == 'ziggy': + instrument_name = Swope_newheader() + break + elif telescope == 'DUP' and hdr.get('SITENAME') == 'LCO' and instrument == 'Direct/SITe2K-1': instrument_name = Dupont() break @@ -649,10 +712,16 @@ def load_image(filename: str, target_coord: typing.Union[coords.SkyCoord, typing instrument_name = TJO() break + elif telescope == "OAN/SPM Harold L. 
Johnson 1.5-meter": + instrument_name = RATIR() + break + else: raise RuntimeError("Could not determine origin of image") image = FlowsImage(image=np.asarray(hdul[ext].data, dtype='float64'), header=hdr, mask=mask) - instrument_name.process_image(image) + clean_image = instrument_name.process_image(image) - return instrument_name.image + if target_coord is not None: + clean_image.obstime = correct_barycentric(clean_image.obstime, target_coord) + return clean_image diff --git a/flows/magnitudes.py b/flows/magnitudes.py new file mode 100644 index 0000000..28d087d --- /dev/null +++ b/flows/magnitudes.py @@ -0,0 +1,111 @@ +import logging +from typing import Tuple, Any +from astropy.table import Table +import numpy as np +from bottleneck import nansum +from astropy.stats import sigma_clip +from astropy.modeling import models, fitting +import matplotlib.pyplot as plt + +from .target import Target +from .zeropoint import sigma_from_Chauvenet, bootstrap_outlier +from .filters import get_reference_filter + +logger = logging.getLogger(__name__) + + +def instrumental_mag(tab: Table, target: Target) -> Tuple[Table, Tuple[Any, Any]]: + target_rows = tab['starid'] <= 0 + + # Check that we got valid flux photometry: + if np.any(~np.isfinite(tab[target_rows]['flux_psf'])) or np.any(~np.isfinite(tab[target_rows]['flux_psf_error'])): + raise RuntimeError(f"Target:{target.name} flux is undefined.") + + # Convert PSF fluxes to magnitudes: + mag_inst = -2.5 * np.log10(tab['flux_psf']) + mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] / tab['flux_psf']) + + # Corresponding magnitudes in catalog: + mag_catalog = tab[get_reference_filter(target.photfilter)] + + # Mask out things that should not be used in calibration: + use_for_calibration = np.ones_like(mag_catalog, dtype='bool') + use_for_calibration[target_rows] = False # Do not use target for calibration + use_for_calibration[~np.isfinite(mag_inst) | ~np.isfinite(mag_catalog)] = False + + + # Just creating some short-hands: + x = mag_catalog[use_for_calibration] + y = mag_inst[use_for_calibration] + yerr = mag_inst_err[use_for_calibration] + weights = 1.0 / yerr ** 2 + + if not any(use_for_calibration): + raise RuntimeError("No calibration stars") + + # Fit linear function with fixed slope, using sigma-clipping: + model = models.Linear1D(slope=1, fixed={'slope': True}) + fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(), sigma_clip, sigma=3.0) + best_fit, sigma_clipped = fitter(model, x, y, weights=weights) + + # Extract zero-point and estimate its error using a single weighted fit: + # I don't know why there is not an error-estimate attached directly to the Parameter? + zp = -1 * best_fit.intercept.value # Negative, because that is the way zeropoints are usually defined + + weights[sigma_clipped] = 0 # Trick to make following expression simpler + n_weights = len(weights.nonzero()[0]) + if n_weights > 1: + zp_error = np.sqrt(n_weights * nansum(weights * (y - best_fit(x)) ** 2) / nansum(weights) / (n_weights - 1)) + else: + zp_error = np.NaN + logger.info('Leastsquare ZP = %.3f, ZP_error = %.3f', zp, zp_error) + + # Determine sigma clipping sigma according to Chauvenet method + # But don't allow less than sigma = sigmamin, setting to 1.5 for now. + # Should maybe be 2? 
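+    # (Chauvenet's criterion rejects a point when the expected number of samples
+    # at least as extreme in N draws falls below 1/2, i.e. it solves
+    # N * erfc(sigma / sqrt(2)) = 1/2 for sigma; see sigma_from_Chauvenet in zeropoint.py.)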
+ sigmamin = 1.5 + sig_chauv = sigma_from_Chauvenet(len(x)) + sig_chauv = sig_chauv if sig_chauv >= sigmamin else sigmamin + + # Extract zero point and error using bootstrap method + nboot = 1000 + logger.info('Running bootstrap with sigma = %.2f and n = %d', sig_chauv, nboot) + pars = bootstrap_outlier(x, y, yerr, n=nboot, model=model, fitter=fitting.LinearLSQFitter, outlier=sigma_clip, + outlier_kwargs={'sigma': sig_chauv}, summary='median', error='bootstrap', + return_vals=False) + + zp_bs = pars['intercept'] * -1.0 + zp_error_bs = pars['intercept_error'] + + logger.info('Bootstrapped ZP = %.3f, ZP_error = %.3f', zp_bs, zp_error_bs) + + # Check that difference is not large + zp_diff = 0.4 + if np.abs(zp_bs - zp) >= zp_diff: + logger.warning("Bootstrap and weighted LSQ ZPs differ by %.2f, " + "which is more than the allowed %.2f mag.", np.abs(zp_bs - zp), zp_diff) + + # Add calibrated magnitudes to the photometry table: + tab['mag'] = mag_inst + zp_bs + tab['mag_error'] = np.sqrt(mag_inst_err ** 2 + zp_error_bs ** 2) + + # Check that we got valid magnitude photometry: + if not np.isfinite(tab[0]['mag']) or not np.isfinite(tab[0]['mag_error']): + raise RuntimeError(f"Target:{target.name} magnitude is undefined.") + + + # Update Meta-data: + tab.meta['zp'] = zp_bs + tab.meta['zp_error'] = zp_error_bs + tab.meta['zp_diff'] = np.abs(zp_bs - zp) + tab.meta['zp_error_weights'] = zp_error + + # Plot: + mag_fig, mag_ax = plt.subplots(1, 1) + mag_ax.errorbar(x, y, yerr=yerr, fmt='k.') + mag_ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r') + mag_ax.plot(x, best_fit(x), color='g', linewidth=3) + mag_ax.set_xlabel('Catalog magnitude') + mag_ax.set_ylabel('Instrumental magnitude') + + return (tab, (mag_fig, mag_ax)) diff --git a/flows/photometry.py b/flows/photometry.py index 173bb6a..ff094cf 100644 --- a/flows/photometry.py +++ b/flows/photometry.py @@ -8,216 +8,122 @@ .. 
codeauthor:: Simon Holmbo """ -import os import logging import warnings -import gc from timeit import default_timer import numpy as np -from scipy.interpolate import UnivariateSpline -from bottleneck import nansum, allnan, replace -from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning, ErfaWarning +from astropy.coordinates import SkyCoord +from bottleneck import allnan +from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning import astropy.units as u -import astropy.coordinates as coords -from astropy.stats import sigma_clip, SigmaClip -from astropy.table import Table, vstack +from astropy.stats import SigmaClip +from astropy.table import Table from astropy.nddata import NDData -from astropy.modeling import models, fitting +from astropy.modeling import fitting from astropy.wcs.utils import proj_plane_pixel_area, fit_wcs_from_points -from astropy.time import Time -import sep + from tendrils import api from tendrils.utils import load_config +from typing import Dict, List, Optional, Callable +from numpy.typing import ArrayLike + +from .magnitudes import instrumental_mag +from .result_model import ResultsTable warnings.simplefilter('ignore', category=AstropyDeprecationWarning) from photutils import CircularAperture, CircularAnnulus, aperture_photometry # noqa: E402 from photutils.psf import EPSFFitter, BasicPSFPhotometry, DAOGroup, extract_stars # noqa: E402 -from photutils import Background2D, SExtractorBackground, MedianBackground # noqa: E402 +from photutils.background import Background2D, SExtractorBackground, MedianBackground # noqa: E402 from photutils.utils import calc_total_error # noqa: E402 from . import reference_cleaning as refclean # noqa: E402 from .plots import plt, plot_image # noqa: E402 from .version import get_version # noqa: E402 -from .load_image import load_image # noqa: E402 -from .run_imagematch import run_imagematch # noqa: E402 -from .zeropoint import bootstrap_outlier, sigma_from_Chauvenet # noqa: E402 +from .load_image import load_image, FlowsImage # noqa: E402 from .coordinatematch import CoordinateMatch, WCS2 # noqa: E402 -from .epsfbuilder import FlowsEPSFBuilder # noqa: E402 +from .epsfbuilder import FlowsEPSFBuilder, verify_epsf # noqa: E402 +from .fileio import DirectoryProtocol, Directories # noqa: E402 +from .filters import get_reference_filter # noqa: E402 +from .target import Target # noqa: E402 +# @TODO: refactor load_image to separate modules. __version__ = get_version(pep440=False) +logger = logging.getLogger(__name__) -# -------------------------------------------------------------------------------------------------- -def photometry(fileid, output_folder=None, attempt_imagematch=True, keep_diff_fixed=False, cm_timeout=None): +def get_datafile(fileid: int) -> Dict: """ - Run photometry. - - Parameters: - fileid (int): File ID to process. - output_folder (str, optional): Path to directory where output should be placed. - attempt_imagematch (bool, optional): If no subtracted image is available, but a - template image is, should we attempt to run ImageMatch using standard settings. - Default=True. - keep_diff_fixed (bool, optional): Allow psf photometry to recenter when - calculating the flux for the difference image. Setting to True can help if diff - image has non-source flux in the region around the SN. - cm_timeout (float, optional): Timeout in seconds for the :class:`CoordinateMatch` algorithm. - - .. codeauthor:: Rasmus Handberg - .. codeauthor:: Emir Karamehmetoglu - .. 
codeauthor:: Simon Holmbo + Get datafile from API, log it, return. """ - - # Settings: - ref_target_dist_limit = 10 * u.arcsec # Reference star must be further than this away to be included - - logger = logging.getLogger(__name__) - tic = default_timer() - - # Use local copy of archive if configured to do so: - config = load_config() - - # Get datafile dict from API: + # targetid = datafile['targetid'] + # target_name = datafile['target_name'] + # photfilter = datafile['photfilter'] datafile = api.get_datafile(fileid) logger.debug("Datafile: %s", datafile) - targetid = datafile['targetid'] - target_name = datafile['target_name'] - photfilter = datafile['photfilter'] - - archive_local = config.get('photometry', 'archive_local', fallback=None) - if archive_local is not None: - datafile['archive_path'] = archive_local - if not os.path.isdir(datafile['archive_path']): - raise FileNotFoundError("ARCHIVE is not available: " + datafile['archive_path']) - - # Get the catalog containing the target and reference stars: - # TODO: Include proper-motion to the time of observation + return datafile + + +def get_catalog(targetid: int) -> Dict: catalog = api.get_catalog(targetid, output='table') - target = catalog['target'][0] - target_coord = coords.SkyCoord(ra=target['ra'], dec=target['decl'], unit='deg', frame='icrs') - - # Folder to save output: - if output_folder is None: - output_folder_root = config.get('photometry', 'output', fallback='.') - output_folder = os.path.join(output_folder_root, target_name, f'{fileid:05d}') - logger.info("Placing output in '%s'", output_folder) - os.makedirs(output_folder, exist_ok=True) - - # The paths to the science image: - filepath = os.path.join(datafile['archive_path'], datafile['path']) - - # TODO: Download datafile using API to local drive: - # TODO: Is this a security concern? - # if archive_local: - # api.download_datafile(datafile, archive_local) - - # Translate photometric filter into table column: - ref_filter = {'up': 'u_mag', 'gp': 'g_mag', 'rp': 'r_mag', 'ip': 'i_mag', 'zp': 'z_mag', 'B': 'B_mag', 'V': 'V_mag', - 'J': 'J_mag', 'H': 'H_mag', 'K': 'K_mag', }.get(photfilter, None) - - if ref_filter is None: - logger.warning("Could not find filter '%s' in catalogs. 
Using default gp filter.", photfilter) - ref_filter = 'g_mag' - - # Load the image from the FITS file: - logger.info("Load image '%s'", filepath) - image = load_image(filepath, target_coord=target_coord) - - references = catalog['references'] - references.sort(ref_filter) - - # Check that there actually are reference stars in that filter: - if allnan(references[ref_filter]): - raise ValueError("No reference stars found in current photfilter.") - - # ============================================================================================== - # BARYCENTRIC CORRECTION OF TIME - # ============================================================================================== - - ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl') - image.obstime = image.obstime.tdb + ltt_bary - - # ============================================================================================== - # BACKGROUND ESTIMATION - # ============================================================================================== - - fig, ax = plt.subplots(1, 2, figsize=(20, 18)) - plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image') - plot_image(image.mask, ax=ax[1], scale='linear', cbar='right', title='Mask') - fig.savefig(os.path.join(output_folder, 'original.png'), bbox_inches='tight') - plt.close(fig) - - # Estimate image background: - # Not using image.clean here, since we are redefining the mask anyway - background = Background2D(image.clean, (128, 128), filter_size=(5, 5), sigma_clip=SigmaClip(sigma=3.0), - bkg_estimator=SExtractorBackground(), exclude_percentile=50.0) - - # Create background-subtracted image: - image.subclean = image.clean - background.background - - # Plot background estimation: - fig, ax = plt.subplots(1, 3, figsize=(20, 6)) - plot_image(image.clean, ax=ax[0], scale='log', title='Original') - plot_image(background.background, ax=ax[1], scale='log', title='Background') - plot_image(image.subclean, ax=ax[2], scale='log', title='Background subtracted') - fig.savefig(os.path.join(output_folder, 'background.png'), bbox_inches='tight') - plt.close(fig) - - # TODO: Is this correct?! - image.error = calc_total_error(image.clean, background.background_rms, 1.0) - - # Use sep to for soure extraction - sep_background = sep.Background(image.image, mask=image.mask) - objects = sep.extract(image.image - sep_background, thresh=5., err=sep_background.globalrms, mask=image.mask, - deblend_cont=0.1, minarea=9, clean_param=2.0) - - # Cleanup large arrays which are no longer needed: - del background, fig, ax, sep_background, ltt_bary - gc.collect() - - # ============================================================================================== - # DETECTION OF STARS AND MATCHING WITH CATALOG - # ============================================================================================== - - # Account for proper motion: - replace(references['pm_ra'], np.NaN, 0) - replace(references['pm_dec'], np.NaN, 0) - refs_coord = coords.SkyCoord(ra=references['ra'], dec=references['decl'], pm_ra_cosdec=references['pm_ra'], - pm_dec=references['pm_dec'], unit='deg', frame='icrs', - obstime=Time(2015.5, format='decimalyear')) - - with warnings.catch_warnings(): - warnings.simplefilter("ignore", ErfaWarning) - refs_coord = refs_coord.apply_space_motion(new_obstime=image.obstime) - - # TODO: These need to be based on the instrument! 
-    radius = 10
-    fwhm_guess = 6.0
-    fwhm_min = 3.5
-    fwhm_max = 18.0
-
-    # Clean extracted stars
-    masked_sep_xy, sep_mask, masked_sep_rsqs = refclean.force_reject_g2d(objects['x'], objects['y'], image,
-                                                                         get_fwhm=False, radius=radius,
-                                                                         fwhm_guess=fwhm_guess, rsq_min=0.3,
-                                                                         fwhm_max=fwhm_max, fwhm_min=fwhm_min)
+    logger.debug(f"catalog obtained for target: {targetid}")
+    return catalog
+
+
+class FlowsBackground:
+    """Estimate the image background and its RMS, for subtraction and error propagation."""
+
+    def __init__(self, background_estimator: Background2D = Background2D):
+        self.background_estimator = background_estimator  # Was `= Background2D`, silently ignoring the argument.
+        self.background: Optional[ArrayLike] = None
+        self.background_rms: Optional[ArrayLike] = None
+
+    def estimate_background(self, clean_image: np.ma.MaskedArray) -> None:
+        # Estimate image background:
+        # Not using image.clean here, since we are redefining the mask anyway
+        bkg2d = self.background_estimator(clean_image, (128, 128), filter_size=(5, 5),
+                                          sigma_clip=SigmaClip(sigma=3.0), bkg_estimator=SExtractorBackground(),
+                                          exclude_percentile=50.0)
+        self.background = bkg2d.background
+        self.background_rms = bkg2d.background_rms
+
+    def background_subtract(self, clean_image: ArrayLike) -> ArrayLike:
+        if self.background is None:
+            self.estimate_background(clean_image)
+        return clean_image - self.background
+
+    def error(self, clean_image: ArrayLike, error_method: Callable = calc_total_error):
+        """
+        Calculate the 2D error using the background RMS.
+        """
+        if self.background is None:
+            raise AttributeError("background must be estimated before calling error")
+        return error_method(clean_image, self.background_rms, 1.0)
+
+
+def correct_wcs(image: FlowsImage, references: refclean.References, target: Target,
+                timeout: float = np.inf) -> FlowsImage:
+    """
+    Correct WCS of image to match the reference image.
+    """
+    # Start pre-cleaning
+    sep_cleaner = refclean.ReferenceCleaner(image, references, rsq_min=0.3)
+    # Use Source Extractor to make clean references
+    sep_references_clean = sep_cleaner.make_sep_clean_references()
+
+    # Find WCS
     logger.info("Finding new WCS solution...")
     head_wcs = str(WCS2.from_astropy_wcs(image.wcs))
     logger.debug('Head WCS: %s', head_wcs)
 
-    # Solve for new WCS
-    cm = CoordinateMatch(xy=list(masked_sep_xy[sep_mask]), rd=list(zip(refs_coord.ra.deg, refs_coord.dec.deg)),
-                         xy_order=np.argsort(
-                             np.power(masked_sep_xy[sep_mask] - np.array(image.shape[::-1]) / 2, 2).sum(axis=1)),
-                         rd_order=np.argsort(target_coord.separation(refs_coord)), xy_nmax=100, rd_nmax=100,
-                         maximum_angle_distance=0.002)
-
-    # Set timeout par to infinity unless specified.
- if cm_timeout is None: - cm_timeout = float('inf') + cm = CoordinateMatch( + xy=list(sep_references_clean.masked), + rd=list(zip(references.coords.ra.deg, references.coords.dec.deg)), + xy_order=np.argsort(np.power(sep_references_clean.masked - np.array(image.shape[::-1]) / 2, 2).sum(axis=1)), + rd_order=np.argsort(target.coords.separation(references.coords)), + xy_nmax=100, rd_nmax=100, maximum_angle_distance=0.002) + try: - i_xy, i_rd = map(np.array, zip(*cm(5, 1.5, timeout=cm_timeout))) + i_xy, i_rd = map(np.array, zip(*cm(5, 1.5, timeout=timeout))) except TimeoutError: logger.warning('TimeoutError: No new WCS solution found') except StopIteration: @@ -225,452 +131,937 @@ def photometry(fileid, output_folder=None, attempt_imagematch=True, keep_diff_fi else: logger.info('Found new WCS') image.wcs = fit_wcs_from_points(np.array(list(zip(*cm.xy[i_xy]))), - coords.SkyCoord(*map(list, zip(*cm.rd[i_rd])), unit='deg')) + SkyCoord(*map(list, zip(*cm.rd[i_rd])), unit='deg')) del i_xy, i_rd - used_wcs = str(WCS2.from_astropy_wcs(image.wcs)) - logger.debug('Used WCS: %s', used_wcs) - - # Calculate pixel-coordinates of references: - xy = image.wcs.all_world2pix(list(zip(refs_coord.ra.deg, refs_coord.dec.deg)), 0) - references['pixel_column'], references['pixel_row'] = x, y = list(map(np.array, zip(*xy))) - - # Clean out the references: - hsize = 10 - clean_references = references[(target_coord.separation(refs_coord) > ref_target_dist_limit) & (x > hsize) & ( - x < (image.shape[1] - 1 - hsize)) & (y > hsize) & (y < (image.shape[0] - 1 - hsize))] - - if not clean_references: - raise RuntimeError('No clean references in field') + logger.debug(f'Used WCS: {WCS2.from_astropy_wcs(image.wcs)}') + return image + +# @TODO: make photometry protocol +class Photometry: + init_cutout_size: int = 29 + min_pixels: int = 15 + + def __init__(self, image: FlowsImage, target: Target, fwhm_guess: float, + epsf_builder: FlowsEPSFBuilder = FlowsEPSFBuilder): + self.image = image + self.target = target + self.fwhm = fwhm_guess + self.epsf_builder = epsf_builder + + @property + def star_size(self) -> int: + # Make cutouts of stars using extract_stars: + # Scales with FWHM + size = int(np.round(self.init_cutout_size * self.fwhm / 6)) + size += 0 if size % 2 else 1 # Make sure it's odd + size = max(size, self.min_pixels) # Never go below 15 pixels + return size + + def extract_star_cutouts(self, star_xys: np.ndarray) -> List[np.ma.MaskedArray]: + """ + Extract star cutouts from the image. + """ + # Extract stars from image + with warnings.catch_warnings(): + warnings.simplefilter('ignore', AstropyUserWarning) + stars = extract_stars(NDData(data=self.image.subclean.data, mask=self.image.mask), + Table(star_xys, names=('x', 'y')), + size=self.star_size + 6 # +6 for edge buffer + ) + logger.info("Number of stars input to ePSF builder: %d", len(stars)) + return stars + + def make_ePSF(self, stars): + """ + Make an ePSF from the star cutouts. 
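+        Wraps FlowsEPSFBuilder; the fit and recentering box sizes scale with the current FWHM estimate.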
+ """ + # Build ePSF + logger.info("Building ePSF...") + builder = self.epsf_builder( + oversampling=1, shape=1 * self.star_size, + fitter=EPSFFitter(fit_boxsize=max(int(np.round(1.5 * self.fwhm)), 5)), + recentering_boxsize=max(int(np.round(2 * self.fwhm)), 5), + norm_radius=max(self.fwhm, 5), maxiters=100, + progress_bar=logger.isEnabledFor(logging.INFO) + ) + epsf, stars = builder(stars) + + logger.info(f"Built PSF model " + f"{epsf.fit_info['n_iter']/epsf.fit_info['max_iters']} in {epsf.fit_info['time']} seconds") + + return epsf, stars + + +class PhotometryManager: + """ + Implement a runner to shuffle data. + """ - # Calculate the targets position in the image: - target_pixel_pos = image.wcs.all_world2pix([(target['ra'], target['decl'])], 0)[0] + def __init__(self, target: Target, + directories: DirectoryProtocol): + self.target = target + self.directories = directories + self.output_folder = directories.output_folder + self.archive_local = directories.archive_local + + def load_science_image(self, image_path: str) -> FlowsImage: + # The paths to the science image: + science_image = self.directories.image_path(image_path) + # Load the image from the FITS file: + logger.info("Load image '%s'", science_image) + return load_image(science_image, target_coord=self.target.coords) + + def get_filter(self): + return get_reference_filter(self.target.photfilter) + + def load_references(self, catalog): + use_filter = self.get_filter() + references = catalog['references'] + references.sort(use_filter) + # Check that there actually are reference stars in that filter: + if allnan(references[use_filter]): + raise ValueError("No reference stars found in current photfilter.") + return refclean.References(table=references) + + +def calculate_appflux(apphot_tbl: Table, apertures: CircularAperture, annuli: CircularAnnulus) -> Table: + """ + Calculate the aperture flux for the given apertures and annuli and append result to table. + """ + # Subtract background estimated from annuli: + bkg = (apphot_tbl['aperture_sum_1'] / annuli.area) * apertures.area + apphot_tbl['flux_aperture'] = apphot_tbl['aperture_sum_0'] - bkg - # Clean reference star locations - masked_fwhms, masked_ref_xys, rsq_mask, masked_rsqs = refclean.force_reject_g2d(clean_references['pixel_column'], - clean_references['pixel_row'], - image, get_fwhm=True, radius=radius, - fwhm_guess=fwhm_guess, - fwhm_max=fwhm_max, - fwhm_min=fwhm_min, rsq_min=0.15) - - # Use R^2 to more robustly determine initial FWHM guess. - # This cleaning is good when we have FEW references. - fwhm, fwhm_clean_references = refclean.clean_with_rsq_and_get_fwhm(masked_fwhms, masked_rsqs, clean_references, - min_fwhm_references=2, min_references=6, - rsq_min=0.15) - logger.info('Initial FWHM guess is %f pixels', fwhm) - - # Create plot of target and reference star positions from 2D Gaussian fits. 
- fig, ax = plt.subplots(1, 1, figsize=(20, 18)) - plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) - ax.scatter(fwhm_clean_references['pixel_column'], fwhm_clean_references['pixel_row'], c='r', marker='o', alpha=0.3) - ax.scatter(masked_sep_xy[:, 0], masked_sep_xy[:, 1], marker='s', alpha=1.0, edgecolors='green', facecolors='none') - ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r') - fig.savefig(os.path.join(output_folder, 'positions_g2d.png'), bbox_inches='tight') - plt.close(fig) - - # Final clean of wcs corrected references - logger.info("Number of references before final cleaning: %d", len(clean_references)) - logger.debug('Masked R^2 values: %s', masked_rsqs[rsq_mask]) - references = refclean.get_clean_references(clean_references, masked_rsqs, rsq_ideal=0.8) - logger.info("Number of references after final cleaning: %d", len(references)) - - # Create plot of target and reference star positions: - fig, ax = plt.subplots(1, 1, figsize=(20, 18)) - plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) - ax.scatter(references['pixel_column'], references['pixel_row'], c='r', marker='o', alpha=0.6) - ax.scatter(masked_sep_xy[:, 0], masked_sep_xy[:, 1], marker='s', alpha=0.6, edgecolors='green', facecolors='none') - ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r') - fig.savefig(os.path.join(output_folder, 'positions.png'), bbox_inches='tight') - plt.close(fig) - - # Cleanup large arrays which are no longer needed: - del fig, ax, cm - gc.collect() - - # ============================================================================================== - # CREATE EFFECTIVE PSF MODEL - # ============================================================================================== - - # Make cutouts of stars using extract_stars: - # Scales with FWHM - size = int(np.round(29 * fwhm / 6)) - size += 0 if size % 2 else 1 # Make sure it's a uneven number - size = max(size, 15) # Never go below 15 pixels - - # Extract stars sub-images: - xy = [tuple(masked_ref_xys[clean_references['starid'] == ref['starid']].data[0]) for ref in references] - with warnings.catch_warnings(): - warnings.simplefilter('ignore', AstropyUserWarning) - stars = extract_stars(NDData(data=image.subclean.data, mask=image.mask), Table(np.array(xy), names=('x', 'y')), - size=size + 6 # +6 for edge buffer - ) - - logger.info("Number of stars input to ePSF builder: %d", len(stars)) - - # Plot the stars being used for ePSF: - imgnr = 0 - nrows, ncols = 5, 5 - for k in range(int(np.ceil(len(stars) / (nrows * ncols)))): - fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20), squeeze=True) - ax = ax.ravel() - for i in range(nrows * ncols): - if imgnr > len(stars) - 1: - ax[i].axis('off') - else: - plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis') # FIXME (no x-ticks) - imgnr += 1 - - fig.savefig(os.path.join(output_folder, f'epsf_stars{k + 1:02d}.png'), bbox_inches='tight') - plt.close(fig) + apphot_tbl['flux_aperture_error'] = np.sqrt(apphot_tbl['aperture_sum_err_0'] ** 2 + + (apphot_tbl['aperture_sum_err_1'] / annuli.area * apertures.area) ** 2) + return apphot_tbl - # Build the ePSF: - epsf, stars = FlowsEPSFBuilder(oversampling=1, shape=1 * size, - fitter=EPSFFitter(fit_boxsize=max(int(np.round(1.5 * fwhm)), 5)), - recentering_boxsize=max(int(np.round(2 * fwhm)), 5), norm_radius=max(fwhm, 5), - maxiters=100, progress_bar=logger.isEnabledFor(logging.INFO))(stars) - logger.info('Built PSF model 
(%(n_iter)d/%(max_iters)d) in %(time).1f seconds', epsf.fit_info) - # Store which stars were used in ePSF in the table: - references['used_for_epsf'] = False - references['used_for_epsf'][[star.id_label - 1 for star in stars.all_good_stars]] = True - logger.info("Number of stars used for ePSF: %d", np.sum(references['used_for_epsf'])) - - fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15)) - plot_image(epsf.data, ax=ax1, cmap='viridis') - - fwhms = [] - bad_epsf_detected = False - for a, ax in ((0, ax3), (1, ax2)): - # Collapse the PDF along this axis: - profile = epsf.data.sum(axis=a) - itop = profile.argmax() - poffset = profile[itop] / 2 - - # Run a spline through the points, but subtract half of the peak value, and find the roots: - # We have to use a cubic spline, since roots() is not supported for other splines - # for some reason - profile_intp = UnivariateSpline(np.arange(0, len(profile)), profile - poffset, k=3, s=0, ext=3) - lr = profile_intp.roots() - - # Plot the profile and spline: - x_fine = np.linspace(-0.5, len(profile) - 0.5, 500) - ax.plot(profile, 'k.-') - ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-') - ax.axvline(itop) - ax.set_xlim(-0.5, len(profile) - 0.5) - - # Do some sanity checks on the ePSF: - # It should pass 50% exactly twice and have the maximum inside that region. - # I.e. it should be a single gaussian-like peak - if len(lr) != 2 or itop < lr[0] or itop > lr[1]: - logger.error("Bad PSF along axis %d", a) - bad_epsf_detected = True - else: - axis_fwhm = lr[1] - lr[0] - fwhms.append(axis_fwhm) - ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2) - - # Save the ePSF figure: - ax4.axis('off') - fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight') - plt.close(fig) - - # There was a problem with the ePSF: - if bad_epsf_detected: - raise RuntimeError("Bad ePSF detected.") +def apphot(coordinates: ArrayLike, image: FlowsImage, fwhm: float, use_raw: bool = False) -> Table: + img = image.clean if use_raw else image.subclean + apertures = CircularAperture(coordinates, r=fwhm) + annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm) + apphot_tbl = aperture_photometry(img, [apertures, annuli], mask=image.mask, error=image.error) + return calculate_appflux(apphot_tbl, apertures, annuli) - # Let's make the final FWHM the largest one we found: - fwhm = np.max(fwhms) - logger.info("Final FWHM based on ePSF: %f", fwhm) - # Cleanup large arrays which are no longer needed: - del fig, ax, stars, fwhms, profile_intp - gc.collect() +def do_phot(fileid: int, cm_timeout: Optional[float] = None, make_plots: bool = True): + # TODO: Timer should be moved out of this function. 
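+    # Note: do_phot is re-exported as `photometry` in flows/__init__.py, so existing
+    # callers can keep using `from flows import photometry; photometry(fileid)`.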
+ tic = default_timer() - # ============================================================================================== - # COORDINATES TO DO PHOTOMETRY AT - # ============================================================================================== + # Load config and set up directories + config = load_config() + directories = Directories(config) + + # Query API for datafile and catalogs + datafile = get_datafile(fileid) + catalog = get_catalog(datafile['targetid']) + target = Target(catalog['target'][0]['ra'], + catalog['target'][0]['decl'], + name=datafile['target_name'], + id=datafile['targetid'], + photfilter=datafile['photfilter']) + + # set output directories, creating if necessary + directories.set_output_dirs(target.name, fileid) + # science_image = directories.image_path(datafile['path']) + + # Set up photometry runner + pr = PhotometryManager(target, directories) + image = pr.load_science_image(datafile['path']) # FlowsImage + references = pr.load_references(catalog) # Reference catalog + references.make_sky_coords() # Make sky coordinates + references.propagate(image.obstime) # get reference catalog at obstime + + # Estimate background and subtract from cleaned image + bkg = FlowsBackground() + image.subclean = bkg.background_subtract(image.clean) + image.error = bkg.error(image.clean) + + # Correct WCS + cm_timeout = cm_timeout if cm_timeout is not None else np.inf + image = correct_wcs(image, references, target=target, timeout=cm_timeout) - coordinates = np.array([[ref['pixel_column'], ref['pixel_row']] for ref in references]) + # Calculate pixel-coordinates of references: + references.get_xy(image.wcs) + references.make_pixel_columns() - # Add the main target position as the first entry for doing photometry directly in the - # science image: - coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0) + # Clean out the references: + cleaner = refclean.ReferenceCleaner(image, references, rsq_min=0.15) + # Reject references that are too close to target or edge of the image + masked_references = cleaner.mask_edge_and_target(target.coords) + if not masked_references.table: + raise RuntimeError("No clean references in field") - # ============================================================================================== - # APERTURE PHOTOMETRY - # ============================================================================================== + # Clean reference star locations + clean_references, fwhm = cleaner.clean_references(masked_references) # Clean the masked references + + # EPSF creation + phot = Photometry(image, target, fwhm) + star_cutouts = phot.extract_star_cutouts(cleaner.gaussian_xys) + epsf, stars = phot.make_ePSF(star_cutouts) + epsf_ok, epsf_fwhms = verify_epsf(epsf) + if not epsf_ok: + raise RuntimeError("Bad ePSF detected.") - # Define apertures for aperture photometry: - apertures = CircularAperture(coordinates, r=fwhm) - annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm) + # Store which stars were used in ePSF in the table: + clean_references.table.add_column(col=[False], name='used_for_epsf') + clean_references.table['used_for_epsf'][[star.id_label - 1 for star in stars.all_good_stars]] = True + logger.info("Number of stars used for ePSF: %d", np.sum(clean_references.table['used_for_epsf'])) - apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli], mask=image.mask, error=image.error) + fwhm = np.max(epsf_fwhms) # Use the largest FWHM as new FWHM + logger.info(f"Final FWHM based on ePSF: {fwhm}") - 
logger.info('Aperture Photometry Success')
-    logger.debug("Aperture Photometry Table:\n%s", apphot_tbl)
+    # Position in the image, including the target as row 0:
+    target.calc_pixels(image.wcs)
+    clean_references.add_target(target, starid=0)  # by default prepends target
 
-    # ==============================================================================================
-    # PSF PHOTOMETRY
-    # ==============================================================================================
+    # Aperture photometry:
+    apphot_tbl = apphot(clean_references.xy, image, fwhm)
 
-    # Create photometry object:
+    # PSF photometry:
     photometry_obj = BasicPSFPhotometry(group_maker=DAOGroup(fwhm), bkg_estimator=MedianBackground(), psf_model=epsf,
-                                        fitter=fitting.LevMarLSQFitter(), fitshape=size, aperture_radius=fwhm)
-
-    psfphot_tbl = photometry_obj(image=image.subclean, init_guesses=Table(coordinates, names=['x_0', 'y_0']))
-
-    logger.info('PSF Photometry Success')
-    logger.debug("PSF Photometry Table:\n%s", psfphot_tbl)
-
-    # ==============================================================================================
-    # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY
-    # ==============================================================================================
-
-    # Find the pixel-scale of the science image:
+                                        fitter=fitting.LevMarLSQFitter(), fitshape=phot.star_size, aperture_radius=fwhm)
+    psfphot_tbl = photometry_obj(image=image.subclean, init_guesses=Table(clean_references.xy, names=['x_0', 'y_0']))
+
+    # Difference image photometry:
+    diffimage_df = datafile.get('diffimg', None)
+    if diffimage_df:
+        diffimage_path = diffimage_df.get('path', None)
+        if diffimage_path is None:
+            logger.warning("Diffimage record found, but it has no path; skipping difference image photometry")
+            diffimage_df = None  # Also skips the difference-image plots below.
+        else:
+            diffimage = load_image(directories.image_path(diffimage_path), target_coord=target.coords)
+            diffimage.error = image.error
+            diff_apphot_tbl = apphot(clean_references.xy[0], diffimage, fwhm, use_raw=True)
+            diff_psfphot_tbl = photometry_obj(image=diffimage.clean, init_guesses=Table(clean_references.xy[0],
+                                                                                        names=['x_0', 'y_0']))
+
+            # Store the difference image photometry on row 0 of the table.
+            # This pushes the un-subtracted target photometry to row 1.
+            clean_references.add_target(target, starid=-1)
+            psfphot_tbl.insert_row(0, diff_psfphot_tbl[0])
+            apphot_tbl.insert_row(0, diff_apphot_tbl[0])
+
+
+    # TODO: This should be moved to the photometry manager.
+    # Build results table:
+    results_table = ResultsTable.make_results_table(clean_references.table, apphot_tbl, psfphot_tbl, image)
+
+    # Todo: refactor.
+    # Get instrumental magnitude (currently we do too much here).
+    results_table, (mag_fig, mag_ax) = instrumental_mag(results_table, target)
+
+    # Add metadata to the results table:
+    results_table.meta['fileid'] = fileid
+    results_table.meta['target_name'] = target.name
+    results_table.meta['version'] = __version__
+    results_table.meta['template'] = None if datafile.get('template') is None else datafile['template']['fileid']
+    results_table.meta['diffimg'] = None if datafile.get('diffimg') is None else datafile['diffimg']['fileid']
+    results_table.meta['photfilter'] = target.photfilter
+    results_table.meta['fwhm'] = fwhm * u.pixel
+    # Find the pixel-scale of the science image:
+    # (proj_plane_pixel_area is in deg^2/pixel, so sqrt gives deg/pixel and *3600 gives arcsec/pixel)
     pixel_area = proj_plane_pixel_area(image.wcs.celestial)
     pixel_scale = np.sqrt(pixel_area) * 3600  # arcsec/pixel
-    # print(image.wcs.celestial.cunit) % Doesn't work?
logger.info("Science image pixel scale: %f", pixel_scale) - - diffimage = None - if datafile.get('diffimg') is not None: - diffimg_path = os.path.join(datafile['archive_path'], datafile['diffimg']['path']) - diffimg = load_image(diffimg_path) - diffimage = diffimg.image - - elif attempt_imagematch and datafile.get('template') is not None: - # Run the template subtraction, and get back - # the science image where the template has been subtracted: - diffimage = run_imagematch(datafile, target, star_coord=coordinates, fwhm=fwhm, pixel_scale=pixel_scale) - - # We have a diff image, so let's do photometry of the target using this: - if diffimage is not None: - # Include mask from original image: - diffimage = np.ma.masked_array(diffimage, image.mask) - - # Create apertures around the target: - apertures = CircularAperture(target_pixel_pos, r=fwhm) - annuli = CircularAnnulus(target_pixel_pos, r_in=1.5 * fwhm, r_out=2.5 * fwhm) - - # Create two plots of the difference image: - fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20)) - plot_image(diffimage, ax=ax, cbar='right', title=target_name) - ax.plot(target_pixel_pos[0], target_pixel_pos[1], marker='+', markersize=20, color='r') - fig.savefig(os.path.join(output_folder, 'diffimg.png'), bbox_inches='tight') - apertures.plot(axes=ax, color='r', lw=2) - annuli.plot(axes=ax, color='r', lw=2) - ax.set_xlim(target_pixel_pos[0] - 50, target_pixel_pos[0] + 50) - ax.set_ylim(target_pixel_pos[1] - 50, target_pixel_pos[1] + 50) - fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'), bbox_inches='tight') + # Keep adding metadata now that we have the pixel scale. + results_table.meta['pixel_scale'] = pixel_scale * u.arcsec / u.pixel + results_table.meta['seeing'] = (fwhm * pixel_scale) * u.arcsec + results_table.meta['obstime-bmjd'] = float(image.obstime.mjd) + results_table.meta['used_wcs'] = str(image.wcs) + + # Save the results table: + # TODO: Photometry should RETURN a table, not save it. + # TODO: Saving should be offloaded to the caller or to a parameter at least. + results_table.write(directories.photometry_path, format='ascii.ecsv', delimiter=',', overwrite=True) + + # Log result and time taken: + # TODO: This should be logged by the calling function. + logger.info("------------------------------------------------------") + logger.info("Success!") + logger.info("Main target: %f +/- %f", results_table[0]['mag'], results_table[0]['mag_error']) + logger.info("Photometry took: %.1f seconds", default_timer() - tic) + + # Plotting. TODO: refactor. + # These are here for now for backwards compatibility. 
+ if make_plots: + # Plot the image: + fig, ax = plt.subplots(1, 2, figsize=(20, 18)) + plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image') + plot_image(image.mask, ax=ax[1], scale='linear', cbar='right', title='Mask') + fig.savefig(directories.save_as('original.png'), bbox_inches='tight') plt.close(fig) - # Run aperture photometry on subtracted image: - target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli], mask=image.mask, error=image.error) - - # Make target only photometry object if keep_diff_fixed = True - if keep_diff_fixed: - epsf.fixed.update({'x_0': True, 'y_0': True}) - - # TODO: Try iteraratively subtracted photometry - # Create photometry object: - photometry_obj = BasicPSFPhotometry(group_maker=DAOGroup(0.0001), bkg_estimator=MedianBackground(), - psf_model=epsf, fitter=fitting.LevMarLSQFitter(), fitshape=size, - aperture_radius=fwhm) - - # Run PSF photometry on template subtracted image: - target_psfphot_tbl = photometry_obj(diffimage, init_guesses=Table(target_pixel_pos, names=['x_0', 'y_0'])) - - # Need to adjust table columns if x_0 and y_0 were fixed - if keep_diff_fixed: - target_psfphot_tbl['x_0_unc'] = 0.0 - target_psfphot_tbl['y_0_unc'] = 0.0 - - # Combine the output tables from the target and the reference stars into one: - apphot_tbl = vstack([target_apphot_tbl, apphot_tbl], join_type='exact') - psfphot_tbl = vstack([target_psfphot_tbl, psfphot_tbl], join_type='exact') - - # Build results table: - tab = references.copy() - - row = {'starid': 0, 'ra': target['ra'], 'decl': target['decl'], 'pixel_column': target_pixel_pos[0], - 'pixel_row': target_pixel_pos[1], 'used_for_epsf': False} - row.update([(k, np.NaN) for k in set(tab.keys()) - set(row) - {'gaia_variability'}]) - tab.insert_row(0, row) - - if diffimage is not None: - row['starid'] = -1 - tab.insert_row(0, row) - - indx_main_target = tab['starid'] <= 0 + # Plot background estimation: + fig, ax = plt.subplots(1, 3, figsize=(20, 6)) + plot_image(image.clean, ax=ax[0], scale='log', title='Original') + plot_image(bkg.background, ax=ax[1], scale='log', title='Background') + plot_image(image.subclean, ax=ax[2], scale='log', title='Background subtracted') + fig.savefig(directories.save_as('background.png'), bbox_inches='tight') + plt.close(fig) - # Subtract background estimated from annuli: - flux_aperture = apphot_tbl['aperture_sum_0'] - (apphot_tbl['aperture_sum_1'] / annuli.area) * apertures.area - flux_aperture_error = np.sqrt( - apphot_tbl['aperture_sum_err_0'] ** 2 + (apphot_tbl['aperture_sum_err_1'] / annuli.area * apertures.area) ** 2) - - # Add table columns with results: - tab['flux_aperture'] = flux_aperture / image.exptime - tab['flux_aperture_error'] = flux_aperture_error / image.exptime - tab['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime - tab['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime - tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit'] - tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit'] - tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc'] - tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc'] - - # Check that we got valid photometry: - if np.any(~np.isfinite(tab[indx_main_target]['flux_psf'])) or np.any( - ~np.isfinite(tab[indx_main_target]['flux_psf_error'])): - raise RuntimeError("Target magnitude is undefined.") - - # ============================================================================================== - # CALIBRATE - # ============================================================================================== - 
- # Convert PSF fluxes to magnitudes: - mag_inst = -2.5 * np.log10(tab['flux_psf']) - mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] / tab['flux_psf']) - - # Corresponding magnitudes in catalog: - mag_catalog = tab[ref_filter] - - # Mask out things that should not be used in calibration: - use_for_calibration = np.ones_like(mag_catalog, dtype='bool') - use_for_calibration[indx_main_target] = False # Do not use target for calibration - use_for_calibration[~np.isfinite(mag_inst) | ~np.isfinite(mag_catalog)] = False - - # Just creating some short-hands: - x = mag_catalog[use_for_calibration] - y = mag_inst[use_for_calibration] - yerr = mag_inst_err[use_for_calibration] - weights = 1.0 / yerr ** 2 - - if not any(use_for_calibration): - raise RuntimeError("No calibration stars") - - # Fit linear function with fixed slope, using sigma-clipping: - model = models.Linear1D(slope=1, fixed={'slope': True}) - fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(), sigma_clip, sigma=3.0) - best_fit, sigma_clipped = fitter(model, x, y, weights=weights) - - # Extract zero-point and estimate its error using a single weighted fit: - # I don't know why there is not an error-estimate attached directly to the Parameter? - zp = -1 * best_fit.intercept.value # Negative, because that is the way zeropoints are usually defined - - weights[sigma_clipped] = 0 # Trick to make following expression simpler - n_weights = len(weights.nonzero()[0]) - if n_weights > 1: - zp_error = np.sqrt(n_weights * nansum(weights * (y - best_fit(x)) ** 2) / nansum(weights) / (n_weights - 1)) - else: - zp_error = np.NaN - logger.info('Leastsquare ZP = %.3f, ZP_error = %.3f', zp, zp_error) - - # Determine sigma clipping sigma according to Chauvenet method - # But don't allow less than sigma = sigmamin, setting to 1.5 for now. - # Should maybe be 2? 
- sigmamin = 1.5 - sig_chauv = sigma_from_Chauvenet(len(x)) - sig_chauv = sig_chauv if sig_chauv >= sigmamin else sigmamin - - # Extract zero point and error using bootstrap method - nboot = 1000 - logger.info('Running bootstrap with sigma = %.2f and n = %d', sig_chauv, nboot) - pars = bootstrap_outlier(x, y, yerr, n=nboot, model=model, fitter=fitting.LinearLSQFitter, outlier=sigma_clip, - outlier_kwargs={'sigma': sig_chauv}, summary='median', error='bootstrap', - return_vals=False) - - zp_bs = pars['intercept'] * -1.0 - zp_error_bs = pars['intercept_error'] - - logger.info('Bootstrapped ZP = %.3f, ZP_error = %.3f', zp_bs, zp_error_bs) - - # Check that difference is not large - zp_diff = 0.4 - if np.abs(zp_bs - zp) >= zp_diff: - logger.warning("Bootstrap and weighted LSQ ZPs differ by %.2f, \ - which is more than the allowed %.2f mag.", np.abs(zp_bs - zp), zp_diff) - - # Add calibrated magnitudes to the photometry table: - tab['mag'] = mag_inst + zp_bs - tab['mag_error'] = np.sqrt(mag_inst_err ** 2 + zp_error_bs ** 2) - - fig, ax = plt.subplots(1, 1) - ax.errorbar(x, y, yerr=yerr, fmt='k.') - ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r') - ax.plot(x, best_fit(x), color='g', linewidth=3) - ax.set_xlabel('Catalog magnitude') - ax.set_ylabel('Instrumental magnitude') - fig.savefig(os.path.join(output_folder, 'calibration.png'), bbox_inches='tight') - plt.close(fig) - - # Check that we got valid photometry: - if not np.isfinite(tab[0]['mag']) or not np.isfinite(tab[0]['mag_error']): - raise RuntimeError("Target magnitude is undefined.") - - # ============================================================================================== - # SAVE PHOTOMETRY - # ============================================================================================== - - # Descriptions of columns: - tab['used_for_epsf'].description = 'Was object used for building ePSF?' 
- tab['mag'].description = 'Measured magnitude' - tab['mag'].unit = u.mag - tab['mag_error'].description = 'Error on measured magnitude' - tab['mag_error'].unit = u.mag - tab['flux_aperture'].description = 'Measured flux using aperture photometry' - tab['flux_aperture'].unit = u.count / u.second - tab['flux_aperture_error'].description = 'Error on measured flux using aperture photometry' - tab['flux_aperture_error'].unit = u.count / u.second - tab['flux_psf'].description = 'Measured flux using PSF photometry' - tab['flux_psf'].unit = u.count / u.second - tab['flux_psf_error'].description = 'Error on measured flux using PSF photometry' - tab['flux_psf_error'].unit = u.count / u.second - tab['pixel_column'].description = 'Location on image pixel columns' - tab['pixel_column'].unit = u.pixel - tab['pixel_row'].description = 'Location on image pixel rows' - tab['pixel_row'].unit = u.pixel - tab['pixel_column_psf_fit'].description = 'Measured location on image pixel columns from PSF photometry' - tab['pixel_column_psf_fit'].unit = u.pixel - tab[ - 'pixel_column_psf_fit_error'].description = 'Error on measured location on image pixel columns from PSF photometry' - tab['pixel_column_psf_fit_error'].unit = u.pixel - tab['pixel_row_psf_fit'].description = 'Measured location on image pixel rows from PSF photometry' - tab['pixel_row_psf_fit'].unit = u.pixel - tab['pixel_row_psf_fit_error'].description = 'Error on measured location on image pixel rows from PSF photometry' - tab['pixel_row_psf_fit_error'].unit = u.pixel - - # Meta-data: - tab.meta['fileid'] = fileid - tab.meta['target_name'] = target_name - tab.meta['version'] = __version__ - tab.meta['template'] = None if datafile.get('template') is None else datafile['template']['fileid'] - tab.meta['diffimg'] = None if datafile.get('diffimg') is None else datafile['diffimg']['fileid'] - tab.meta['photfilter'] = photfilter - tab.meta['fwhm'] = fwhm * u.pixel - tab.meta['pixel_scale'] = pixel_scale * u.arcsec / u.pixel - tab.meta['seeing'] = (fwhm * pixel_scale) * u.arcsec - tab.meta['obstime-bmjd'] = float(image.obstime.mjd) - tab.meta['zp'] = zp_bs - tab.meta['zp_error'] = zp_error_bs - tab.meta['zp_diff'] = np.abs(zp_bs - zp) - tab.meta['zp_error_weights'] = zp_error - tab.meta['head_wcs'] = head_wcs # TODO: Are these really useful? - tab.meta['used_wcs'] = used_wcs # TODO: Are these really useful? 
-
-    # Filepath where to save photometry:
-    photometry_output = os.path.join(output_folder, 'photometry.ecsv')
-
-    # Write the final table to file:
-    tab.write(photometry_output, format='ascii.ecsv', delimiter=',', overwrite=True)
-
-    toc = default_timer()
+    # Create plot of target and reference star positions:
+    fig, ax = plt.subplots(1, 1, figsize=(20, 18))
+    plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target.name)
+    ax.scatter(references.table['pixel_column'], references.table['pixel_row'], c='r', marker='o', alpha=0.6)
+    ax.scatter(cleaner.gaussian_xys[:, 0], cleaner.gaussian_xys[:, 1], marker='s', alpha=0.6, edgecolors='green',
+               facecolors='none')
+    ax.scatter(target.pixel_column, target.pixel_row, marker='+', s=20, c='r')
+    fig.savefig(directories.save_as('positions.png'), bbox_inches='tight')
+    plt.close(fig)
-    logger.info("------------------------------------------------------")
-    logger.info("Success!")
-    logger.info("Main target: %f +/- %f", tab[0]['mag'], tab[0]['mag_error'])
-    logger.info("Photometry took: %.1f seconds", toc - tic)
+    # Plot the ePSF:
+    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15))
+    plot_image(epsf.data, ax=ax1, cmap='viridis')
+    for a, ax in ((0, ax3), (1, ax2)):
+        profile = epsf.data.sum(axis=a)
+        ax.plot(profile, 'k.-')
+        ax.axvline(profile.argmax())
+        ax.set_xlim(-0.5, len(profile) - 0.5)
+    ax4.axis('off')
+    fig.savefig(directories.save_as('epsf.png'), bbox_inches='tight')
+    plt.close(fig)
+    del ax, ax1, ax2, ax3, ax4
-    return photometry_output
+    # Create two plots of the difference image:
+    if diffimage_df is not None:
+        fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20))
+        plot_image(diffimage.clean, ax=ax, cbar='right', title=target.name)
+        ax.plot(target.pixel_column, target.pixel_row, marker='+', markersize=20, color='r')
+        fig.savefig(directories.save_as('diffimg.png'), bbox_inches='tight')
+        # apertures.plot(axes=ax, color='r', lw=2)
+        # annuli.plot(axes=ax, color='r', lw=2)
+        ax.set_xlim(target.pixel_column - 50, target.pixel_column + 50)
+        ax.set_ylim(target.pixel_row - 50, target.pixel_row + 50)
+        fig.savefig(directories.save_as('diffimg_zoom.png'), bbox_inches='tight')
+        plt.close(fig)
+
+    # Calibration (handled in magnitudes.py):
+    mag_fig.savefig(directories.save_as('calibration.png'), bbox_inches='tight')
+    plt.close(mag_fig)
+
+    # TODO: Return a results table, photometry object, or an event signature, not the path to the file.
+    return directories.photometry_path
+
+
+# def photometry(fileid, output_folder=None, attempt_imagematch=True, keep_diff_fixed=False, cm_timeout=None):
+# """
+# Run photometry.
+#
+# Parameters:
+# fileid (int): File ID to process.
+# output_folder (str, optional): Path to directory where output should be placed.
+# attempt_imagematch (bool, optional): If no subtracted image is available, but a
+# template image is, whether to attempt running ImageMatch using standard settings.
+# Default=True.
+# keep_diff_fixed (bool, optional): If True, do not allow PSF photometry to recenter
+# when calculating the flux for the difference image. Setting to True can help if the diff
+# image has non-source flux in the region around the SN.
+# cm_timeout (float, optional): Timeout in seconds for the :class:`CoordinateMatch` algorithm.
+#
+# .. codeauthor:: Rasmus Handberg
+# .. codeauthor:: Emir Karamehmetoglu
+# ..
codeauthor:: Simon Holmbo +# """ +# +# # Settings: +# ref_target_dist_limit = 10 * u.arcsec # Reference star must be further than this away to be included +# +# tic = default_timer() +# +# # Use local copy of archive if configured to do so: +# config = load_config() +# +# # Get datafile dict from API: +# datafile = api.get_datafile(fileid) +# logger.debug("Datafile: %s", datafile) +# targetid = datafile['targetid'] +# target_name = datafile['target_name'] +# photfilter = datafile['photfilter'] +# +# archive_local = config.get('photometry', 'archive_local', fallback=None) +# if archive_local is not None: +# datafile['archive_path'] = archive_local +# if not os.path.isdir(datafile['archive_path']): +# raise FileNotFoundError("ARCHIVE is not available: " + datafile['archive_path']) +# +# # Get the catalog containing the target and reference stars: +# # TODO: Include proper-motion to the time of observation +# catalog = api.get_catalog(targetid, output='table') +# target = catalog['target'][0] +# target_coord = coords.SkyCoord(ra=target['ra'], dec=target['decl'], unit='deg', frame='icrs') +# +# # Folder to save output: +# if output_folder is None: +# output_folder_root = config.get('photometry', 'output', fallback='.') +# output_folder = os.path.join(output_folder_root, target_name, f'{fileid:05d}') +# logger.info("Placing output in '%s'", output_folder) +# os.makedirs(output_folder, exist_ok=True) +# +# # The paths to the science image: +# filepath = os.path.join(datafile['archive_path'], datafile['path']) +# +# # TODO: Download datafile using API to local drive: +# # TODO: Is this a security concern? +# # if archive_local: +# # api.download_datafile(datafile, archive_local) +# +# # Translate photometric filter into table column: +# ref_filter = get_reference_filter(photfilter) # Fallback to 'g' if photfilter is not in FILTERS +# +# # Load the image from the FITS file: +# logger.info("Load image '%s'", filepath) +# image = load_image(filepath, target_coord=target_coord) +# +# references = catalog['references'] +# references.sort(ref_filter) +# +# # Check that there actually are reference stars in that filter: +# if allnan(references[ref_filter]): +# raise ValueError("No reference stars found in current photfilter.") +# +# # ============================================================================================== +# # BARYCENTRIC CORRECTION OF TIME +# # ============================================================================================== +# +# ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl') +# image.obstime = image.obstime.tdb + ltt_bary +# +# # ============================================================================================== +# # BACKGROUND ESTIMATION +# # ============================================================================================== +# +# fig, ax = plt.subplots(1, 2, figsize=(20, 18)) +# plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image') +# plot_image(image.mask, ax=ax[1], scale='linear', cbar='right', title='Mask') +# fig.savefig(os.path.join(output_folder, 'original.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Estimate image background: +# # Not using image.clean here, since we are redefining the mask anyway +# background = Background2D(image.clean, (128, 128), filter_size=(5, 5), sigma_clip=SigmaClip(sigma=3.0), +# bkg_estimator=SExtractorBackground(), exclude_percentile=50.0) +# +# # Create background-subtracted image: +# image.subclean = image.clean - background.background +# +# # Plot 
background estimation: +# fig, ax = plt.subplots(1, 3, figsize=(20, 6)) +# plot_image(image.clean, ax=ax[0], scale='log', title='Original') +# plot_image(background.background, ax=ax[1], scale='log', title='Background') +# plot_image(image.subclean, ax=ax[2], scale='log', title='Background subtracted') +# fig.savefig(os.path.join(output_folder, 'background.png'), bbox_inches='tight') +# plt.close(fig) +# +# # TODO: Is this correct?! +# image.error = calc_total_error(image.clean, background.background_rms, 1.0) +# +# # Use sep to for soure extraction +# sep_background = sep.Background(image.image, mask=image.mask) +# objects = sep.extract(image.image - sep_background, thresh=5., err=sep_background.globalrms, mask=image.mask, +# deblend_cont=0.1, minarea=9, clean_param=2.0) +# +# # Cleanup large arrays which are no longer needed: +# del background, fig, ax, sep_background, ltt_bary +# gc.collect() +# +# # ============================================================================================== +# # DETECTION OF STARS AND MATCHING WITH CATALOG +# # ============================================================================================== +# +# # Account for proper motion: +# replace(references['pm_ra'], np.NaN, 0) +# replace(references['pm_dec'], np.NaN, 0) +# refs_coord = coords.SkyCoord(ra=references['ra'], dec=references['decl'], pm_ra_cosdec=references['pm_ra'], +# pm_dec=references['pm_dec'], unit='deg', frame='icrs', +# obstime=Time(2015.5, format='decimalyear')) +# +# with warnings.catch_warnings(): +# warnings.simplefilter("ignore", ErfaWarning) +# refs_coord = refs_coord.apply_space_motion(new_obstime=image.obstime) +# +# # TODO: These need to be based on the instrument! +# radius = 10 +# fwhm_guess = 6.0 +# fwhm_min = 3.5 +# fwhm_max = 18.0 +# +# # Clean extracted stars +# masked_sep_xy, sep_mask, masked_sep_rsqs = refclean.force_reject_g2d(objects['x'], objects['y'], image, +# radius=radius, +# fwhm_guess=fwhm_guess, rsq_min=0.3, +# fwhm_max=fwhm_max, fwhm_min=fwhm_min) +# +# logger.info("Finding new WCS solution...") +# head_wcs = str(WCS2.from_astropy_wcs(image.wcs)) +# logger.debug('Head WCS: %s', head_wcs) +# +# # Solve for new WCS +# cm = CoordinateMatch(xy=list(masked_sep_xy[sep_mask]), rd=list(zip(refs_coord.ra.deg, refs_coord.dec.deg)), +# xy_order=np.argsort( +# np.power(masked_sep_xy[sep_mask] - np.array(image.shape[::-1]) / 2, 2).sum(axis=1)), +# rd_order=np.argsort(target_coord.separation(refs_coord)), xy_nmax=100, rd_nmax=100, +# maximum_angle_distance=0.002) +# +# # Set timeout par to infinity unless specified. 
+# if cm_timeout is None: +# cm_timeout = float('inf') +# try: +# i_xy, i_rd = map(np.array, zip(*cm(5, 1.5, timeout=cm_timeout))) +# except TimeoutError: +# logger.warning('TimeoutError: No new WCS solution found') +# except StopIteration: +# logger.warning('StopIterationError: No new WCS solution found') +# else: +# logger.info('Found new WCS') +# image.wcs = fit_wcs_from_points(np.array(list(zip(*cm.xy[i_xy]))), +# coords.SkyCoord(*map(list, zip(*cm.rd[i_rd])), unit='deg')) +# del i_xy, i_rd +# +# used_wcs = str(WCS2.from_astropy_wcs(image.wcs)) +# logger.debug('Used WCS: %s', used_wcs) +# +# # Calculate pixel-coordinates of references: +# xy = image.wcs.all_world2pix(list(zip(refs_coord.ra.deg, refs_coord.dec.deg)), 0) +# references['pixel_column'], references['pixel_row'] = x, y = list(map(np.array, zip(*xy))) +# +# # Clean out the references: +# hsize = 10 +# ref_target_dist_limit = 10 * u.arcsec # Reference star must be further than this away to be included +# clean_references = references[(target_coord.separation(refs_coord) > ref_target_dist_limit) & (x > hsize) & ( +# x < (image.shape[1] - 1 - hsize)) & (y > hsize) & (y < (image.shape[0] - 1 - hsize))] +# +# if not clean_references: +# raise RuntimeError('No clean references in field') +# +# # Calculate the targets position in the image: +# target_pixel_pos = image.wcs.all_world2pix([(target['ra'], target['decl'])], 0)[0] +# +# # Clean reference star locations +# masked_fwhms, masked_ref_xys, rsq_mask, masked_rsqs = refclean.force_reject_g2d(clean_references['pixel_column'], +# clean_references['pixel_row'], +# image, get_fwhm=True, radius=radius, +# fwhm_guess=fwhm_guess, +# fwhm_max=fwhm_max, +# fwhm_min=fwhm_min, rsq_min=0.15) +# +# # Use R^2 to more robustly determine initial FWHM guess. +# # This cleaning is good when we have FEW references. +# fwhm, fwhm_clean_references = refclean.clean_with_rsq_and_get_fwhm(masked_fwhms, masked_rsqs, clean_references, +# min_fwhm_references=2, min_references=6, +# rsq_min=0.15) +# logger.info('Initial FWHM guess is %f pixels', fwhm) +# +# # Create plot of target and reference star positions from 2D Gaussian fits. 
+# fig, ax = plt.subplots(1, 1, figsize=(20, 18)) +# plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) +# ax.scatter(fwhm_clean_references['pixel_column'], fwhm_clean_references['pixel_row'], c='r', marker='o', alpha=0.3) +# ax.scatter(masked_sep_xy[:, 0], masked_sep_xy[:, 1], marker='s', alpha=1.0, edgecolors='green', facecolors='none') +# ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r') +# fig.savefig(os.path.join(output_folder, 'positions_g2d.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Final clean of wcs corrected references +# logger.info("Number of references before final cleaning: %d", len(clean_references)) +# logger.debug('Masked R^2 values: %s', masked_rsqs[rsq_mask]) +# references = refclean.get_clean_references(clean_references, masked_rsqs, rsq_ideal=0.8) +# logger.info("Number of references after final cleaning: %d", len(references)) +# +# # Create plot of target and reference star positions: +# fig, ax = plt.subplots(1, 1, figsize=(20, 18)) +# plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) +# ax.scatter(references['pixel_column'], references['pixel_row'], c='r', marker='o', alpha=0.6) +# ax.scatter(masked_sep_xy[:, 0], masked_sep_xy[:, 1], marker='s', alpha=0.6, edgecolors='green', facecolors='none') +# ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r') +# fig.savefig(os.path.join(output_folder, 'positions.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Cleanup large arrays which are no longer needed: +# del fig, ax, cm +# gc.collect() +# +# # ============================================================================================== +# # CREATE EFFECTIVE PSF MODEL +# # ============================================================================================== +# +# # Make cutouts of stars using extract_stars: +# # Scales with FWHM +# size = int(np.round(29 * fwhm / 6)) +# size += 0 if size % 2 else 1 # Make sure it's a uneven number +# size = max(size, 15) # Never go below 15 pixels +# +# # Extract stars sub-images: +# xy = [tuple(masked_ref_xys[clean_references['starid'] == ref['starid']].data[0]) for ref in references] +# with warnings.catch_warnings(): +# warnings.simplefilter('ignore', AstropyUserWarning) +# stars = extract_stars(NDData(data=image.subclean.data, mask=image.mask), Table(np.array(xy), names=('x', 'y')), +# size=size + 6 # +6 for edge buffer +# ) +# +# logger.info("Number of stars input to ePSF builder: %d", len(stars)) +# +# # Plot the stars being used for ePSF: +# imgnr = 0 +# nrows, ncols = 5, 5 +# for k in range(int(np.ceil(len(stars) / (nrows * ncols)))): +# fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20), squeeze=True) +# ax = ax.ravel() +# for i in range(nrows * ncols): +# if imgnr > len(stars) - 1: +# ax[i].axis('off') +# else: +# plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis') # FIXME (no x-ticks) +# imgnr += 1 +# +# fig.savefig(os.path.join(output_folder, f'epsf_stars{k + 1:02d}.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Build the ePSF: +# epsf, stars = FlowsEPSFBuilder(oversampling=1, shape=1 * size, +# fitter=EPSFFitter(fit_boxsize=max(int(np.round(1.5 * fwhm)), 5)), +# recentering_boxsize=max(int(np.round(2 * fwhm)), 5), norm_radius=max(fwhm, 5), +# maxiters=100, progress_bar=logger.isEnabledFor(logging.INFO))(stars) +# logger.info('Built PSF model (%(n_iter)d/%(max_iters)d) in %(time).1f seconds', epsf.fit_info) +# +# # Store which stars were used in ePSF in 
the table: +# references['used_for_epsf'] = False +# references['used_for_epsf'][[star.id_label - 1 for star in stars.all_good_stars]] = True +# logger.info("Number of stars used for ePSF: %d", np.sum(references['used_for_epsf'])) +# +# fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15)) +# plot_image(epsf.data, ax=ax1, cmap='viridis') +# +# fwhms = [] +# bad_epsf_detected = False +# for a, ax in ((0, ax3), (1, ax2)): +# # Collapse the PDF along this axis: +# profile = epsf.data.sum(axis=a) +# itop = profile.argmax() +# poffset = profile[itop] / 2 +# +# # Run a spline through the points, but subtract half of the peak value, and find the roots: +# # We have to use a cubic spline, since roots() is not supported for other splines +# # for some reason +# profile_intp = UnivariateSpline(np.arange(0, len(profile)), profile - poffset, k=3, s=0, ext=3) +# lr = profile_intp.roots() +# +# # Plot the profile and spline: +# x_fine = np.linspace(-0.5, len(profile) - 0.5, 500) +# ax.plot(profile, 'k.-') +# ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-') +# ax.axvline(itop) +# ax.set_xlim(-0.5, len(profile) - 0.5) +# +# # Do some sanity checks on the ePSF: +# # It should pass 50% exactly twice and have the maximum inside that region. +# # I.e. it should be a single gaussian-like peak +# if len(lr) != 2 or itop < lr[0] or itop > lr[1]: +# logger.error("Bad PSF along axis %d", a) +# bad_epsf_detected = True +# else: +# axis_fwhm = lr[1] - lr[0] +# fwhms.append(axis_fwhm) +# ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2) +# +# # Save the ePSF figure: +# ax4.axis('off') +# fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight') +# plt.close(fig) +# +# # There was a problem with the ePSF: +# if bad_epsf_detected: +# raise RuntimeError("Bad ePSF detected.") +# +# # Let's make the final FWHM the largest one we found: +# fwhm = np.max(fwhms) +# logger.info("Final FWHM based on ePSF: %f", fwhm) +# +# # Cleanup large arrays which are no longer needed: +# del fig, ax, stars, fwhms, profile_intp +# gc.collect() +# +# # ============================================================================================== +# # COORDINATES TO DO PHOTOMETRY AT +# # ============================================================================================== +# +# coordinates = np.array([[ref['pixel_column'], ref['pixel_row']] for ref in references]) +# +# # Add the main target position as the first entry for doing photometry directly in the +# # science image: +# coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0) +# +# # ============================================================================================== +# # APERTURE PHOTOMETRY +# # ============================================================================================== +# +# # Define apertures for aperture photometry: +# apertures = CircularAperture(coordinates, r=fwhm) +# annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm) +# +# apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli], mask=image.mask, error=image.error) +# +# logger.info('Aperture Photometry Success') +# logger.debug("Aperture Photometry Table:\n%s", apphot_tbl) +# +# # ============================================================================================== +# # PSF PHOTOMETRY +# # ============================================================================================== +# +# # Create photometry object: +# photometry_obj = BasicPSFPhotometry(group_maker=DAOGroup(fwhm), 
bkg_estimator=MedianBackground(), psf_model=epsf, +# fitter=fitting.LevMarLSQFitter(), fitshape=size, aperture_radius=fwhm) +# +# psfphot_tbl = photometry_obj(image=image.subclean, init_guesses=Table(coordinates, names=['x_0', 'y_0'])) +# +# logger.info('PSF Photometry Success') +# logger.debug("PSF Photometry Table:\n%s", psfphot_tbl) +# +# # ============================================================================================== +# # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY +# # ============================================================================================== +# +# # Find the pixel-scale of the science image: +# pixel_area = proj_plane_pixel_area(image.wcs.celestial) +# pixel_scale = np.sqrt(pixel_area) * 3600 # arcsec/pixel +# # print(image.wcs.celestial.cunit) % Doesn't work? +# logger.info("Science image pixel scale: %f", pixel_scale) +# +# diffimage = None +# if datafile.get('diffimg') is not None: +# diffimg_path = os.path.join(datafile['archive_path'], datafile['diffimg']['path']) +# diffimg = load_image(diffimg_path) +# diffimage = diffimg.image +# +# elif attempt_imagematch and datafile.get('template') is not None: +# # Run the template subtraction, and get back +# # the science image where the template has been subtracted: +# diffimage = run_imagematch(datafile, target, star_coord=coordinates, fwhm=fwhm, pixel_scale=pixel_scale) +# +# # We have a diff image, so let's do photometry of the target using this: +# if diffimage is not None: +# # Include mask from original image: +# diffimage = np.ma.masked_array(diffimage, image.mask) +# +# # Create apertures around the target: +# apertures = CircularAperture(target_pixel_pos, r=fwhm) +# annuli = CircularAnnulus(target_pixel_pos, r_in=1.5 * fwhm, r_out=2.5 * fwhm) +# +# # Create two plots of the difference image: +# fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20)) +# plot_image(diffimage, ax=ax, cbar='right', title=target_name) +# ax.plot(target_pixel_pos[0], target_pixel_pos[1], marker='+', markersize=20, color='r') +# fig.savefig(os.path.join(output_folder, 'diffimg.png'), bbox_inches='tight') +# apertures.plot(axes=ax, color='r', lw=2) +# annuli.plot(axes=ax, color='r', lw=2) +# ax.set_xlim(target_pixel_pos[0] - 50, target_pixel_pos[0] + 50) +# ax.set_ylim(target_pixel_pos[1] - 50, target_pixel_pos[1] + 50) +# fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Run aperture photometry on subtracted image: +# target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli], mask=image.mask, error=image.error) +# +# # Make target only photometry object if keep_diff_fixed = True +# if keep_diff_fixed: +# epsf.fixed.update({'x_0': True, 'y_0': True}) +# +# # TODO: Try iteraratively subtracted photometry +# # Create photometry object: +# photometry_obj = BasicPSFPhotometry(group_maker=DAOGroup(0.0001), bkg_estimator=MedianBackground(), +# psf_model=epsf, fitter=fitting.LevMarLSQFitter(), fitshape=size, +# aperture_radius=fwhm) +# +# # Run PSF photometry on template subtracted image: +# target_psfphot_tbl = photometry_obj(diffimage, init_guesses=Table(target_pixel_pos, names=['x_0', 'y_0'])) +# +# # Need to adjust table columns if x_0 and y_0 were fixed +# if keep_diff_fixed: +# target_psfphot_tbl['x_0_unc'] = 0.0 +# target_psfphot_tbl['y_0_unc'] = 0.0 +# +# # Combine the output tables from the target and the reference stars into one: +# apphot_tbl = vstack([target_apphot_tbl, apphot_tbl], join_type='exact') +# psfphot_tbl = 
vstack([target_psfphot_tbl, psfphot_tbl], join_type='exact') +# +# # Build results table: +# tab = references.copy() +# +# row = {'starid': 0, 'ra': target['ra'], 'decl': target['decl'], 'pixel_column': target_pixel_pos[0], +# 'pixel_row': target_pixel_pos[1], 'used_for_epsf': False} +# row.update([(k, np.NaN) for k in set(tab.keys()) - set(row) - {'gaia_variability'}]) +# tab.insert_row(0, row) +# +# if diffimage is not None: +# row['starid'] = -1 +# tab.insert_row(0, row) +# +# indx_main_target = tab['starid'] <= 0 +# +# # Subtract background estimated from annuli: +# flux_aperture = apphot_tbl['aperture_sum_0'] - (apphot_tbl['aperture_sum_1'] / annuli.area) * apertures.area +# flux_aperture_error = np.sqrt( +# apphot_tbl['aperture_sum_err_0'] ** 2 + (apphot_tbl['aperture_sum_err_1'] / annuli.area * apertures.area) ** 2) +# +# # Add table columns with results: +# tab['flux_aperture'] = flux_aperture / image.exptime +# tab['flux_aperture_error'] = flux_aperture_error / image.exptime +# tab['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime +# tab['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime +# tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit'] +# tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit'] +# tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc'] +# tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc'] +# +# # Check that we got valid photometry: +# if np.any(~np.isfinite(tab[indx_main_target]['flux_psf'])) or np.any( +# ~np.isfinite(tab[indx_main_target]['flux_psf_error'])): +# raise RuntimeError("Target magnitude is undefined.") +# +# # ============================================================================================== +# # CALIBRATE +# # ============================================================================================== +# +# # Convert PSF fluxes to magnitudes: +# mag_inst = -2.5 * np.log10(tab['flux_psf']) +# mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] / tab['flux_psf']) +# +# # Corresponding magnitudes in catalog: +# mag_catalog = tab[ref_filter] +# +# # Mask out things that should not be used in calibration: +# use_for_calibration = np.ones_like(mag_catalog, dtype='bool') +# use_for_calibration[indx_main_target] = False # Do not use target for calibration +# use_for_calibration[~np.isfinite(mag_inst) | ~np.isfinite(mag_catalog)] = False +# +# # Just creating some short-hands: +# x = mag_catalog[use_for_calibration] +# y = mag_inst[use_for_calibration] +# yerr = mag_inst_err[use_for_calibration] +# weights = 1.0 / yerr ** 2 +# +# if not any(use_for_calibration): +# raise RuntimeError("No calibration stars") +# +# # Fit linear function with fixed slope, using sigma-clipping: +# model = models.Linear1D(slope=1, fixed={'slope': True}) +# fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(), sigma_clip, sigma=3.0) +# best_fit, sigma_clipped = fitter(model, x, y, weights=weights) +# +# # Extract zero-point and estimate its error using a single weighted fit: +# # I don't know why there is not an error-estimate attached directly to the Parameter? 
+# zp = -1 * best_fit.intercept.value # Negative, because that is the way zeropoints are usually defined +# +# weights[sigma_clipped] = 0 # Trick to make following expression simpler +# n_weights = len(weights.nonzero()[0]) +# if n_weights > 1: +# zp_error = np.sqrt(n_weights * nansum(weights * (y - best_fit(x)) ** 2) / nansum(weights) / (n_weights - 1)) +# else: +# zp_error = np.NaN +# logger.info('Leastsquare ZP = %.3f, ZP_error = %.3f', zp, zp_error) +# +# # Determine sigma clipping sigma according to Chauvenet method +# # But don't allow less than sigma = sigmamin, setting to 1.5 for now. +# # Should maybe be 2? +# sigmamin = 1.5 +# sig_chauv = sigma_from_Chauvenet(len(x)) +# sig_chauv = sig_chauv if sig_chauv >= sigmamin else sigmamin +# +# # Extract zero point and error using bootstrap method +# nboot = 1000 +# logger.info('Running bootstrap with sigma = %.2f and n = %d', sig_chauv, nboot) +# pars = bootstrap_outlier(x, y, yerr, n=nboot, model=model, fitter=fitting.LinearLSQFitter, outlier=sigma_clip, +# outlier_kwargs={'sigma': sig_chauv}, summary='median', error='bootstrap', +# return_vals=False) +# +# zp_bs = pars['intercept'] * -1.0 +# zp_error_bs = pars['intercept_error'] +# +# logger.info('Bootstrapped ZP = %.3f, ZP_error = %.3f', zp_bs, zp_error_bs) +# +# # Check that difference is not large +# zp_diff = 0.4 +# if np.abs(zp_bs - zp) >= zp_diff: +# logger.warning("Bootstrap and weighted LSQ ZPs differ by %.2f, \ +# which is more than the allowed %.2f mag.", np.abs(zp_bs - zp), zp_diff) +# +# # Add calibrated magnitudes to the photometry table: +# tab['mag'] = mag_inst + zp_bs +# tab['mag_error'] = np.sqrt(mag_inst_err ** 2 + zp_error_bs ** 2) +# +# fig, ax = plt.subplots(1, 1) +# ax.errorbar(x, y, yerr=yerr, fmt='k.') +# ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r') +# ax.plot(x, best_fit(x), color='g', linewidth=3) +# ax.set_xlabel('Catalog magnitude') +# ax.set_ylabel('Instrumental magnitude') +# fig.savefig(os.path.join(output_folder, 'calibration.png'), bbox_inches='tight') +# plt.close(fig) +# +# # Check that we got valid photometry: +# if not np.isfinite(tab[0]['mag']) or not np.isfinite(tab[0]['mag_error']): +# raise RuntimeError("Target magnitude is undefined.") +# +# # ============================================================================================== +# # SAVE PHOTOMETRY +# # ============================================================================================== +# +# # Descriptions of columns: +# tab['used_for_epsf'].description = 'Was object used for building ePSF?' 
+# tab['mag'].description = 'Measured magnitude' +# tab['mag'].unit = u.mag +# tab['mag_error'].description = 'Error on measured magnitude' +# tab['mag_error'].unit = u.mag +# tab['flux_aperture'].description = 'Measured flux using aperture photometry' +# tab['flux_aperture'].unit = u.count / u.second +# tab['flux_aperture_error'].description = 'Error on measured flux using aperture photometry' +# tab['flux_aperture_error'].unit = u.count / u.second +# tab['flux_psf'].description = 'Measured flux using PSF photometry' +# tab['flux_psf'].unit = u.count / u.second +# tab['flux_psf_error'].description = 'Error on measured flux using PSF photometry' +# tab['flux_psf_error'].unit = u.count / u.second +# tab['pixel_column'].description = 'Location on image pixel columns' +# tab['pixel_column'].unit = u.pixel +# tab['pixel_row'].description = 'Location on image pixel rows' +# tab['pixel_row'].unit = u.pixel +# tab['pixel_column_psf_fit'].description = 'Measured location on image pixel columns from PSF photometry' +# tab['pixel_column_psf_fit'].unit = u.pixel +# tab[ +# 'pixel_column_psf_fit_error'].description = 'Error on measured location on image pixel columns from PSF photometry' +# tab['pixel_column_psf_fit_error'].unit = u.pixel +# tab['pixel_row_psf_fit'].description = 'Measured location on image pixel rows from PSF photometry' +# tab['pixel_row_psf_fit'].unit = u.pixel +# tab['pixel_row_psf_fit_error'].description = 'Error on measured location on image pixel rows from PSF photometry' +# tab['pixel_row_psf_fit_error'].unit = u.pixel +# +# # Meta-data: +# tab.meta['fileid'] = fileid +# tab.meta['target_name'] = target_name +# tab.meta['version'] = __version__ +# tab.meta['template'] = None if datafile.get('template') is None else datafile['template']['fileid'] +# tab.meta['diffimg'] = None if datafile.get('diffimg') is None else datafile['diffimg']['fileid'] +# tab.meta['photfilter'] = photfilter +# tab.meta['fwhm'] = fwhm * u.pixel +# tab.meta['pixel_scale'] = pixel_scale * u.arcsec / u.pixel +# tab.meta['seeing'] = (fwhm * pixel_scale) * u.arcsec +# tab.meta['obstime-bmjd'] = float(image.obstime.mjd) +# tab.meta['zp'] = zp_bs +# tab.meta['zp_error'] = zp_error_bs +# tab.meta['zp_diff'] = np.abs(zp_bs - zp) +# tab.meta['zp_error_weights'] = zp_error +# tab.meta['head_wcs'] = head_wcs # TODO: Are these really useful? +# tab.meta['used_wcs'] = used_wcs # TODO: Are these really useful? +# +# # Filepath where to save photometry: +# photometry_output = os.path.join(output_folder, 'photometry.ecsv') +# +# # Write the final table to file: +# tab.write(photometry_output, format='ascii.ecsv', delimiter=',', overwrite=True) +# +# toc = default_timer() +# +# logger.info("------------------------------------------------------") +# logger.info("Success!") +# logger.info("Main target: %f +/- %f", tab[0]['mag'], tab[0]['mag_error']) +# logger.info("Photometry took: %.1f seconds", toc - tic) +# +# return photometry_output diff --git a/flows/reference_cleaning.py b/flows/reference_cleaning.py index 27daac5..839c839 100644 --- a/flows/reference_cleaning.py +++ b/flows/reference_cleaning.py @@ -6,6 +6,10 @@ .. codeauthor:: Emir Karamehmetoglu .. 
codeauthor:: Rasmus Handberg
 """
+from typing import Dict, Optional, TypeVar, Tuple, Any, List
+from dataclasses import dataclass
+import warnings
+import logging
 import numpy as np
 import astroalign as aa
@@ -13,27 +17,93 @@ from astropy import wcs
 from astropy.stats import sigma_clip, gaussian_fwhm_to_sigma
 from astropy.modeling import models, fitting
+from astropy.time import Time
+from astropy.utils.exceptions import ErfaWarning
+from astropy.utils import lazyproperty
+import astropy.units as u
+from astropy.table import Table
 from copy import deepcopy
-from bottleneck import nanmedian, nansum, nanmean
+from bottleneck import nanmedian, nansum, nanmean, replace
 from scipy.spatial import KDTree
 import pandas as pd  # TODO: Convert to pure numpy implementation
+import sep
+from flows.load_image import FlowsImage
+from numpy.typing import ArrayLike
+from .target import Target
+
+logger = logging.getLogger(__name__)
+
+RefTable = TypeVar('RefTable', Dict, ArrayLike, Table)

-# --------------------------------------------------------------------------------------------------
 class MinStarError(RuntimeError):
     pass

-# --------------------------------------------------------------------------------------------------
-def force_reject_g2d(xarray, yarray, image, get_fwhm=True, rsq_min=0.5, radius=10, fwhm_guess=6.0, fwhm_min=3.5,
-                     fwhm_max=18.0):
+@dataclass
+class References:
+    table: RefTable
+    coords: Optional[SkyCoord] = None
+    mask: Optional[np.ndarray] = None
+    xy: Optional[RefTable] = None
+
+    def replace_nans_pm(self) -> None:
+        replace(self.table['pm_ra'], np.NaN, 0.)
+        replace(self.table['pm_dec'], np.NaN, 0.)
+
+    def make_sky_coords(self, reference_time: float = 2015.5) -> None:
+        self.replace_nans_pm()
+        self.coords = SkyCoord(ra=self.table['ra'], dec=self.table['decl'], pm_ra_cosdec=self.table['pm_ra'],
+                               pm_dec=self.table['pm_dec'], unit='deg', frame='icrs',
+                               obstime=Time(reference_time, format='decimalyear'))
+
+    def propagate(self, obstime: Time) -> None:
+        if self.coords is None:
+            raise AttributeError("References.coords is not defined.")
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", ErfaWarning)
+            self.coords = self.coords.apply_space_motion(new_obstime=obstime)
+
+    @property
+    def masked(self) -> RefTable:
+        # A property cannot take arguments, so the mask has to be assigned to
+        # the instance before this is accessed.
+        if self.mask is None:
+            raise AttributeError("No mask defined.")
+        return self.table[self.mask]
+
+    def get_xy(self, img_wcs: wcs.WCS) -> None:
+        """Get pixel coordinates of reference stars."""
+        self.xy = img_wcs.all_world2pix(list(zip(self.coords.ra.deg, self.coords.dec.deg)), 0)
+
+    def make_pixel_columns(self, column_name: str = 'pixel_column', row_name: str = 'pixel_row') -> None:
+        self.table[column_name], self.table[row_name] = list(map(np.array, zip(*self.xy)))
+
+    def _prepend_row(self, row: dict) -> None:
+        self.table.insert_row(0, row)
+
+    def add_target(self, target: Target, starid: int = 0) -> None:
+        self._prepend_row(target.output_dict(starid=starid))
+        if target.pixel_row is not None and target.pixel_column is not None:
+            self.xy = np.vstack(((target.pixel_column, target.pixel_row), self.xy))
+
+
+def use_sep(image: FlowsImage) -> References:
+    # Use SEP for source extraction:
+    sep_background = sep.Background(image.image, mask=image.mask)
+    objects = sep.extract(image.image - sep_background, thresh=5., err=sep_background.globalrms, mask=image.mask,
+                          deblend_cont=0.1, minarea=9, clean_param=2.0)
+    return References(table=objects)
+
+
+def force_reject_g2d(xarray, yarray, image: FlowsImage,
rsq_min=0.5, radius=10, fwhm_guess=6.0, fwhm_min=3.5, + fwhm_max=18.0) -> Tuple[np.ma.MaskedArray, ...]: """ Parameters: xarray: yarray: image: - get_fwhm (bool, optional): rsq_min (float, optional): radius (float, optional): fwhm_guess=6.0: @@ -102,13 +172,11 @@ def force_reject_g2d(xarray, yarray, image, get_fwhm=True, rsq_min=0.5, radius=1 # masked_xys = masked_xys[mask] # Clean extracted array. # to masked_xys.mask[~mask] = True - # don't know if it breaks anything, but it doesn't make sence if + # don't know if it breaks anything, but it doesn't make sense if # len(masked_xys) != len(masked_rsqs) FIXME masked_fwhms = np.ma.masked_array(fwhms, ~np.isfinite(fwhms)) - if get_fwhm: - return masked_fwhms, masked_xys, mask, masked_rsqs - return masked_xys, mask, masked_rsqs + return masked_fwhms, masked_xys, mask, masked_rsqs # -------------------------------------------------------------------------------------------------- @@ -131,7 +199,7 @@ def clean_with_rsq_and_get_fwhm(masked_fwhms, masked_rsqs, references, min_fwhm_ rsqvals = np.arange(rsq_min, 0.95, 0.15)[::-1] fwhm_found = False min_references_achieved = False - + fwhm = np.nan # Clean based on R^2 Value while not min_references_achieved: for rsqval in rsqvals: @@ -172,7 +240,7 @@ def clean_with_rsq_and_get_fwhm(masked_fwhms, masked_rsqs, references, min_fwhm_ # Check len of references as this is a destructive cleaning. # if len(references) == 2: logger.info('2 reference stars remaining, check WCS and image quality') if len(references) < 2: - raise RuntimeError(f"{len(references)} References remaining; could not clean.") + raise MinStarError(f"{len(references)} References remaining; could not estimate fwhm.") return fwhm, references @@ -276,42 +344,134 @@ def get_new_wcs(extracted_ind, extracted_stars, clean_references, ref_ind, obsti return wcs.utils.fit_wcs_from_points(targets, c) -# -------------------------------------------------------------------------------------------------- -def get_clean_references(references, masked_rsqs, min_references_ideal=6, min_references_abs=3, rsq_min=0.15, - rsq_ideal=0.5, keep_max=100, rescue_bad: bool = True): +def make_rsq_mask(masked_rsqs: np.ma.MaskedArray) -> np.ndarray: + # Switching to pandas for easier selection + df = pd.DataFrame(masked_rsqs, columns=['rsq']) + return df.sort_values('rsq', ascending=False).dropna().index.values + + +def get_clean_references(reference_table: RefTable, masked_rsqs: np.ma.MaskedArray, min_references_ideal: int = 6, + min_references_abs: int = 3, rsq_min: float = 0.15, rsq_ideal: float = 0.5, + keep_max: int = 100, rescue_bad: bool = True) -> Tuple[RefTable, np.ndarray]: # Greedy first try mask = (masked_rsqs >= rsq_ideal) & (masked_rsqs < 1.0) - if np.sum(np.isfinite(masked_rsqs[mask])) >= min_references_ideal: - if len(references[mask]) <= keep_max: - return references[mask] - elif len(references[mask]) >= keep_max: - - df = pd.DataFrame(masked_rsqs, columns=['rsq']) - masked_rsqs.mask = ~mask - nmasked_rsqs = df.sort_values('rsq', ascending=False).dropna().index._data - return references[nmasked_rsqs[:keep_max]] + mask = ~mask.data | mask.mask # masked out of range values OR non-finite values + masked_rsqs.mask = mask + rsq_mask_index = make_rsq_mask(masked_rsqs)[:keep_max] + if len(rsq_mask_index) >= min_references_ideal: + return reference_table[rsq_mask_index], rsq_mask_index # Desperate second try mask = (masked_rsqs >= rsq_min) & (masked_rsqs < 1.0) - masked_rsqs.mask = ~mask - - # Switching to pandas for easier selection - df = 
pd.DataFrame(masked_rsqs, columns=['rsq']) - nmasked_rsqs = deepcopy(df.sort_values('rsq', ascending=False).dropna().index._data) - nmasked_rsqs = nmasked_rsqs[:min(min_references_ideal, len(nmasked_rsqs))] - if len(nmasked_rsqs) >= min_references_abs: - return references[nmasked_rsqs] + mask = ~mask.data | mask.mask + masked_rsqs.mask = mask + rsq_mask_index = make_rsq_mask(masked_rsqs)[:min_references_ideal] + if len(rsq_mask_index) >= min_references_abs: + return reference_table[rsq_mask_index], rsq_mask_index if not rescue_bad: raise MinStarError(f'Less than {min_references_abs} clean stars and rescue_bad = False') # Extremely desperate last ditch attempt i.e. "rescue bad" mask = (masked_rsqs >= 0.02) & (masked_rsqs < 1.0) - masked_rsqs.mask = ~mask - - # Switch to pandas - df = pd.DataFrame(masked_rsqs, columns=['rsq']) - nmasked_rsqs = df.sort_values('rsq', ascending=False).dropna().index._data - nmasked_rsqs = nmasked_rsqs[:min(min_references_ideal, len(nmasked_rsqs))] - if len(nmasked_rsqs) < 2: + mask = ~mask.data | mask.mask + masked_rsqs.mask = mask + rsq_mask_index = make_rsq_mask(masked_rsqs)[:min_references_ideal] + if len(rsq_mask_index) < 2: raise MinStarError('Less than 2 clean stars.') - return references[nmasked_rsqs] # Return if len >= 2 + return reference_table[rsq_mask_index], rsq_mask_index # Return if len >= 2 + + +class ReferenceCleaner: + + def __init__(self, image: FlowsImage, references: References, rsq_min: float = 0.3, + min_references_ideal: int = 6, min_references_abs: int = 3): + self.image = image + self.references = references + self.rsq_min = rsq_min + self.min_references_ideal = min_references_ideal + self.min_references_abs = min_references_abs + self.gaussian_xys: Optional[np.ndarray] = None # gaussian pixel positions + + def _clean_extracted_stars(self, x=None, y=None) -> Tuple[np.ma.MaskedArray, ...]: + """ + Clean extracted stars. + :return: Tuple of masked_fwhms, masked_ref_xys, rsq_mask, masked_rsqs + """ + # use instrument_defaults for initial guess of FWHM + radius = self.image.instrument_defaults.radius + fwhm_guess = self.image.instrument_defaults.fwhm + fwhm_min = self.image.instrument_defaults.fwhm_min + fwhm_max = self.image.instrument_defaults.fwhm_max + + # Clean the references + return force_reject_g2d(x, y, self.image, radius=radius, fwhm_guess=fwhm_guess, rsq_min=self.rsq_min, + fwhm_max=fwhm_max, fwhm_min=fwhm_min) + + def set_gaussian_xys(self, masked_ref_xys: np.ma.MaskedArray, old_references: RefTable, + new_references: RefTable) -> None: + xy = [tuple(masked_ref_xys[old_references['starid'] == ref['starid']].data[0]) for ref in new_references] + self.gaussian_xys = np.array(xy) + + def clean_references(self, references: References = None) -> Tuple[References, float]: + if references is None: + references = self.references + + # Clean the references + masked_fwhms, masked_ref_xys, rsq_mask, masked_rsqs = self._clean_extracted_stars( + references.table['pixel_column'], + references.table['pixel_row']) + + + # Use R^2 to more robustly determine initial FWHM guess. + # This cleaning is good when we have FEW references. 
+        fwhm, fwhm_clean_references = clean_with_rsq_and_get_fwhm(
+            masked_fwhms, masked_rsqs, references.table, min_fwhm_references=2,
+            min_references=self.min_references_abs, rsq_min=self.rsq_min)
+        logger.info('Initial FWHM guess is %f pixels', fwhm)
+
+        # Final clean of wcs corrected references
+        logger.info("Number of references before final cleaning: %d", len(references.table))
+        logger.debug('Masked R^2 values: %s', masked_rsqs[rsq_mask])
+        # Get references cleaned and ordered by R^2:
+        ordered_cleaned_references, order_index = get_clean_references(references.table, masked_rsqs, rsq_ideal=0.8)
+        ordered_cleaned_references = References(table=ordered_cleaned_references,
+                                                coords=references.coords[order_index],
+                                                xy=references.xy[order_index])
+        logger.info("Number of references after final cleaning: %d", len(ordered_cleaned_references.table))
+
+        # Save Gaussian XY positions before returning
+        self.set_gaussian_xys(masked_ref_xys, references.table, ordered_cleaned_references.table)
+        return ordered_cleaned_references, fwhm
+
+    def make_sep_clean_references(self) -> References:
+        """
+        Make a clean reference catalog using SExtractor.
+        """
+        image = self.image
+        sep_references = use_sep(image)
+
+        # Clean extracted stars
+        _, masked_sep_xy, sep_mask, masked_sep_rsqs = self._clean_extracted_stars(
+            sep_references.table['x'], sep_references.table['y'])
+
+        sep_references_clean = References(table=masked_sep_xy, mask=sep_mask)
+
+        return sep_references_clean
+
+    def mask_edge_and_target(self, target_coords: SkyCoord, hsize: int = 10,
+                             target_distance_lim: u.quantity.Quantity = 10 * u.arcsec) -> References:
+        """
+        Clean the references by removing those that are too close to the target
+        or within `hsize` pixels of the image edges.
+        """
+        image_shape = self.image.shape
+
+        # Make mask
+        mask = ((target_coords.separation(self.references.coords) > target_distance_lim)
+                & (self.references.table['pixel_column'] > hsize)
+                & (self.references.table['pixel_column'] < (image_shape[1] - 1 - hsize))
+                & (self.references.table['pixel_row'] > hsize)
+                & (self.references.table['pixel_row'] < (image_shape[0] - 1 - hsize)))
+        self.references.mask = mask
+
+        # Make new clean references
+        return References(table=self.references.masked, mask=mask,
+                          coords=self.references.coords[mask],
+                          xy=self.references.xy[mask])
diff --git a/flows/result_model.py b/flows/result_model.py
new file mode 100644
index 0000000..34af49a
--- /dev/null
+++ b/flows/result_model.py
@@ -0,0 +1,72 @@
+from astropy.table import Table
+import astropy.units as u
+from .load_image import FlowsImage
+
+
+class ResultsTable(Table):
+
+    def add_column_descriptions(self):
+        # Descriptions of columns:
+        self['used_for_epsf'].description = 'Was object used for building ePSF?'
+        self['mag'].description = 'Measured magnitude'
+        self['mag'].unit = u.mag
+        self['mag_error'].description = 'Error on measured magnitude'
+        self['mag_error'].unit = u.mag
+        self['flux_aperture'].description = 'Measured flux using aperture photometry'
+        self['flux_aperture'].unit = u.count / u.second
+        self['flux_aperture_error'].description = 'Error on measured flux using aperture photometry'
+        self['flux_aperture_error'].unit = u.count / u.second
+        self['flux_psf'].description = 'Measured flux using PSF photometry'
+        self['flux_psf'].unit = u.count / u.second
+        self['flux_psf_error'].description = 'Error on measured flux using PSF photometry'
+        self['flux_psf_error'].unit = u.count / u.second
+        self['pixel_column'].description = 'Location on image pixel columns'
+        self['pixel_column'].unit = u.pixel
+        self['pixel_row'].description = 'Location on image pixel rows'
+        self['pixel_row'].unit = u.pixel
+        self['pixel_column_psf_fit'].description = 'Measured location on image pixel columns from PSF photometry'
+        self['pixel_column_psf_fit'].unit = u.pixel
+        self['pixel_column_psf_fit_error'].description = \
+            'Error on measured location on image pixel columns from PSF photometry'
+        self['pixel_column_psf_fit_error'].unit = u.pixel
+        self['pixel_row_psf_fit'].description = 'Measured location on image pixel rows from PSF photometry'
+        self['pixel_row_psf_fit'].unit = u.pixel
+        self['pixel_row_psf_fit_error'].description = \
+            'Error on measured location on image pixel rows from PSF photometry'
+        self['pixel_row_psf_fit_error'].unit = u.pixel
+
+    def add_metadata(self):
+        raise NotImplementedError()
+        # # Meta-data:
+        # self.meta['fileid'] = fileid
+        # self.meta['target_name'] = target_name
+        # self.meta['version'] = __version__
+        # self.meta['template'] = None if datafile.get('template') is None else datafile['template']['fileid']
+        # self.meta['diffimg'] = None if datafile.get('diffimg') is None else datafile['diffimg']['fileid']
+        # self.meta['photfilter'] = photfilter
+        # self.meta['fwhm'] = fwhm * u.pixel
+        # self.meta['pixel_scale'] = pixel_scale * u.arcsec / u.pixel
+        # self.meta['seeing'] = (fwhm * pixel_scale) * u.arcsec
+        # self.meta['obstime-bmjd'] = float(image.obstime.mjd)
+        # self.meta['zp'] = zp_bs
+        # self.meta['zp_error'] = zp_error_bs
+        # self.meta['zp_diff'] = np.abs(zp_bs - zp)
+        # self.meta['zp_error_weights'] = zp_error
+        # self.meta['head_wcs'] = head_wcs  # TODO: Are these really useful?
+        # self.meta['used_wcs'] = used_wcs  # TODO: Are these really useful?
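+
+    # Hypothetical usage sketch (not part of this changeset): assuming `ref_table`,
+    # `apphot_tbl`, `psfphot_tbl` and `image` come out of the photometry steps, and
+    # `directories` is the flows.fileio.Directories instance used above:
+    #
+    #     results = ResultsTable.make_results_table(ref_table, apphot_tbl, psfphot_tbl, image)
+    #     results.add_column_descriptions()
+    #     results.write(directories.photometry_path, format='ascii.ecsv', delimiter=',', overwrite=True)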
+
+    @classmethod
+    def make_results_table(cls, ref_table: Table, apphot_tbl: Table, psfphot_tbl: Table, image: FlowsImage):
+        results_table = cls(ref_table)
+        # FIXME: This branch is presumably meant to prepend the difference-image
+        # target row (starid=-1, cf. the old insert_row(0, row) usage above), but
+        # Table.add_row expects row values, not an index, so it will fail as written.
+        if len(ref_table) - len(apphot_tbl) == 1:
+            results_table.add_row(0)
+        results_table['flux_aperture'] = apphot_tbl['flux_aperture'] / image.exptime
+        results_table['flux_aperture_error'] = apphot_tbl['flux_aperture_error'] / image.exptime
+        results_table['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime
+        results_table['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime
+        results_table['pixel_column_psf_fit'] = psfphot_tbl['x_fit']
+        results_table['pixel_row_psf_fit'] = psfphot_tbl['y_fit']
+        results_table['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc']
+        results_table['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc']
+
+        return results_table
diff --git a/flows/target.py b/flows/target.py
new file mode 100644
index 0000000..5be13e1
--- /dev/null
+++ b/flows/target.py
@@ -0,0 +1,48 @@
+from dataclasses import dataclass
+from typing import Optional, Dict
+
+import numpy as np
+from numpy.typing import ArrayLike
+from astropy.coordinates import SkyCoord
+from astropy.wcs import WCS
+
+
+@dataclass
+class Target:
+    ra: float
+    dec: float
+    name: Optional[str] = None
+    id: Optional[int] = None  # Target id from Flows database
+    photfilter: Optional[str] = None  # Defined if target is associated with an image.
+    coords: Optional[SkyCoord] = None
+    pixel_column: Optional[int] = None
+    pixel_row: Optional[int] = None
+
+    def __post_init__(self):
+        if self.coords is None:
+            self.coords = SkyCoord(ra=self.ra, dec=self.dec, unit='deg', frame='icrs')
+
+    def calc_pixels(self, wcs: WCS) -> None:
+        pixels = np.array(wcs.all_world2pix(self.ra, self.dec, 1)).T
+        self._add_pixel_coordinates(pixel_pos=pixels)
+
+    def _add_pixel_coordinates(self, pixel_column: Optional[int] = None, pixel_row: Optional[int] = None,
+                               pixel_pos: Optional[ArrayLike] = None) -> None:
+        """
+        Add pixel coordinates to target.
+        """
+        if pixel_column is None or pixel_row is None:
+            if pixel_pos is None:
+                raise ValueError('Either pixel_column, pixel_row or pixel_pos must be provided.')
+            pixel_column, pixel_row = pixel_pos
+
+        self.pixel_column = pixel_column
+        self.pixel_row = pixel_row
+
+    def output_dict(self, starid: int = 0) -> Dict:
+        """
+        Return target as output dictionary. starid = -1 means difference image.
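+
+        Example (hypothetical coordinates):
+
+            >>> Target(ra=123.4, dec=-12.3).output_dict()
+            {'starid': 0, 'ra': 123.4, 'decl': -12.3, 'pixel_column': None, 'pixel_row': None, 'used_for_epsf': False}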
+ """ + return {'starid': starid, 'ra': self.ra, 'decl': self.dec, 'pixel_column': self.pixel_column, + 'pixel_row': self.pixel_row, 'used_for_epsf': False} diff --git a/optional-requirements.txt b/optional-requirements.txt new file mode 100644 index 0000000..fd1f650 --- /dev/null +++ b/optional-requirements.txt @@ -0,0 +1,2 @@ +vtk +git+https://github.com/obscode/imagematch.git@photutils#egg=imagematch \ No newline at end of file diff --git a/run_photometry.py b/run_photometry.py index f7042ae..cea7dd3 100644 --- a/run_photometry.py +++ b/run_photometry.py @@ -9,6 +9,7 @@ import shutil import functools import multiprocessing +import tqdm from tendrils import api, utils from flows import photometry @@ -43,8 +44,9 @@ def process_fileid(fid, output_folder_root=None, attempt_imagematch=True, autoup logger.addHandler(_filehandler) logger_warn.addHandler(_filehandler) - photfile = photometry(fileid=fid, output_folder=output_folder, attempt_imagematch=attempt_imagematch, - keep_diff_fixed=keep_diff_fixed, cm_timeout=cm_timeout) + photfile = photometry(fileid=fid, cm_timeout=cm_timeout, make_plots=True) + # photfile = photometry(fileid=fid, output_folder=output_folder, attempt_imagematch=attempt_imagematch, + # keep_diff_fixed=keep_diff_fixed, cm_timeout=cm_timeout) except (SystemExit, KeyboardInterrupt): logger.error("Aborted by user or system.") @@ -158,12 +160,14 @@ def main(): if threads > 1: # Disable printing info messages from the parent function. # It is going to be all jumbled up anyway. - logger.setLevel(logging.WARNING) + if not args.debug: + logger.setLevel(logging.WARNING) # Don't silence if debugging. # There is more than one area to process, so let's start # a process pool and process them in parallel: with multiprocessing.Pool(threads) as pool: - for res in pool.imap_unordered(process_fileid_wrapper, fileids): + for result in tqdm.tqdm(pool.imap_unordered(process_fileid_wrapper, fileids), total=len(fileids)): + # We can do something with the partial results here (like calculate color terms!). pass else: diff --git a/tests/test_photometry.py b/tests/test_photometry.py index 3bab9cb..95f62d5 100644 --- a/tests/test_photometry.py +++ b/tests/test_photometry.py @@ -16,6 +16,9 @@ def test_import_photometry(): pass +def test_photometry_calc_mag_error(): + pass + # assert photometry # --------------------------------------------------------------------------------------------------
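
A minimal sketch of how the helper classes introduced in this diff are intended to compose. It is illustrative only, not part of the changeset: the entry point that actually wires these together is the new `photometry` function called from run_photometry.py, and this sketch stops before the ePSF, photometry and calibration steps. It assumes `utils.load_config()` returns the ConfigParser that `Directories` expects, as in the commented-out code above.

from tendrils import api, utils

from flows.fileio import Directories
from flows.filters import get_reference_filter
from flows.load_image import load_image
from flows.reference_cleaning import References, ReferenceCleaner
from flows.target import Target


def prepare_photometry(fileid: int):
    # Configuration and input/output directories:
    config = utils.load_config()  # assumed to return the ConfigParser that Directories expects
    datafile = api.get_datafile(fileid)
    directories = Directories(config)
    directories.set_output_dirs(datafile['target_name'], fileid)

    # Target and reference-star catalog:
    catalog = api.get_catalog(datafile['targetid'], output='table')
    trow = catalog['target'][0]
    target = Target(ra=trow['ra'], dec=trow['decl'], name=datafile['target_name'],
                    id=datafile['targetid'], photfilter=datafile['photfilter'])

    # Science image and pixel coordinates of the references and the target:
    image = load_image(directories.image_path(datafile['path']), target_coord=target.coords)
    references = References(table=catalog['references'])
    references.make_sky_coords()         # apply proper motions from the 2015.5 reference epoch...
    references.propagate(image.obstime)  # ...to the time of observation
    references.get_xy(image.wcs)
    references.make_pixel_columns()
    target.calc_pixels(image.wcs)

    # Reject references near the edges or the target, then clean on R^2 and estimate the FWHM:
    cleaner = ReferenceCleaner(image, references, rsq_min=0.3)
    masked_references = cleaner.mask_edge_and_target(target.coords)
    clean_references, fwhm = cleaner.clean_references(masked_references)

    ref_filter = get_reference_filter(datafile['photfilter'])
    return image, target, clean_references, fwhm, ref_filter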