diff --git a/.github/workflows/autolabel.yml b/.github/workflows/autolabel.yml index c76d8a6e..637c6db1 100644 --- a/.github/workflows/autolabel.yml +++ b/.github/workflows/autolabel.yml @@ -10,5 +10,5 @@ jobs: - uses: Naturalclar/issue-action@v2.0.2 with: title-or-body: "both" - parameters: '[ {"keywords": ["NIRCam Stage 1/2 Pipeline"], "labels": ["NIRCam"], "assignees": ["AarynnCarter","mperrin","JarronL"]}, {"keywords": ["MIRI Stage 1/2 Pipeline"], "labels": ["MIRI"], "assignees": ["AarynnCarter","mperrin","JarronL"]}, {"keywords": ["Image Tools"], "labels": ["image tools"], "assignees": ["kglidic","kammerje","AarynnCarter","wbalmer","kdlawson","JarronL"]}, {"keywords": ["PSF Subtraction"], "labels": ["PSF subtraction"], "assignees": ["kdlawson","wbalmer","AarynnCarter"]}, {"keywords": ["Analysis Tools"], "labels": ["analysis tools"], "assignees": ["kdlawson","wbalmer","AarynnCarter","juliengirard","kammerje","kglidic"]}, {"keywords": ["Other"], "labels": [], "assignees": ["kdlawson","AarynnCarter","juliengirard","JarronL","kglidic"]}, {"keywords": ["Documentation"], "labels": ["documentation"], "assignees": ["AarynnCarter","mperrin","kglidic"]}]' + parameters: '[ {"keywords": ["NIRCam Stage 1/2 Pipeline"], "labels": ["NIRCam"], "assignees": ["AarynnCarter","mperrin","JarronL"]}, {"keywords": ["MIRI Stage 1/2 Pipeline"], "labels": ["MIRI"], "assignees": ["AarynnCarter","mperrin","JarronL"]}, {"keywords": ["Image Tools"], "labels": ["image tools"], "assignees": ["kglidic","kammerje","AarynnCarter","wbalmer","kdlawson","JarronL"]}, {"keywords": ["PSF Subtraction"], "labels": ["PSF subtraction"], "assignees": ["kdlawson","wbalmer","AarynnCarter"]}, {"keywords": ["Analysis Tools"], "labels": ["analysis tools"], "assignees": ["kdlawson","wbalmer","AarynnCarter","juliengirard","kammerje","kglidic"]}, {"keywords": ["Other"], "labels": ["other"], "assignees": ["kdlawson","AarynnCarter","juliengirard","JarronL","kglidic"]}, {"keywords": ["Documentation"], "labels": ["documentation"], "assignees": ["AarynnCarter","mperrin","kglidic"]}]' github-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.gitignore b/.gitignore index 9bb521ff..616adc87 100644 --- a/.gitignore +++ b/.gitignore @@ -213,4 +213,9 @@ tests/ultranest tests/species_* # setuptools-scm + +# PyCharm files +.idea/ + + spaceKLIP/_version.py diff --git a/README.rst b/README.rst index 30c2b75e..da93c9af 100644 --- a/README.rst +++ b/README.rst @@ -37,15 +37,11 @@ With the Anaconda environment created, move to the cloned directory and install :: cd where/you/saved/the/git/repo + conda install conda-forge::git-lfs pip install -r requirements.txt pip install -e . -NEW AS OF 1 JUNE 2023: you also need to make the custom PSF mask files before running spaceKLIP for the first time: - -:: - - cd spaceKLIP/ - python make_psfmasks.py +Note that installing git-lfs from pip does not work as of 25 June 2024. Finally, and very importantly, you will need to download the reference files and set the environment variables supporting the functioning of :code:`webbpsf` and :code:`webbpsf_ext`. Instructions to do this can be found at the respective package websites (`WebbPSF `_, `WebbPSF_ext `_). Ensure that if you edit your .bashrc file, close and reopen your terminal to fully apply the changes (:code:`source ~/.bashrc` or :code:`source ~/.zshrc` may also work). 
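For reference, once the data files are downloaded, the environment variables are typically set in your shell startup file along the following lines. This is an illustrative sketch: the paths are placeholders, and you should double-check the variable names against the WebbPSF and WebbPSF_ext instructions linked above.

::

    export WEBBPSF_PATH="$HOME/data/webbpsf-data"
    export WEBBPSF_EXT_PATH="$HOME/data/webbpsf_ext_data"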
diff --git a/requirements-dev.txt b/requirements-dev.txt index 3e203c4f..409e6ba8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,6 @@ -r requirements.txt -sphinx -pandoc -nbsphinx -sphinx-rtd-theme -sphinx_automodapi \ No newline at end of file +sphinx>=7.2.6 +pandoc>=2.3 +nbsphinx>=0.9.3 +sphinx-rtd-theme>=2.0.0 +sphinx_automodapi>=0.17.0 diff --git a/requirements.txt b/requirements.txt index 55addc90..8ff33cfb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,12 @@ -astropy -astroquery -jwst +astropy>=6.0.1 +astroquery>=0.4.7 +jwst>=1.14.0 matplotlib>=3.7.1 numpy<2 -synphot -stsynphot -scipy -stsci.stimage==0.2.6 -webbpsf +synphot>=1.4.0 +stsynphot>=1.3.0 +scipy>=1.13.0 +stsci.stimage>=0.2.9 +webbpsf>=1.2.1 webbpsf_ext>=1.2.0 -pyklip @ git+https://bitbucket.org/pyKLIP/pyklip.git@jwst +pyklip>=2.7.1 diff --git a/spaceKLIP/analysistools.py b/spaceKLIP/analysistools.py index d4d35e17..d4422751 100644 --- a/spaceKLIP/analysistools.py +++ b/spaceKLIP/analysistools.py @@ -19,6 +19,9 @@ import numpy as np import copy +import corner +import emcee +import matplotlib.patheffects as PathEffects import pyklip.fakes as fakes import pyklip.fitpsf as fitpsf import pyklip.fm as fm @@ -27,14 +30,14 @@ from pyklip import klip, parallelized from pyklip.instruments.JWST import JWSTData -from scipy.ndimage import gaussian_filter, rotate +from scipy.ndimage import fourier_shift, gaussian_filter, rotate from scipy.ndimage import shift as spline_shift from scipy.interpolate import interp1d from spaceKLIP import utils as ut from spaceKLIP.psf import get_offsetpsf, JWST_PSF from spaceKLIP.starphot import get_stellar_magnitudes, read_spec_file from spaceKLIP.pyklippipeline import get_pyklip_filepaths -from spaceKLIP.utils import write_starfile, set_surrounded_pixels +from spaceKLIP.utils import write_starfile, set_surrounded_pixels, pop_pxar_kw from webbpsf.constants import JWST_CIRCUMSCRIBED_DIAMETER @@ -171,7 +174,10 @@ def raw_contrast(self, # Compute the pixel area in steradian. pxsc_arcsec = self.database.red[key]['PIXSCALE'][j] # arcsec pxsc_rad = pxsc_arcsec / 3600. / 180. * np.pi # rad - pxar = pxsc_rad**2 # sr + pxar = self.database.red[key]['PIXAR_SR'][j] # sr + if np.isnan(pxar): + log.warning("PIXAR_SR not found in database, falling back to use PIXSCALE") + pxar = pxsc_rad**2 # sr # Convert the host star brightness from vegamag to MJy. Use an # unocculted model PSF whose integrated flux is normalized to @@ -386,8 +392,9 @@ def raw_contrast(self, for i, klmode in enumerate(klmodes): columns.append(cons[i]) names.append(f'contrast, N_kl={klmode}') - columns.append(cons_mask[i]) - names.append(f'contrast+mask, N_kl={klmode}') + if mask is not None: + columns.append(cons_mask[i]) + names.append(f'contrast+mask, N_kl={klmode}') results_table = Table(columns, names=names) results_table['separation'].unit = u.arcsec @@ -528,6 +535,7 @@ def calibrate_contrast(self, # Read Stage 2 files and make pyKLIP dataset filepaths, psflib_filepaths = get_pyklip_filepaths(self.database, key) + pop_pxar_kw(np.append(filepaths, psflib_filepaths)) pyklip_dataset = JWSTData(filepaths, psflib_filepaths) # Compute the resolution element. Account for possible blurring. 
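The PIXAR_SR lookup with a PIXSCALE fallback introduced above (and repeated in :code:`extract_companions` below) reduces to the following pattern. This is a minimal sketch, with :code:`red` standing in for the database reduction table and :code:`get_pixel_area_sr` a hypothetical helper name:

::

    import numpy as np

    def get_pixel_area_sr(red, key, j):
        """Pixel area in steradian: prefer the PIXAR_SR value stored in the
        database, fall back to the squared pixel scale if it is missing."""
        pxsc_arcsec = red[key]['PIXSCALE'][j]          # arcsec
        pxsc_rad = pxsc_arcsec / 3600. / 180. * np.pi  # rad
        pxar = red[key]['PIXAR_SR'][j]                 # sr
        if np.isnan(pxar):
            pxar = pxsc_rad**2                         # sr
        return pxar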
@@ -833,6 +841,7 @@ def extract_companions(self, starfile, mstar_err, spectral_type='G2V', + planetfile=None, klmode='max', date='auto', use_fm_psf=True, @@ -841,6 +850,8 @@ def extract_companions(self, fitkernel='diag', subtract=True, inject=False, + remove_background=False, + save_preklip=False, overwrite=True, subdir='companions', **kwargs): @@ -865,6 +876,9 @@ def extract_companions(self, spectral_type : str, optional Host star spectral type for the stellar model SED. The default is 'G2V'. + planetfile : path + Path of VizieR VOTable containing companion photometry or two + column TXT file with wavelength (micron) and flux (Jy). klmode : int or 'max', optional KL mode for which the companions shall be extracted. If 'max', then the maximum possible KL mode will be used. The default is 'max'. @@ -892,6 +906,12 @@ def extract_companions(self, inject : bool, optional Instead of fitting for a companion at the guessed location and contrast, inject one into the data. + remove_background : bool, optional + Remove a constant background level from the KLIP-subtracted data + before fitting the FM PSF. The default is False. + save_preklip : bool, optional + Save the stage 2 files when injecting/killing a companion? The + default is False. overwrite : bool, optional If True, compute a new FM PSF and overwrite any existing one, otherwise try to load an existing one and only compute a new one if @@ -933,7 +953,10 @@ def extract_companions(self, # Compute the pixel area in steradian. pxsc_arcsec = self.database.red[key]['PIXSCALE'][j] # arcsec pxsc_rad = pxsc_arcsec / 3600. / 180. * np.pi # rad - pxar = pxsc_rad**2 # sr + pxar = self.database.red[key]['PIXAR_SR'][j] # sr + if np.isnan(pxar): + log.warning("PIXAR_SR not found in database, falling back to use PIXSCALE") + pxar = pxsc_rad**2 # sr # Compute the resolution element. Account for possible # blurring. @@ -954,11 +977,15 @@ def extract_companions(self, kwargs_temp['maxnumbasis'] = maxnumbasis # Initialize pyKLIP dataset. - dataset = JWSTData(filepaths, psflib_filepaths) + pop_pxar_kw(np.append(filepaths, psflib_filepaths)) + dataset = JWSTData(filepaths, psflib_filepaths, highpass=highpass) kwargs_temp['dataset'] = dataset kwargs_temp['aligned_center'] = dataset._centers[0] kwargs_temp['psf_library'] = dataset.psflib + # Make copy of the original pyKLIP dataset. + dataset_orig = copy.deepcopy(dataset) + # Get index of desired KL mode. klmodes = self.database.red[key]['KLMODES'][j].split(',') klmodes = np.array([int(temp) for temp in klmodes]) @@ -990,13 +1017,15 @@ def extract_companions(self, raise UserWarning('Data originates from unknown JWST instrument') else: raise UserWarning('Data originates from unknown telescope') - if 'planetfile' not in kwargs.keys() or kwargs['planetfile'] is None: - if starfile is not None and starfile.endswith('.txt'): - sed = read_spec_file(starfile) - else: - sed = None + if planetfile is None: + # Keep backward compatibility where planetfile was part of + # the kwargs. 
+ if 'planetfile' in kwargs.keys() and kwargs['planetfile'] is not None: + planetfile = kwargs['planetfile'] + if planetfile is not None: + sed = read_spec_file(planetfile) else: - sed = read_spec_file(kwargs['planetfile']) + sed = None ww_sci = np.where(self.database.obs[key]['TYPE'] == 'SCI')[0] if date is not None: if date == 'auto': @@ -1065,9 +1094,10 @@ def extract_companions(self, output_dir_fm = os.path.join(output_dir_comp, 'KLIP_FM') if not os.path.exists(output_dir_fm): os.makedirs(output_dir_fm) - output_dir_pk = os.path.join(output_dir_comp, 'PREKLIP') - if not os.path.exists(output_dir_pk): - os.makedirs(output_dir_pk) + if save_preklip: + output_dir_pk = os.path.join(output_dir_comp, 'PREKLIP') + if not os.path.exists(output_dir_pk): + os.makedirs(output_dir_pk) # Offset PSF that is not affected by the coronagraphic # mask, but only the Lyot stop. @@ -1244,10 +1274,11 @@ def extract_companions(self, fileprefix='FM-' + key, annuli=annuli, subsections=subsections, - movement=1, + movement=1., numbasis=klmodes, maxnumbasis=maxnumbasis, calibrate_flux=False, + aligned_center=dataset._centers[0], psf_library=dataset.psflib, highpass=highpass_temp, mute_progression=True) @@ -1269,7 +1300,7 @@ def extract_companions(self, av_offsetpsf = np.average(rot_offsetpsfs, weights=sci_totinttime, axis=0) sx = av_offsetpsf.shape[1] sy = av_offsetpsf.shape[0] - + # Make sure that the model offset PSF has odd shape and # perform the required subpixel shift before inserting # it into the fm_frame. @@ -1289,11 +1320,11 @@ def extract_companions(self, # assign forward model kwargs if 'boxsize' not in kwargs.keys() or kwargs['boxsize'] is None: - boxsize = 31 + boxsize = 35 else: boxsize = kwargs['boxsize'] if 'dr' not in kwargs.keys() or kwargs['dr'] is None: - dr = 3 + dr = 5 else: dr = kwargs['dr'] if 'exclr' not in kwargs.keys() or kwargs['exclr'] is None: @@ -1301,15 +1332,15 @@ def extract_companions(self, else: exclr = kwargs['exclr']*resolution if 'xrange' not in kwargs.keys() or kwargs['xrange'] is None: - xrange = 2. + xrange = 3. else: xrange = kwargs['xrange'] if 'yrange' not in kwargs.keys() or kwargs['yrange'] is None: - yrange = 2. + yrange = 3. else: yrange = kwargs['yrange'] if 'frange' not in kwargs.keys() or kwargs['frange'] is None: - frange = [-1e-2, 1e2] # * guess_flux + frange = 2. #i.e. bounds=[guess_flux/(10.**frange),guess_flux*(10**frange)] else: frange = kwargs['frange'] if 'corr_len_range' not in kwargs.keys() or kwargs['corr_len_range'] is None: @@ -1317,14 +1348,92 @@ def extract_companions(self, else: corr_len_range = kwargs['corr_len_range'] if 'corr_len_guess' not in kwargs.keys() or kwargs['corr_len_guess'] is None: - corr_len_guess = 2. + corr_len_guess = 3. else: corr_len_guess = kwargs['corr_len_guess'] # Fit the FM PSF to the KLIP-subtracted data. if inject == False: + + # Remove a constant background level from the + # KLIP-subtracted data before fitting the FM PSF? + if remove_background: + + # Initialize pyKLIP FMAstrometry class. + fma = fitpsf.FMAstrometry(guess_sep=guess_sep, + guess_pa=guess_pa, + fitboxsize=boxsize) + fma.generate_fm_stamp(fm_image=fm_frame, + fm_center=[fm_centx, fm_centy], + padding=5) + fma.generate_data_stamp(data=data_frame, + data_center=[data_centx, data_centy], + dr=dr, + exclusion_radius=exclr) + corr_len_label = r'$l$' + fma.set_kernel(fitkernel, [corr_len_guess], [corr_len_label]) + fma.set_bounds(xrange, yrange, frange, [corr_len_range]) + + # Make sure that the noise map is invertible. 
+ noise_map_max = np.nanmax(fma.noise_map) + fma.noise_map[np.isnan(fma.noise_map)] = noise_map_max + fma.noise_map[fma.noise_map == 0.] = noise_map_max + + # Set MCMC parameters from kwargs. + if 'nwalkers' not in kwargs.keys() or kwargs['nwalkers'] is None: + nwalkers = 50 + else: + nwalkers = kwargs['nwalkers'] + if 'nburn' not in kwargs.keys() or kwargs['nburn'] is None: + nburn = 100 + else: + nburn = kwargs['nburn'] + if 'nsteps' not in kwargs.keys() or kwargs['nsteps'] is None: + nsteps = 100 + else: + nsteps = kwargs['nsteps'] + if 'nthreads' not in kwargs.keys() or kwargs['nthreads'] is None: + nthreads = 4 + else: + nthreads = kwargs['nthreads'] + + # Run the MCMC fit. + chain_output = os.path.join(output_dir_kl, key + '-bka_chain_c%.0f' % (k + 1) + '.pkl') + fma.fit_astrometry(nwalkers=nwalkers, + nburn=nburn, + nsteps=nsteps, + numthreads=nthreads, + chain_output=chain_output) + + # Estimate the background level from those pixels + # in the KLIP-subtracted data which have a small + # flux in the best fit FM PSF. + if 'hsz' not in kwargs.keys() or kwargs['hsz'] is None: + hsz = 35 + else: + hsz = kwargs['hsz'] + stamp = data_frame.copy() + xp = int(round(data_centx - guess_dx)) + yp = int(round(data_centy + guess_dy)) + stamp = stamp[yp-hsz:yp+hsz+1, xp-hsz:xp+hsz+1] + psf = fm_frame.copy() + xp = int(round(fm_centx - guess_dx)) + yp = int(round(fm_centy + guess_dy)) + psf = psf[yp-hsz:yp+hsz+1, xp-hsz:xp+hsz+1] + xshift = -(fma.raw_RA_offset.bestfit - guess_dx) + yshift = fma.raw_Dec_offset.bestfit - guess_dy + yxshift = np.array([yshift, xshift]) + psf_shift = np.fft.ifftn(fourier_shift(np.fft.fftn(psf), yxshift)).real + con = fma.fit_flux.bestfit * guess_flux + res = stamp - fma.fit_flux.bestfit * psf_shift + thresh = np.nanmax(psf_shift) / 150. + bg = np.nanmedian(stamp[np.abs(psf_shift) < thresh]) + data_frame -= bg + # MCMC. if fitmethod == 'mcmc': + + # Initialize pyKLIP FMAstrometry class. fma = fitpsf.FMAstrometry(guess_sep=guess_sep, guess_pa=guess_pa, fitboxsize=boxsize) @@ -1344,9 +1453,7 @@ def extract_companions(self, fma.noise_map[np.isnan(fma.noise_map)] = noise_map_max fma.noise_map[fma.noise_map == 0.] = noise_map_max - # Run the MCMC fit. - - # set MCMC parameters from kwargs + # Set MCMC parameters from kwargs. if 'nwalkers' not in kwargs.keys() or kwargs['nwalkers'] is None: nwalkers = 50 else: @@ -1356,14 +1463,15 @@ def extract_companions(self, else: nburn = kwargs['nburn'] if 'nsteps' not in kwargs.keys() or kwargs['nsteps'] is None: - nsteps = 200 + nsteps = 100 else: nsteps = kwargs['nsteps'] if 'nthreads' not in kwargs.keys() or kwargs['nthreads'] is None: nthreads = 4 else: nthreads = kwargs['nthreads'] - + + # Run the MCMC fit. chain_output = os.path.join(output_dir_kl, key + '-bka_chain_c%.0f' % (k + 1) + '.pkl') fma.fit_astrometry(nwalkers=nwalkers, nburn=nburn, @@ -1533,14 +1641,44 @@ def extract_companions(self, # Otherwise. else: raise NotImplementedError() + + # Plot estimated background level. + if remove_background: + f, ax = plt.subplots(1, 4, figsize=(4 * 6.4, 4.8)) + p0 = ax[0].imshow(res, origin='lower') + c0 = plt.colorbar(p0, ax=ax[0]) + c0.set_label('Flux (arbitrary units)', rotation=270, labelpad=20) + text = ax[0].text(0.99, 0.99, 'Contrast = %.3e' % con, ha='right', va='top', transform=ax[0].transAxes) + text.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='white')]) + ax[0].set_title('Residuals before bg. 
subtraction') + p1 = ax[1].imshow(np.abs(psf_shift) < thresh, origin='lower') + c1 = plt.colorbar(p1, ax=ax[1]) + ax[1].set_title('Pixels used for bg. estimation') + ax[2].hist(stamp[np.abs(psf_shift) < thresh], bins=20) + ax[2].axvline(bg, ls='--', color='black', label='bg. = %.2f' % bg) + ax[2].set_xlabel('Pixel value') + ax[2].set_ylabel('Occurrence') + ax[2].legend(loc='upper right') + ax[2].set_title('Distribution of bg. pixels') + con = fma.fit_flux.bestfit * guess_flux + res = stamp - fma.fit_flux.bestfit * psf_shift + imgs = ax[0].get_images() + if len(imgs) > 0: + vmin, vmax = imgs[0].get_clim() + p3 = ax[3].imshow(res, origin='lower', vmin=vmin, vmax=vmax) + c3 = plt.colorbar(p3, ax=ax[3]) + c3.set_label('Flux (arbitrary units)', rotation=270, labelpad=20) + text = ax[3].text(0.99, 0.99, 'Contrast = %.3e' % con, ha='right', va='top', transform=ax[3].transAxes) + text.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='white')]) + ax[3].set_title('Residuals after bg. subtraction') + plt.tight_layout() + path = os.path.join(output_dir_kl, key + '-bgest_c%.0f' % (k + 1) + '.pdf') + plt.savefig(path) + plt.close() # Subtract companion before fitting the next one. if subtract or inject: - # Make copy of the original pyKLIP dataset. - if k == 0: - dataset_orig = copy.deepcopy(dataset) - # Subtract companion from pyKLIP dataset. Use offset # PSFs w/o high-pass filtering because this will be # applied by the klip_dataset routine below. @@ -1548,68 +1686,71 @@ def extract_companions(self, ra = companions[k][0] # arcsec dec = companions[k][1] # arcsec con = companions[k][2] - inputflux = con * np.array(all_offsetpsfs) # positive to inject companion + inputflux = con * np.array(all_offsetpsfs_nohpf) # positive to inject companion fileprefix = 'INJECTED-' + key else: ra = tab[-1]['RA'] # arcsec dec = tab[-1]['DEC'] # arcsec con = tab[-1]['CON'] - inputflux = -con * np.array(all_offsetpsfs) # negative to remove companion + inputflux = -con * np.array(all_offsetpsfs_nohpf) # negative to remove companion fileprefix = 'KILLED-' + key sep = np.sqrt(ra**2 + dec**2) / pxsc_arcsec # pix pa = np.rad2deg(np.arctan2(ra, dec)) # deg thetas = [pa + 90. - all_pa for all_pa in all_pas] - fakes.inject_planet(frames=dataset.input, centers=dataset.centers, inputflux=inputflux, astr_hdrs=dataset.wcs, radius=sep, pa=pa, thetas=np.array(thetas), field_dependent_correction=None) - - # Copy pre-KLIP files. - for filepath in filepaths: - src = filepath - dst = os.path.join(output_dir_pk, os.path.split(src)[1]) - shutil.copy(src, dst) - for psflib_filepath in psflib_filepaths: - src = psflib_filepath - dst = os.path.join(output_dir_pk, os.path.split(src)[1]) - shutil.copy(src, dst) + fakes.inject_planet(frames=dataset_orig.input, centers=dataset_orig.centers, inputflux=inputflux, astr_hdrs=dataset_orig.wcs, radius=sep, pa=pa, thetas=np.array(thetas), field_dependent_correction=None) - # Update content of pre-KLIP files. - filenames = dataset.filenames.copy() - for l, filename in enumerate(filenames): - filenames[l] = filename[:filename.find('_INT')] - for filepath in filepaths: - ww_file = filenames == os.path.split(filepath)[1] - file = os.path.join(output_dir_pk, os.path.split(filepath)[1]) - hdul = fits.open(file) - hdul['SCI'].data = dataset.input[ww_file] - hdul.writeto(file, output_verify='fix', overwrite=True) - hdul.close() - - # Update and write observations database. 
- temp = self.database.obs.copy() - for l in range(len(self.database.obs[key])): - file = os.path.split(self.database.obs[key]['FITSFILE'][l])[1] - self.database.obs[key]['FITSFILE'][l] = os.path.join(output_dir_pk, file) - file = os.path.split(self.database.red[key]['FITSFILE'][j])[1] - file = file[file.find('JWST'):file.find('-KLmodes-all')] - file = os.path.join(output_dir_fm, file + '.dat') - self.database.obs[key].write(file, format='ascii', overwrite=True) - self.database.obs = temp + if save_preklip: + + # Copy pre-KLIP files. + for filepath in filepaths: + src = filepath + dst = os.path.join(output_dir_pk, os.path.split(src)[1]) + shutil.copy(src, dst) + for psflib_filepath in psflib_filepaths: + src = psflib_filepath + dst = os.path.join(output_dir_pk, os.path.split(src)[1]) + shutil.copy(src, dst) + + # Update content of pre-KLIP files. + filenames = dataset_orig.filenames.copy() + for l, filename in enumerate(filenames): + filenames[l] = filename[:filename.find('_INT')] + for filepath in filepaths: + ww_file = filenames == os.path.split(filepath)[1] + file = os.path.join(output_dir_pk, os.path.split(filepath)[1]) + hdul = fits.open(file) + hdul['SCI'].data = dataset_orig.input[ww_file] + hdul.writeto(file, output_verify='fix', overwrite=True) + hdul.close() + + # Update and write observations database. + temp = self.database.obs.copy() + for l in range(len(self.database.obs[key])): + file = os.path.split(self.database.obs[key]['FITSFILE'][l])[1] + self.database.obs[key]['FITSFILE'][l] = os.path.join(output_dir_pk, file) + file = os.path.split(self.database.red[key]['FITSFILE'][j])[1] + file = file[file.find('JWST'):file.find('-KLmodes-all')] + file = os.path.join(output_dir_fm, file + '.dat') + self.database.obs[key].write(file, format='ascii', overwrite=True) + self.database.obs = temp # Reduce companion-subtracted data. mode = self.database.red[key]['MODE'][j] annuli = self.database.red[key]['ANNULI'][j] subsections = self.database.red[key]['SUBSECTS'][j] - parallelized.klip_dataset(dataset=dataset, + parallelized.klip_dataset(dataset=dataset_orig, mode=mode, outputdir=output_dir_fm, fileprefix=fileprefix, annuli=annuli, subsections=subsections, - movement=1, + movement=1., numbasis=klmodes, maxnumbasis=maxnumbasis, calibrate_flux=False, - psf_library=dataset.psflib, - highpass=False, + aligned_center=dataset_orig._centers[0], + psf_library=dataset_orig.psflib, + highpass=highpass_temp, verbose=False) head = fits.getheader(self.database.red[key]['FITSFILE'][j], 0) temp = os.path.join(output_dir_fm, fileprefix + '-KLmodes-all.fits') @@ -1617,13 +1758,13 @@ def extract_companions(self, hdul[0].header = head hdul.writeto(temp, output_verify='fix', overwrite=True) hdul.close() + + # Restore original pyKLIP dataset. + if subtract: + dataset = dataset_orig # Update source database. self.database.update_src(key, j, tab) - - # Restore original pyKLIP dataset. 
- if subtract: - dataset = dataset_orig pass diff --git a/spaceKLIP/coron1pipeline.py b/spaceKLIP/coron1pipeline.py index 0601115c..41fb7c6c 100644 --- a/spaceKLIP/coron1pipeline.py +++ b/spaceKLIP/coron1pipeline.py @@ -10,18 +10,24 @@ import pdb import sys -import astropy.io.fits as pyfits +import astropy.io.fits as fits import numpy as np from tqdm import trange from jwst.lib import reffile_utils +from jwst.stpipe import Step +from jwst import datamodels from jwst.datamodels import dqflags, RampModel, SaturationModel from jwst.pipeline import Detector1Pipeline, Image2Pipeline, Coron3Pipeline from .fnoise_clean import kTCSubtractStep, OneOverfStep - +from .expjumpramp import ExperimentalJumpRampStep from webbpsf_ext import robust +from scipy.interpolate import interp1d +from skimage.metrics import structural_similarity + +import warnings import logging log = logging.getLogger(__name__) log.setLevel(logging.INFO) @@ -70,8 +76,10 @@ def __init__(self, """ + self.step_defs['mask_groups'] = MaskGroupsStep self.step_defs['subtract_ktc'] = kTCSubtractStep self.step_defs['subtract_1overf'] = OneOverfStep + self.step_defs['experimental_jumpramp'] = ExperimentalJumpRampStep # Initialize Detector1Pipeline class. super(Coron1Pipeline_spaceKLIP, self).__init__(**kwargs) @@ -123,17 +131,17 @@ def process(self, input = self.run_step(self.group_scale, input) input = self.run_step(self.dq_init, input) input = self.run_step(self.saturation, input) - input = self.run_step(self.ipc, input) + #input = self.run_step(self.ipc, input) Not run for MIRI input = self.run_step(self.firstframe, input) input = self.run_step(self.lastframe, input) input = self.run_step(self.reset, input) input = self.run_step(self.linearity, input) input = self.run_step(self.rscd, input) input = self.run_step(self.dark_current, input) - input = self.do_refpix(input) - input = self.run_step(self.charge_migration, input) + input = self.run_step(self.refpix, input) + #input = self.run_step(self.charge_migration, input) Not run for MIRI input = self.run_step(self.jump, input) - # TODO: Include same / similar subtract_1overf step??? + input = self.run_step(self.mask_groups, input) else: input = self.run_step(self.group_scale, input) input = self.run_step(self.dq_init, input) @@ -147,6 +155,7 @@ def process(self, input = self.run_step(self.charge_migration, input) input = self.run_step(self.jump, input) input = self.run_step(self.subtract_ktc, input) + #1overf Only present in NIR data if 'groups' in self.stage_1overf: input = self.run_step(self.subtract_1overf, input) @@ -155,19 +164,28 @@ def process(self, self.save_model(input, suffix='ramp') # Run ramp fitting & gain scale correction. 
- res = self.run_step(self.ramp_fit, input, save_results=False) - rate, rateints = (res, None) if self.ramp_fit.skip else res + if not self.experimental_jumpramp.use: + # Use the default ramp fitting procedure + res = self.run_step(self.ramp_fit, input, save_results=False) + rate, rateints = (res, None) if self.ramp_fit.skip else res + else: + # Use the experimental fitting procedure + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + res = self.run_step(self.experimental_jumpramp, input) + rate, rateints = res + if self.rate_int_outliers and rateints is not None: # Flag additional outliers by comparing rateints and refit ramp input = self.apply_rateint_outliers(rateints, input) if input is None: - input, ints_model = (rate, rateints) + input, ints_model = rate, rateints else: res = self.run_step(self.ramp_fit, input, save_results=False) input, ints_model = (res, None) if self.ramp_fit.skip else res else: # input is the rate product, ints_model is the rateints product - input, ints_model = (rate, rateints) + input, ints_model = rate, rateints if input is None: self.ramp_fit.log.info('NoneType returned from ramp fitting. Gain scale correction skipped') @@ -661,14 +679,14 @@ def run_single_file(fitspath, output_dir, steps={}, verbose=False, **kwargs): # Skip dark current for subarray by default, but not full frame skip_dark = kwargs.get('skip_dark', None) if skip_dark is None: - hdr0 = pyfits.getheader(fitspath, ext=0) + hdr0 = fits.getheader(fitspath, ext=0) is_full_frame = 'FULL' in hdr0['SUBARRAY'] skip_dark = False if is_full_frame else True pipeline.dark_current.skip = skip_dark # Determine reference pixel correction parameters based on # instrument aperture name for NIRCam - hdr0 = pyfits.getheader(fitspath, 0) + hdr0 = fits.getheader(fitspath, 0) if hdr0['INSTRUME'] == 'NIRCAM': # Array of reference pixel borders [lower, upper, left, right] nb, nt, nl, nr = nrc_ref_info(hdr0['APERNAME'], orientation='sci') @@ -702,6 +720,12 @@ def run_single_file(fitspath, output_dir, steps={}, verbose=False, **kwargs): for key1 in steps.keys(): for key2 in steps[key1].keys(): setattr(getattr(pipeline, key1), key2, steps[key1][key2]) + + #Override jump & ramp if necessary + if pipeline.experimental_jumpramp.use: + log.info("Experimental jump/ramp fitting selected, regular jump and ramp will be skipped...") + pipeline.jump.skip = True + pipeline.ramp_fit.skip = True # Run Coron1Pipeline. Raise exception on error. # Ensure that pipeline is closed out. @@ -858,11 +882,13 @@ def run_obs(database, else: itervals = range(nkeys) + groupmaskflag = 0 # Set flag for group masking + skip_revert = False # Set flag for skipping a file # Loop through concatenations. for i in itervals: key = keys[i] if not quiet: log.info('--> Concatenation ' + key) - + # Loop through FITS files. 
nfitsfiles = len(database.obs[key]) jtervals = trange(nfitsfiles, desc='FITS files', leave=False) if quiet else range(nfitsfiles) @@ -875,17 +901,403 @@ def run_obs(database, if not quiet: log.info(' --> Coron1Pipeline: skipping non-stage 0 file ' + tail) continue + # Need to do some preparation steps for group masking before running pipeline + steps['mask_groups'] = steps.setdefault('mask_groups', {}) + if not steps['mask_groups']: + # If mask_groups unspecified or has no parameters, skip by default + steps['mask_groups']['skip'] = True + else: + # If mask_groups specified but skip isn't mentioned, set to False + steps['mask_groups'].setdefault('skip', False) + steps['mask_groups'].setdefault('mask_method', 'basic') + steps['mask_groups'].setdefault('types', ['REF', 'REF_BG']) + + # Check if we are skipping the mask_groups, if not run routine. + if not steps['mask_groups']['skip']: + if ('mask_array' not in steps['mask_groups']) and (groupmaskflag == 0): + # set a flag that we are running the group optimisation + groupmaskflag = 1 + + # Even if we are not skipping the routine, at the moment it only works on + # REF/REF_BG data, and don't want to run on unspecified file types + file_type = database.obs[key]['TYPE'][j] + this_skip = file_type not in steps['mask_groups']['types'] + if not this_skip and file_type not in ['REF', 'REF_BG']: + log.info(' --> Group masking only works for reference images at this time! Skipping...') + this_skip = True + + # Don't run function prep function if we don't need to + if not this_skip: + if steps['mask_groups']['mask_method'] == 'basic': + steps = prepare_group_masking_basic(steps, + database.obs[key], + quiet) + elif steps['mask_groups']['mask_method'] == 'advanced': + fitstype = database.obs[key]['TYPE'][j] + steps = prepare_group_masking_advanced(steps, + database.obs[key], + fitspath, + fitstype, + quiet) + else: + # Even though we are using mask_groups, this particular file will not have any groups masked + # Instruct to skip the step, but keep a record using skip_revert so we can undo for the next file. + steps['mask_groups']['skip'] = True + skip_revert = True + # Get expected output file name outfile_name = tail.replace('uncal.fits', 'rateints.fits') fitsout_path = os.path.join(output_dir, outfile_name) # Skip if file already exists and overwrite is False. if os.path.isfile(fitsout_path) and not overwrite: - if not quiet: log.info(' --> Coron1Pipeline: skipping already processed file ' + tail) + if not quiet: log.info(' --> Coron1Pipeline: skipping already processed file ' + + tail) else: if not quiet: log.info(' --> Coron1Pipeline: processing ' + tail) _ = run_single_file(fitspath, output_dir, steps=steps, verbose=verbose, **kwargs) + + if skip_revert: + # Need to make sure we don't skip later files if we just didn't want to mask_groups for this file + steps['mask_groups']['skip'] = False + skip_revert = False + + if (j == jtervals[-1]) and (groupmaskflag == 1): + '''This is the last file for this concatenation, and the groupmaskflag has been + set. This means we need to reset the mask_array back to original state, + which was that it didn't exist, so that the routine is rerun. ''' + groupmaskflag = 0 + + if steps['mask_groups']['mask_method'] == 'basic': + del steps['mask_groups']['mask_array'] + elif steps['mask_groups']['mask_method'] == 'advanced': + del steps['mask_groups']['maxgrps_faint'] + del steps['mask_groups']['maxgrps_bright'] + # Update spaceKLIP database. 
         database.update_obs(key, j, fitsout_path)
+
+def prepare_group_masking_basic(steps, observations, quiet=False):
+
+    if 'mask_array' not in steps['mask_groups']:
+        '''First time in the file loop, or groups_to_mask has been preset,
+        run the optimisation and set groups to mask. '''
+        if not quiet:
+            log.info(' --> Coron1Pipeline: Optimizing number of groups to mask in ramp,'
+                     ' this may take a few minutes.')
+
+        if 'cropwidth' not in steps['mask_groups']:
+            steps['mask_groups']['cropwidth'] = 20
+        if 'edgewidth' not in steps['mask_groups']:
+            steps['mask_groups']['edgewidth'] = 10
+
+        # Get crop width, part of image we care about
+        crop = steps['mask_groups']['cropwidth']
+        edge = steps['mask_groups']['edgewidth']
+
+        # Get our cropped science frames and reference cubes
+        sci_frames = []
+        ref_cubes = []
+        nfitsfiles = len(observations)
+        for j in range(nfitsfiles):
+            if observations['TYPE'][j] == 'SCI':
+                with fits.open(os.path.abspath(observations['FITSFILE'][j])) as hdul:
+                    sci_frame = hdul['SCI'].data[:, -1, :, :].astype(float)
+
+                    # Subtract a median so we focus on brightest pixels
+                    sci_frame -= np.nanmedian(sci_frame, axis=(1,2), keepdims=True)
+
+                    # Crop around CRPIX
+                    crpix_x, crpix_y = hdul["SCI"].header["CRPIX1"], hdul["SCI"].header["CRPIX2"]
+                    xlo = int(crpix_x) - crop
+                    xhi = int(crpix_x) + crop
+                    ylo = int(crpix_y) - crop
+                    yhi = int(crpix_y) + crop
+                    sci_frame = sci_frame[:, ylo:yhi, xlo:xhi]
+
+                    # Now going to set the core to NaN, so we focus less on the highly
+                    # variable PSF core
+                    sci_frame[:, edge:-edge, edge:-edge] = np.nan
+
+                    sci_frames.append(sci_frame)
+            elif observations['TYPE'][j] == 'REF':
+                with fits.open(os.path.abspath(observations['FITSFILE'][j])) as hdul:
+                    ref_cube = hdul['SCI'].data.astype(float)
+                    ref_shape = ref_cube.shape
+
+                    # Subtract a median so we focus on brightest pixels
+                    ref_cube -= np.nanmedian(ref_cube, axis=(2,3), keepdims=True)
+
+                    # Crop around CRPIX
+                    crpix_x, crpix_y = hdul["SCI"].header["CRPIX1"], hdul["SCI"].header["CRPIX2"]
+                    xlo = int(crpix_x) - crop
+                    xhi = int(crpix_x) + crop
+                    ylo = int(crpix_y) - crop
+                    yhi = int(crpix_y) + crop
+                    ref_cube = ref_cube[:, :, ylo:yhi, xlo:xhi]
+
+                    # Now going to set the core to NaN, so we focus less on the highly
+                    # variable PSF core
+                    ref_cube[:, :, edge:-edge, edge:-edge] = np.nan
+
+                    ref_cubes.append(ref_cube)
+
+        # Want to check against every integration of every science dataset to find whichever
+        # matches the best, then use that for the scaling.
+        max_grp_to_use = []
+        for sci_i, sci_frame in enumerate(sci_frames):
+            for int_i, sci_last_group in enumerate(sci_frame):
+                # Compare every reference group to this integration
+                best_diff = np.inf
+                for ref_cube in ref_cubes:
+                    this_cube_diffs = []
+                    for ref_int in ref_cube:
+                        this_int_diffs = []
+                        for ref_group in ref_int:
+                            diff = np.abs(np.nansum(ref_group)-np.nansum(sci_last_group))
+                            this_int_diffs.append(diff)
+                        this_cube_diffs.append(this_int_diffs)
+
+                    # Is the median of these diffs better than other reference cubes?
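# (Editorial sketch, not part of this diff: np.argmin(this_cube_diffs, axis=1)
#  picks the best-matching group index per reference integration and np.median
#  collapses those into one value per cube, e.g.
#      this_cube_diffs = np.array([[5., 1., 9.], [7., 2., 3.]])
#      np.argmin(this_cube_diffs, axis=1)             # -> array([1, 1])
#      np.median(np.argmin(this_cube_diffs, axis=1))  # -> 1.0)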
+                    if np.nanmin(this_cube_diffs) < best_diff:
+                        # If yes, this reference cube is a better match to the science
+                        best_diff = np.nanmin(this_cube_diffs)
+                        best_maxgrp = np.median(np.argmin(this_cube_diffs, axis=1))
+                max_grp_to_use.append(best_maxgrp)
+
+        # Assemble array of groups to mask, starting one above the max group
+        final_max_grp_to_use = int(np.nanmedian(max_grp_to_use))
+        groups_to_mask = np.arange(final_max_grp_to_use+1, ref_cubes[0].shape[1])
+
+        # Make the mask array
+        mask_array = np.zeros(ref_shape, dtype=bool)
+        mask_array[:,groups_to_mask,:,:] = 1
+
+        # Assign to steps so this stage doesn't get repeated.
+        steps['mask_groups']['mask_array'] = mask_array
+
+    return steps
+
+def prepare_group_masking_advanced(steps, observations, refpath, reftype, quiet=False):
+
+    '''
+    Advanced group masking method which computes the group mask on a pixel-by-pixel
+    and reference-cube-by-reference-cube basis
+    '''
+
+    if 'cropwidth' not in steps['mask_groups']:
+        steps['mask_groups']['cropwidth'] = 30
+    if 'edgewidth' not in steps['mask_groups']:
+        steps['mask_groups']['edgewidth'] = 20
+    if 'threshold' not in steps['mask_groups']:
+        steps['mask_groups']['threshold'] = 85
+
+    # Get crop width, part of image we care about
+    crop = steps['mask_groups']['cropwidth']
+    edge = steps['mask_groups']['edgewidth']
+    threshold = steps['mask_groups']['threshold']
+
+    if ('maxgrps_faint' not in steps['mask_groups'] or
+        'maxgrps_bright' not in steps['mask_groups'] or
+        'cropmask' not in steps['mask_groups'] or
+        'refbg_maxcounts' not in steps['mask_groups']):
+        '''First time in the file loop, or groups_to_mask has been preset,
+        run the optimisation and set groups to mask. '''
+
+        sci_frames = []
+        sci_crpixs = []
+        ref_cubes = []
+        ref_crpixs = []
+
+        nfitsfiles = len(observations)
+        for j in range(nfitsfiles):
+            if observations['TYPE'][j] == 'SCI':
+                with fits.open(os.path.abspath(observations['FITSFILE'][j])) as hdul:
+                    sci_frame = hdul['SCI'].data[:, -1, :, :].astype(float)
+                    sci_crpix_x, sci_crpix_y = hdul["SCI"].header["CRPIX1"], hdul["SCI"].header["CRPIX2"]
+                    sci_frames.append(sci_frame)
+                    sci_crpixs.append([sci_crpix_x, sci_crpix_y])
+            elif observations['TYPE'][j] == 'REF':
+                with fits.open(os.path.abspath(observations['FITSFILE'][j])) as hdul:
+                    ref_cube = hdul['SCI'].data.astype(float)
+                    ref_crpix_x, ref_crpix_y = hdul["SCI"].header["CRPIX1"], hdul["SCI"].header["CRPIX2"]
+                    ref_cubes.append(ref_cube)
+                    ref_crpixs.append([ref_crpix_x, ref_crpix_y])
+
+        # Crop and median science
+        sci_frames_cropped = []
+        for i, scif in enumerate(sci_frames):
+            crpix_x, crpix_y = sci_crpixs[i]
+            xlo = int(crpix_x) - crop
+            xhi = int(crpix_x) + crop
+            ylo = int(crpix_y) - crop
+            yhi = int(crpix_y) + crop
+            sci_frames_cropped.append(scif[:, ylo:yhi, xlo:xhi])
+
+        sci_frames_modified = np.nanmedian(sci_frames_cropped, axis=1) #Median over integrations
+
+        # Crop and median reference
+        ref_cubes_cropped = []
+        for i, refc in enumerate(ref_cubes):
+            crpix_x, crpix_y = ref_crpixs[i]
+            xlo = int(crpix_x) - crop
+            xhi = int(crpix_x) + crop
+            ylo = int(crpix_y) - crop
+            yhi = int(crpix_y) + crop
+            ref_cubes_cropped.append(refc[:, :, ylo:yhi, xlo:xhi])
+
+        ref_cubes_modified = np.nanmedian(ref_cubes_cropped, axis=1) #Median over integrations
+
+        # Median subtract the images
+        sci_frames_medsub = sci_frames_modified - np.nanmedian(sci_frames_modified, axis=(1,2), keepdims=True)
+        ref_cubes_medsub = ref_cubes_modified - np.nanmedian(ref_cubes_modified, axis=(2,3), keepdims=True)
+
+        # Flatten array
and find indices above percentile threshold value + sci_frames_flat = np.reshape(sci_frames_medsub, (sci_frames_medsub.shape[0], -1)) + per = np.percentile(sci_frames_flat, [threshold]) + above_threshold_indices = np.where(sci_frames_medsub > per) + + # Create an empty mask array + mask = np.zeros_like(sci_frames_medsub, dtype=bool) + + # Define function to expand indices and update mask + def expand_and_update_mask(indices, mask, xpad=2, ypad=2): + for z, x, y in zip(*indices): + mask[z, max(0, x-xpad):min(mask.shape[1], x+xpad), max(0, y-ypad):min(mask.shape[2], y+ypad)] = True + + # Expand indices and update mask for each 2D slice + for z_slice in range(sci_frames_medsub.shape[0]): + indices_slice = np.where(above_threshold_indices[0] == z_slice) + expand_and_update_mask((above_threshold_indices[0][indices_slice], above_threshold_indices[1][indices_slice], above_threshold_indices[2][indices_slice]), mask) + + # Okay now make some hollowed out cropped frames, to focus on fainter, but still bright, pixels + sci_frames_hollow = sci_frames_medsub.copy() + sci_frames_hollow[:, edge:-edge, edge:-edge] = np.nan + sci_frames_hollow[~mask] = np.nan + + ref_cubes_hollow = ref_cubes_medsub.copy() + ref_cubes_hollow[:, :, edge:-edge, edge:-edge] = np.nan + + # Create a 4D mask with zeros for reference + ref_cubes_shape = ref_cubes_hollow.shape + mask_4d = np.zeros(ref_cubes_shape, dtype=bool) + mask_ref = np.tile(mask[0:1], (ref_cubes_shape[0],1,1)) + + for i in range(mask_4d.shape[1]): + temp = mask_4d[:,i,:,:] + temp[mask_ref] = True + mask_4d[:,i,:,:] = temp + ref_cubes_hollow[~mask_4d] = np.nan + + # Now run the routine to figure out which groups to mask + best_faint_maxgrps = [] + best_bright_maxgrps = [] + ref_peak_pixels = [] + for i, scif in enumerate(sci_frames_hollow): + for j, refc in enumerate(ref_cubes_hollow): + # Need to save the peak pixel from each reference, as we'll use this for + # the REF_BG frame interpolations + if i == 0: + ref_peak_pixels.append(np.nanmax(ref_cubes_medsub[j][-1])) + this_faint_diffs= [] + this_bright_diffs = [] + for refg in refc: + faint_diff = np.abs(np.nanmedian(refg)-np.nanmedian(scif)) + this_faint_diffs.append(faint_diff) + for refg in ref_cubes_medsub[j]: + bright_diff = np.abs(np.nanmax(refg)-np.nanmax(sci_frames_medsub[i])) + this_bright_diffs.append(bright_diff) + + best_faint_maxgrp = np.argmin(this_faint_diffs) + best_faint_maxgrps.append(best_faint_maxgrp) + + best_bright_maxgrp = np.argmin(this_bright_diffs) + best_bright_maxgrps.append(best_bright_maxgrp) + + maxgrps_faint = int(np.nanmedian(best_faint_maxgrps)) + maxgrps_bright = int(np.nanmedian(best_bright_maxgrps)) + + steps['mask_groups']['maxgrps_faint'] = maxgrps_faint + steps['mask_groups']['maxgrps_bright'] = maxgrps_bright + steps['mask_groups']['cropmask'] = mask + steps['mask_groups']['refbg_maxcounts'] = np.nanmedian(ref_peak_pixels) + + # Now read in the specific reference file we're looking at. 
+    with fits.open(refpath) as hdul:
+        refshape = hdul['SCI'].data.shape
+        ref_ints_slice = hdul['SCI'].data[:,-1,:,:].astype(float)
+        ref_slice = np.nanmedian(ref_ints_slice, axis=0)
+        ref_slice -= np.nanmedian(ref_slice)
+        ref_crpix_x, ref_crpix_y = hdul["SCI"].header["CRPIX1"], hdul["SCI"].header["CRPIX2"]
+
+    # Get the peak pixel count in the last group, only look at the PSF core
+    xlo = int(ref_crpix_x) - crop
+    xhi = int(ref_crpix_x) + crop
+    ylo = int(ref_crpix_y) - crop
+    yhi = int(ref_crpix_y) + crop
+    ref_slice_cropped = ref_slice[ylo:yhi, xlo:xhi]
+
+    if 'BG' in reftype:
+        maxcounts = steps['mask_groups']['refbg_maxcounts']
+    else:
+        maxcounts = np.nanmax(ref_slice_cropped)
+
+    # Get the median pixel count in our mask area from earlier
+    ref_slice_hollow = ref_slice_cropped.copy()
+    ref_slice_hollow[edge:-edge, edge:-edge] = np.nan
+    all_mincounts = []
+    for saved_mask in steps['mask_groups']['cropmask']:
+        ref_slice_masked = ref_slice_hollow.copy()
+        ref_slice_masked[~saved_mask] = np.nan
+        all_mincounts.append(np.nanmedian(ref_slice_masked))
+    mincounts = np.nanmedian(all_mincounts)
+
+    # Now make an interpolation connecting counts to the number of groups to be masked.
+    # Note that interp1d sorts the x values, so the fill values are ordered
+    # (faint, bright) to clamp pixels below mincounts and above maxcounts.
+    maxgrps_interp = interp1d([maxcounts, mincounts],
+                              [steps['mask_groups']['maxgrps_bright'],steps['mask_groups']['maxgrps_faint']],
+                              kind='linear',
+                              bounds_error=False,
+                              fill_value=(steps['mask_groups']['maxgrps_faint'],steps['mask_groups']['maxgrps_bright']))
+
+    # Now use the interpolation to set the mask array, zero values will be included in the ramp fit
+    mask_array = np.zeros(refshape, dtype=bool)
+    for ri in range(mask_array.shape[2]):
+        for ci in range(mask_array.shape[3]):
+            if ref_slice[ri, ci] >= mincounts:
+                # Determine number of groups
+                this_grps = int(maxgrps_interp(ref_slice[ri, ci]))
+                groups_to_mask = np.arange(this_grps+1, refshape[1])
+                mask_array[:,groups_to_mask,ri,ci] = 1
+
+    # Assign the mask array to the steps dictionary
+    steps['mask_groups']['mask_array'] = mask_array
+
+    return steps
+
+class MaskGroupsStep(Step):
+    """
+    Mask particular groups prior to ramp fitting
+    """
+    class_alias = "maskgroups"
+
+    spec = """
+        mask_sigma_med = float(default=3) #Only mask pixels Nsigma above the median
+        mask_window = integer(default=2) #Also mask pixels within N pixels of a masked pixel
+    """
+
+    def process(self, input):
+        """Mask particular groups prior to ramp fitting"""
+        with datamodels.open(input) as input_model:
+            datamodel = input_model.copy()
+
+            # Flag the selected groups as DO_NOT_USE, preserving any existing DQ bits
+            datamodel.groupdq[self.mask_array] = np.bitwise_or(datamodel.groupdq[self.mask_array],
+                                                               dqflags.group['DO_NOT_USE'])
+
+            return datamodel
+
diff --git a/spaceKLIP/coron2pipeline.py b/spaceKLIP/coron2pipeline.py
index a03a41cf..b071a85b 100644
--- a/spaceKLIP/coron2pipeline.py
+++ b/spaceKLIP/coron2pipeline.py
@@ -344,7 +344,7 @@ def run_obs(database,
                             verbose=verbose, **kwargs)
 
         # Update spaceKLIP database.
-        database.update_obs(key, j, fitsout_path)
+        database.update_obs(key, j, fitsout_path, update_pxar=True)
 
         # Also process rate files?
if do_rates: diff --git a/spaceKLIP/database.py b/spaceKLIP/database.py index 1fa3be1f..89204015 100644 --- a/spaceKLIP/database.py +++ b/spaceKLIP/database.py @@ -20,8 +20,8 @@ import webbpsf, webbpsf_ext from astropy.table import Table -from astroquery.svo_fps import SvoFps from jwst.pipeline import Detector1Pipeline, Image2Pipeline, Coron3Pipeline +from stdatamodels.jwst import datamodels from .utils import nircam_apname, get_nrcmask_from_apname, get_filter_info @@ -191,6 +191,7 @@ def read_jwst_s012_data(self, APERNAME = [] PPS_APER = [] PIXSCALE = [] # arcsec + PIXAR_SR = [] # sr BUNIT = [] CRPIX1 = [] # pix CRPIX2 = [] # pix @@ -274,6 +275,7 @@ def read_jwst_s012_data(self, raise UserWarning('Data originates from unknown telescope') BLURFWHM += [head.get('BLURFWHM', np.nan)] head = hdul['SCI'].header + PIXAR_SR += [head.get('PIXAR_SR', np.nan)] BUNIT += [head.get('BUNIT', 'NONE')] if cr_from_siaf: CRPIX1 += [ap.XSciRef] @@ -313,6 +315,7 @@ def read_jwst_s012_data(self, APERNAME = np.array(APERNAME) PPS_APER = np.array(PPS_APER) PIXSCALE = np.array(PIXSCALE) + PIXAR_SR = np.array(PIXAR_SR) BUNIT = np.array(BUNIT) CRPIX1 = np.array(CRPIX1) CRPIX2 = np.array(CRPIX2) @@ -410,6 +413,7 @@ def read_jwst_s012_data(self, 'APERNAME', 'PPS_APER', 'PIXSCALE', + 'PIXAR_SR', 'BUNIT', 'CRPIX1', 'CRPIX2', @@ -443,6 +447,7 @@ def read_jwst_s012_data(self, 'object', 'object', 'float', + 'float', 'object', 'float', 'float', @@ -481,10 +486,9 @@ def read_jwst_s012_data(self, maskfile = allpaths[ww][j].replace('.fits', '_psfmask.fits') if not os.path.exists(maskfile): if EXP_TYPE[ww][j] == 'NRC_CORON': - maskpath = APERNAME[ww][j] + '_' + FILTER[ww][j] + '.fits' - maskfile = os.path.join(maskbase, maskpath) - if not os.path.exists(maskfile): - maskfile = 'NONE' + pipeline = Detector1Pipeline() + input = datamodels.open(allpaths[ww][j]) + maskfile = pipeline.get_reference_file(input, 'psfmask') elif EXP_TYPE[ww][j] == 'MIR_4QPM' or EXP_TYPE[ww][j] == 'MIR_LYOT': if APERNAME[ww][j] == 'MIRIM_MASK1065': maskpath = 'JWST_MIRI_F1065C_transmission_webbpsf-ext_v2.fits' @@ -521,6 +525,7 @@ def read_jwst_s012_data(self, APERNAME[ww][j], PPS_APER[ww][j], PIXSCALE[ww][j], + PIXAR_SR[ww][j], BUNIT[ww][j], CRPIX1[ww][j], CRPIX2[ww][j], @@ -627,6 +632,7 @@ def read_jwst_s3_data(self, APERNAME = [] PPS_APER = [] PIXSCALE = [] # arcsec + PIXAR_SR = [] # sr MODE = [] ANNULI = [] SUBSECTS = [] @@ -723,6 +729,7 @@ def read_jwst_s3_data(self, BLURFWHM += [head.get('BLURFWHM', np.nan)] if TYPE[-1] == 'CORON3': head = hdul['SCI'].header + PIXAR_SR += [head.get('PIXAR_SR', np.nan)] BUNIT += [head.get('BUNIT', 'NONE')] if cr_from_siaf: CRPIX1 += [ap.XSciRef] @@ -753,6 +760,7 @@ def read_jwst_s3_data(self, APERNAME = np.array(APERNAME) PPS_APER = np.array(PPS_APER) PIXSCALE = np.array(PIXSCALE) + PIXAR_SR = np.array(PIXAR_SR) MODE = np.array(MODE) ANNULI = np.array(ANNULI) SUBSECTS = np.array(SUBSECTS) @@ -794,6 +802,7 @@ def read_jwst_s3_data(self, 'APERNAME', 'PPS_APER', 'PIXSCALE', + 'PIXAR_SR', 'MODE', 'ANNULI', 'SUBSECTS', @@ -823,6 +832,7 @@ def read_jwst_s3_data(self, 'object', 'object', 'float', + 'float', 'object', 'int', 'int', @@ -858,6 +868,7 @@ def read_jwst_s3_data(self, APERNAME[ww[j]], PPS_APER[ww[j]], PIXSCALE[ww[j]], + PIXAR_SR[ww[j]], MODE[ww[j]], ANNULI[ww[j]], SUBSECTS[ww[j]], @@ -1021,8 +1032,10 @@ def print_obs(self, else: print_tab.remove_columns(['TARG_RA', 'TARG_DEC', 'EXPSTART', 'APERNAME', 'PPS_APER', 'CRPIX1', 'CRPIX2', 'RA_REF', 'DEC_REF', 'FITSFILE', 'MASKFILE']) + print_tab['XOFFSET'] *= 1e3 
print_tab['XOFFSET'] = np.round(print_tab['XOFFSET']) print_tab['XOFFSET'][print_tab['XOFFSET'] == 0.] = 0. + print_tab['YOFFSET'] *= 1e3 print_tab['YOFFSET'] = np.round(print_tab['YOFFSET']) print_tab['YOFFSET'][print_tab['YOFFSET'] == 0.] = 0. print_tab.pprint() @@ -1102,7 +1115,8 @@ def update_obs(self, yoffset=None, crpix1=None, crpix2=None, - blurfwhm=None): + blurfwhm=None, + update_pxar=False): """ Update the content of the observations database. @@ -1138,6 +1152,9 @@ def update_obs(self, blurfwhm : float, optional New FWHM for the Gaussian filter blurring (pix) for the observation to be updated. The default is None. + update_pxar : bool, optional + Update the pixel area column of the database based on the FITS file + header information? The default is False. Returns ------- @@ -1174,6 +1191,12 @@ def update_obs(self, self.obs[key]['FITSFILE'][index] = fitsfile if maskfile is not None: self.obs[key]['MASKFILE'][index] = maskfile + if update_pxar: + try: + pxar = pyfits.getheader(self.obs[key]['FITSFILE'][index], 'SCI')['PIXAR_SR'] + self.obs[key]['PIXAR_SR'][index] = pxar + except: + pass hdul.close() pass diff --git a/spaceKLIP/expjumpramp.py b/spaceKLIP/expjumpramp.py new file mode 100644 index 00000000..41e57b7d --- /dev/null +++ b/spaceKLIP/expjumpramp.py @@ -0,0 +1,1301 @@ +from __future__ import division + +from jwst.stpipe import Step +from jwst import datamodels +from jwst.datamodels import dqflags, RampModel +from stcal.ramp_fitting.utils import set_if_total_ramp +from jwst.ramp_fitting.ramp_fit_step import create_image_model, create_integration_model + +import astropy.io.fits as fits +import numpy as np +from scipy import special +import warnings + +from multiprocessing import Pool + +class ExperimentalJumpRampStep(Step): + """ + Experimental step based on Tim Brandt's jump detection + and ramp fitting algorithm. + """ + class_alias = "experimental_jumpramp" + + spec = """ + use = boolean(default=False) # Use regular pipeline by default + nproc = integer(default=4) # Number of processes to use + noise_scale = float(default=1) #Scale factor for noise estimate + nan_dnu = boolean(default=True) #Set do not use pixels to NaN? + """ + + def process(self, input): + """ Perform jump detection and ramp fitting + + Parameters + ---------- + input : JWST pipeline input + Appropriate input to JWST pipeline step, e.g. fits file name or preloaded + JWST datamodel. + + Returns + ------- + img_model : JWST datamodel + JWST datamodel equivalent to a "rate.fits" file. + int_model : JWST datamodel + JWST datamodel equivalent to a "rateints.fits" file. 
+ + """ + with datamodels.RampModel(input) as input_model: + # Run the jump detection and ramp fitting + ramp_results = self.produce_ramp_images(input_model, self.noise_scale) + + # Update DQ arrays with jumps + input_model = self.update_groupdq(input_model, ramp_results['flagged_diffs']) + + # Convert DQ arrays to 3D + dq_ints = self.create_dq_ints(input_model) + + # Want to set do not use pixels to NaN + if self.nan_dnu: + ramp_results = self.set_dnu_to_nan(ramp_results, dq_ints) + + # Define integration info + integ_info = (ramp_results['sci'], + dq_ints, + ramp_results['var_poisson'], + ramp_results['var_rdnoise'], + ramp_results['err']) + + # Create integration model + int_model = create_integration_model(input_model, integ_info, input_model.int_times) + int_model.meta.bunit_data = 'DN/s' + int_model.meta.bunit_err = 'DN/s' + int_model.meta.cal_step.jump = 'COMPLETE' + int_model.meta.cal_step.ramp_fit = 'COMPLETE' + + # Define image info + im_sci, im_vpo, im_vrd, im_err = self.weighted_average(ramp_results['sci'], + ramp_results['var_poisson'], + ramp_results['var_rdnoise'], + ramp_results['err']) + image_info = (im_sci, + input_model.pixeldq, + im_vpo, + im_vrd, + im_err) + + # Create image model + img_model = create_image_model(input_model, image_info) + img_model.meta.bunit_data = 'DN/s' + img_model.meta.bunit_err = 'DN/s' + img_model.meta.cal_step.jump = 'COMPLETE' + img_model.meta.cal_step.ramp_fit = 'COMPLETE' + + return img_model, int_model + + def produce_ramp_images(self, datamodel, noise_scale): + """ + Function to assemble relevant data and run jump detection and ramp fitting. + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + noise_scale : float + Scale factor to apply to the readnoise + + Returns + ------- + results : dict + Dictionary containing equivalents of the integration level 'SCI', 'ERR', + 'VAR_POISSON', and 'VAR_RDNOISE' arrays. + + """ + + # Assemble covariance and differenced frames. + C = self.create_covar(datamodel) + diffs = self.create_diffs(datamodel, C) + + # Grab reference files and create uncertainties + gain = self.get_subarray_ref(datamodel, 'gain') + rn = self.get_subarray_ref(datamodel, 'readnoise') + sig = noise_scale*gain*rn/2**0.5 + + # Need to multiply data by gain for this method, as it works in electrons + # Must undo this at the end!!! 
+ diffs *= gain + + # Get some values + ncols = datamodel.meta.subarray.xsize + nrows = datamodel.meta.subarray.ysize + + if '-seg' in datamodel.meta.filename: + # This is a multi-segment dataset + int_s = datamodel.meta.exposure.integration_start + int_e = datamodel.meta.exposure.integration_end + nints = (int_e - int_s) + 1 + else: + # This is a single file dataset + nints = datamodel.meta.exposure.nints + + # Define arrays to save the results to + rate_ints = np.empty([nints, nrows, ncols]) + uncert_ints = np.empty([nints, nrows, ncols]) + var_po_ints = np.empty([nints, nrows, ncols]) + var_rd_ints = np.empty([nints, nrows, ncols]) + + # Define array to flag which diff frames to use + all_diffs2use = self.create_alldiffs2use(datamodel, diffs.shape) + + # Loop over each integration, run one ramp at a time + for int_i in range(nints): + print('--> Fitting Integration {}/{}...'.format(int_i+1, nints)) + + # Get the diffs for this integration + int_diffs = diffs[int_i] + + # Get array to save which differences we will use + int_diffs2use = all_diffs2use[int_i] + + # Now loop over each column using multiprocessing + colrange = range(ncols) + with Pool(processes=self.nproc) as pool: + results = pool.map(jumpramp_column_helper, [(i, + int_diffs, + C, + sig, + int_diffs2use) for i in colrange]) + pool.close() + pool.join() + + # Unpack results into the arrays + for i, res in enumerate(results): + res_rate, res_uncert, res_poisson, res_rdnoise, int_diffs2use = res + rate_ints[int_i, :, i] = res_rate + uncert_ints[int_i, :, i] = res_uncert + var_po_ints[int_i, :, i] = res_poisson + var_rd_ints[int_i, :, i] = res_rdnoise + all_diffs2use[int_i, :, :, i] = int_diffs2use + + # Explictly remove the gain scaling + rate_ints /= gain + uncert_ints /= gain + var_po_ints /= gain**2 + var_rd_ints /= gain**2 + + results = {'sci':rate_ints, + 'err':uncert_ints, + 'var_poisson':var_po_ints, + 'var_rdnoise':var_rd_ints, + 'flagged_diffs':all_diffs2use} + + return results + + def create_covar(self, datamodel): + """ Create the covariance matrix + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + + Returns + ------- + C : Covar() object + Covariance matrix object + """ + + # Get readtimes array + dt = datamodel.meta.exposure.frame_time + ngroups = datamodel.meta.exposure.ngroups + readtimes = dt*(np.arange(ngroups)+1) + + # Produce covariance + C = Covar(readtimes) + + return C + + def create_diffs(self, datamodel, C): + """ Create the differenced frames + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + C : Covar() object + Covariance matrix object + + Returns + ------- + diffs : ndarray + Scaled differenced frames of a given set of ramp images. + + """ + im = datamodel.data + + # Difference images + diffs = im[:,1:,:,:]-im[:,:-1,:,:] + + # Scale using covariance + diffs /= C.delta_t[np.newaxis, :, np.newaxis, np.newaxis] + + return diffs + + def get_subarray_ref(self, datamodel, filename, trim=True): + """ Get a reference file and extract on subarray + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + filename : str + File name string for the given reference file, e.g. 'gain' or 'readnoise' + trim : bool + Boolean for trimming reference file to the subarray of the provided datamodel. + + Returns + ------- + subref : ndarray + Loaded reference file array. 
+ + """ + + file = self.get_reference_file(datamodel, filename) + with fits.open(file) as hdul: + full = hdul['SCI'].data + + if trim == True: + # Trim full array to subarray for this data + xstrt = datamodel.meta.subarray.xstart-1 #SUBSTRT1 + ystrt = datamodel.meta.subarray.ystart-1 #SUBSTRT2 + xsize = datamodel.meta.subarray.xsize #SUBSIZE1 + ysize = datamodel.meta.subarray.ysize #SUBSIZE2 + subref = full[ystrt:ystrt+ysize, xstrt:xstrt+xsize] + else: + subref = full + + return subref + + def create_alldiffs2use(self, datamodel, shape): + """ + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + shape : list + Shape of the corresponding diffs array + + Returns + ------- + all_diffs2use : ndarray + Array flagging which diffs should be used in the jump detection and ramp fitting + + """ + + # Make initial diffs2use + all_diffs2use = np.ones(shape, dtype=np.uint8) + + # Want to preflag any do not use pixels + dnu = dqflags.pixel['DO_NOT_USE'] + + grp_dq = datamodel.groupdq.copy() # Don't adjust actual groupdq + dnu_loc = np.bitwise_and(grp_dq, dnu) # Locations of do not use pixels per group + dnu_mask = np.where(dnu_loc > 0) + good_mask = np.where(dnu_loc == 0) + + grp_dq[dnu_mask] = 0 # These are bad + grp_dq[good_mask] = 1 # These are good + + # Add rather than subtract diffs + dq_diffs = grp_dq[:,1:,:,:]+grp_dq[:,:-1,:,:] + + # By adding, any diffs with a value <2 have at least one DNU pixel, so shouldn't be used. + diff_mask = np.where(dq_diffs < 2) + + # Set the diff mask diffs to zero so that aren't used. + all_diffs2use[diff_mask] = 0 + + return all_diffs2use + + def update_groupdq(self, datamodel, flagged_diffs): + """ Update group DQ based on detected jumps + + Parameters + ---------- + datamodel : JWST datamodel + JWST ramp datamodel + flagged_diffs : ndarray + Flagged array of size the same as differenced frames, with jumps marked. + + Returns + ------- + updated_datamodel : JWST datamodel + JWST ramp datamodel with updated groupdq + + """ + + # Create array to hold the jump information and define jump flag + dq_grp = datamodel.groupdq.copy() + jump = dqflags.pixel['JUMP_DET'] + + # Loop over each integration and create jump array + for int_i, integ in enumerate(flagged_diffs): + for i, diff in enumerate(integ): + # Locate the jumps in the differenced frames, Tim's code sets to 0. 
+                jump_check = np.where(diff == 0)
+
+                # If jump is in differenced frame N, then flag as a jump in real frames N and N+1
+                this_dq_n = dq_grp[int_i, i]
+                this_dq_np1 = dq_grp[int_i, i+1]
+
+                this_dq_n[jump_check] = np.bitwise_or(this_dq_n[jump_check], jump)
+                this_dq_np1[jump_check] = np.bitwise_or(this_dq_np1[jump_check], jump)
+
+                dq_grp[int_i, i] = this_dq_n
+                dq_grp[int_i, i+1] = this_dq_np1
+
+        # Apply jump array to groupdq
+        datamodel.groupdq = dq_grp
+
+        return datamodel
+
+    def create_dq_ints(self, datamodel):
+        """ Combine the 2D pixel DQ and 4D group DQ into a 3D per-integration DQ
+
+        Parameters
+        ----------
+        datamodel : JWST datamodel
+            JWST ramp datamodel
+
+        Returns
+        -------
+        dq_ints : ndarray
+            3D data quality array, corresponding to each integration
+
+        """
+
+        # Get 2D/4D arrays
+        dq_pix_base = datamodel.pixeldq
+        dq_grp = datamodel.groupdq
+
+        # Define some DQ flags
+        sat = dqflags.pixel['SATURATED']
+        jump = dqflags.pixel['JUMP_DET']
+        dnu = dqflags.pixel['DO_NOT_USE']
+
+        # Define array to save values to
+        ncols = datamodel.meta.subarray.xsize
+        nrows = datamodel.meta.subarray.ysize
+        if '-seg' in datamodel.meta.filename:
+            # This is a multi-segment dataset
+            int_s = datamodel.meta.exposure.integration_start
+            int_e = datamodel.meta.exposure.integration_end
+            nints = (int_e - int_s) + 1
+        else:
+            # This is a single file dataset
+            nints = datamodel.meta.exposure.nints
+        dq_ints = np.empty([nints, nrows, ncols], dtype=np.uint32)
+
+        # Loop over integrations
+        for int_i in range(nints):
+            # Make copy of Pixel DQ
+            dq_pix = dq_pix_base.copy()
+
+            # This integration DQ
+            dq_grp_int = dq_grp[int_i]
+
+            # Check Saturated and Do Not Use
+            set_if_total_ramp(dq_pix, dq_grp_int, sat | dnu, dnu)
+
+            # Assume complete saturation if group 0 saturated
+            dq_grp0_sat = np.bitwise_and(dq_grp_int[0], sat)
+            dq_pix[dq_grp0_sat != 0] = np.bitwise_or(dq_pix[dq_grp0_sat != 0], sat | dnu)
+
+            # If jump occurs mark the appropriate flag.
+            jump_loc = np.bitwise_and(dq_grp_int, jump)
+            jump_check = np.where(jump_loc.sum(axis=0) > 0)
+            dq_pix[jump_check] = np.bitwise_or(dq_pix[jump_check], jump)
+
+            # Save to main array
+            dq_ints[int_i] = dq_pix
+
+        return dq_ints
+
+    def set_dnu_to_nan(self, ramp_results, dq_ints):
+        ''' Set any do not use pixels to NaNs
+
+        Parameters
+        ----------
+        ramp_results : dict
+            Dictionary containing equivalents of the integration level 'SCI', 'ERR',
+            'VAR_POISSON', and 'VAR_RDNOISE' arrays.
+        dq_ints : ndarray
+            3D data quality array
+
+        Returns
+        -------
+        ramp_results : dict
+            Dictionary containing equivalents of the integration level 'SCI', 'ERR',
+            'VAR_POISSON', and 'VAR_RDNOISE' arrays.
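+
+        Notes
+        -----
+        Only the science, error, and variance arrays are modified; dq_ints
+        itself is left unchanged, so the DO_NOT_USE flags remain available
+        downstream.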
+
+        '''
+
+        # Define do not use pixel check
+        dnu = dqflags.pixel['DO_NOT_USE']
+        dnu_loc = np.bitwise_and(dq_ints, dnu)
+        dnu_check = np.where(dnu_loc > 0)
+
+        # Assign NaNs to arrays
+        ramp_results['sci'][dnu_check] = np.nan
+        ramp_results['var_poisson'][dnu_check] = np.nan
+        ramp_results['var_rdnoise'][dnu_check] = np.nan
+        ramp_results['err'][dnu_check] = np.nan
+
+        return ramp_results
+
+    def weighted_average(self, sci, var_poisson, var_rdnoise, err):
+        ''' Compute a weighted average of a 3D multi-integration dataset
+
+        Parameters
+        ----------
+        sci : ndarray
+            Array of science data
+        var_poisson : ndarray
+            Array of poisson variance
+        var_rdnoise : ndarray
+            Array of readnoise variance
+        err : ndarray
+            Array of uncertainties
+
+        Returns
+        -------
+        w_av_sci : ndarray
+            Weighted average array of science data
+        w_av_vpo : ndarray
+            Weighted average array of poisson variance
+        w_av_vrd : ndarray
+            Weighted average array of readnoise variance
+        w_av_err : ndarray
+            Weighted average array of uncertainties
+
+        '''
+        # Remove NaNs
+        sci = np.nan_to_num(sci)
+        var_poisson = np.nan_to_num(var_poisson)
+        var_rdnoise = np.nan_to_num(var_rdnoise)
+        err = np.nan_to_num(err)
+
+        # Compute inverse-variance weights and normalise them
+        variance = var_poisson + var_rdnoise
+        weights = 1 / variance
+        weights /= np.sum(weights, axis=0)
+
+        # Produce averaged results
+        w_av_sci = np.average(sci, weights=weights, axis=0)
+        w_av_err = np.sqrt(np.average(err**2, weights=weights, axis=0))
+        w_av_vpo = np.average(var_poisson, weights=weights, axis=0)
+        w_av_vrd = np.average(var_rdnoise, weights=weights, axis=0)
+
+        return w_av_sci, w_av_vpo, w_av_vrd, w_av_err
+
+
+def jumpramp_column_helper(args):
+    """ Function to unpack parameters in multiprocessing
+
+    Parameters
+    ----------
+    args : list
+        List of arguments to be passed to the jumpramp calculation
+
+    Returns
+    -------
+    result.countrate : ndarray
+        Array of countrates from ramp fitting.
+    result.uncert : ndarray
+        Array of uncertainties from ramp fitting.
+    result.var_poisson : ndarray
+        Array of poisson variance from ramp fitting.
+    result.var_rdnoise : ndarray
+        Array of readnoise variance from ramp fitting.
+    int_diffs2use : ndarray
+        The integration level differenced frames to be used during ramp fitting.
+
+    """
+
+    i, int_diffs, C, sig, int_diffs2use = args
+    result, int_diffs2use = jumpramp_column(i, int_diffs, C, sig, int_diffs2use)
+    return result.countrate, result.uncert, result.var_poisson, result.var_rdnoise, int_diffs2use
+
+def jumpramp_column(i, int_diffs, C, sig, int_diffs2use):
+    """ Run jump detection and ramp fitting on a specific column of pixels
+    from the group-level differenced data.
+
+    Parameters
+    ----------
+    i : int
+        Index of the column to run ramp fitting over.
+    int_diffs : ndarray
+        Array of differenced frames
+    C : Covar() object
+        Covariance matrix object
+    sig : ndarray
+        Uncertainty on the data
+    int_diffs2use : ndarray
+        The integration level differenced frames to be used during ramp fitting.
+
+    Returns
+    -------
+    result : Ramp_Result() object
+        Output object from ramp fitting algorithm
+
+    int_diffs2use : ndarray
+        Updated integration level differenced frames to be used during ramp fitting.
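+
+    Notes
+    -----
+    Jump detection runs first so that flagged differences are excluded from
+    the ramp fit; the count rates from the masked fit are then recycled
+    (clipped to be non-negative) as the covariance-matrix guess for the
+    final fit.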
+    """
+
+    # Run jump detection
+    int_diffs2use, countrates = mask_jumps(int_diffs[:, :, i],
+                                           C,
+                                           sig[:, i],
+                                           diffs2use=int_diffs2use[:, :, i])
+
+    # Run ramp fitting
+    result = fit_ramps(int_diffs[:, :, i],
+                       C,
+                       sig[:, i],
+                       diffs2use=int_diffs2use,
+                       countrateguess=countrates*(countrates > 0))
+
+    return result, int_diffs2use
+
+
+class Covar:
+
+    """
+
+    class Covar holding read and photon noise components of alpha and
+    beta and the time intervals between the resultant midpoints
+
+    """
+
+    def __init__(self, read_times, pedestal=False):
+
+        """
+
+        Compute alpha and beta, the diagonal and off-diagonal elements of
+        the covariance matrix of the resultant differences, and the time
+        intervals between the resultant midpoints.
+
+        Arguments:
+        1. read_times [list of values or lists for the times of reads. If
+                       a list of lists, times for reads that are averaged
+                       together to produce a resultant.]
+
+        Optional arguments:
+        2. pedestal [boolean: does the covariance matrix include the terms
+                     for the first resultant? This is needed if fitting
+                     for the pedestal (i.e. the reset value). Default
+                     False.]
+
+        """
+
+        mean_t = []  # mean time of the resultant as defined in the paper
+        tau = []  # variance-weighted mean time of the resultant
+        N = []  # Number of reads per resultant
+
+        for times in read_times:
+            mean_t += [np.mean(times)]
+
+            if hasattr(times, "__len__"):
+                N += [len(times)]
+                k = np.arange(1, N[-1] + 1)
+                tau += [1/N[-1]**2*np.sum((2*N[-1] + 1 - 2*k)*np.array(times))]
+            else:
+                tau += [times]
+                N += [1]
+
+        mean_t = np.array(mean_t)
+        tau = np.array(tau)
+        N = np.array(N)
+
+        delta_t = mean_t[1:] - mean_t[:-1]
+
+        self.pedestal = pedestal
+        self.delta_t = delta_t
+        self.mean_t = mean_t
+        self.tau = tau
+        self.Nreads = N
+
+        self.alpha_readnoise = (1/N[:-1] + 1/N[1:])/delta_t**2
+        self.beta_readnoise = -1/N[1:-1]/(delta_t[1:]*delta_t[:-1])
+
+        self.alpha_phnoise = (tau[:-1] + tau[1:] - 2*mean_t[:-1])/delta_t**2
+        self.beta_phnoise = (mean_t[1:-1] - tau[1:-1])/(delta_t[1:]*delta_t[:-1])
+
+        # If we want the reset value we need to include the first
+        # resultant. These are the components of the variance and
+        # covariance for the first resultant.
+
+        if pedestal:
+            self.alpha_readnoise = np.array([1/N[0]/mean_t[0]**2]
+                                            + list(self.alpha_readnoise))
+            self.beta_readnoise = np.array([-1/N[0]/mean_t[0]/delta_t[0]]
+                                           + list(self.beta_readnoise))
+            self.alpha_phnoise = np.array([tau[0]/mean_t[0]**2]
+                                          + list(self.alpha_phnoise))
+            self.beta_phnoise = np.array([(mean_t[0] - tau[0])/mean_t[0]/delta_t[0]]
+                                         + list(self.beta_phnoise))
+
+    def calc_bias(self, countrates, sig, cvec, da=1e-7):
+
+        """
+        Calculate the bias in the best-fit count rate from estimating the
+        covariance matrix. This calculation is derived in the paper.
+
+        Arguments:
+        1. countrates [array of count rates at which the bias is desired]
+        2. sig [float, single read noise]
+        3. cvec [weight vector on resultant differences for initial
+                 estimation of count rate for the covariance matrix.
+                 Will be renormalized inside this function.]
+        Optional argument:
+        4. da [float, fraction of the count rate plus sig**2 to use for finite
+               difference estimate of the derivative. Default 1e-7.]
+
+        Returns:
+        1. bias [array, bias of the best-fit count rate from using cvec
+                 plus the observed resultants to estimate the covariance
+                 matrix]
+
+        """
+
+        if self.pedestal:
+            raise ValueError("Cannot compute bias with a Covar class that includes a pedestal fit.")
+
+        alpha = countrates[np.newaxis, :]*self.alpha_phnoise[:, np.newaxis]
+        alpha += sig**2*self.alpha_readnoise[:, np.newaxis]
+        beta = countrates[np.newaxis, :]*self.beta_phnoise[:, np.newaxis]
+        beta += sig**2*self.beta_readnoise[:, np.newaxis]
+
+        # we only want the weights; it doesn't matter what the count rates are.
+        n = alpha.shape[0]
+        z = np.zeros((len(cvec), len(countrates)))
+        result_low_a = fit_ramps(z, self, sig, countrateguess=countrates)
+
+        # try to avoid problems with roundoff error
+        da_incr = da*(countrates[np.newaxis, :] + sig**2)
+
+        dalpha = da_incr*self.alpha_phnoise[:, np.newaxis]
+        dbeta = da_incr*self.beta_phnoise[:, np.newaxis]
+        result_high_a = fit_ramps(z, self, sig, countrateguess=countrates+da_incr)
+        # finite difference approximation to dw/da
+
+        dw_da = (result_high_a.weights - result_low_a.weights)/da_incr
+
+        bias = np.zeros(len(countrates))
+        c = cvec/np.sum(cvec)
+
+        for i in range(len(countrates)):
+
+            C = np.zeros((n, n))
+            for j in range(n):
+                C[j, j] = alpha[j, i]
+            for j in range(n - 1):
+                C[j + 1, j] = C[j, j + 1] = beta[j, i]
+
+            bias[i] = np.linalg.multi_dot([c[np.newaxis, :], C, dw_da[:, i]])
+
+            sig_a = np.sqrt(np.linalg.multi_dot([c[np.newaxis, :], C, c[:, np.newaxis]]))
+            bias[i] *= 0.5*(1 + special.erf(countrates[i]/sig_a/2**0.5))
+
+        return bias
+
+
+class Ramp_Result:
+
+    def __init__(self):
+
+        self.countrate = None
+        self.chisq = None
+        self.uncert = None
+        self.var_poisson = None
+        self.var_rdnoise = None
+        self.weights = None
+        self.pedestal = None
+        self.uncert_pedestal = None
+        self.covar_countrate_pedestal = None
+
+        self.countrate_twoomit = None
+        self.chisq_twoomit = None
+        self.uncert_twoomit = None
+
+        self.countrate_oneomit = None
+        self.jumpval_oneomit = None
+        self.jumpsig_oneomit = None
+        self.chisq_oneomit = None
+        self.uncert_oneomit = None
+
+    def fill_masked_reads(self, diffs2use):
+
+        """
+
+        Replace countrates, uncertainties, and chi squared values that
+        are NaN because resultant differences were doubly omitted.
+        For these cases, revert to the corresponding values with
+        fewer omitted resultant differences to get the correct values
+        without double-counting omissions.
+
+        Arguments:
+        1. diffs2use [a 2D array matching self.countrate_oneomit in
+                      shape with zero for resultant differences that
+                      were masked and one for differences that were
+                      not masked]
+
+        This function replaces the relevant entries of
+        self.countrate_twoomit, self.chisq_twoomit,
+        self.uncert_twoomit, self.countrate_oneomit, and
+        self.chisq_oneomit in place. It does not return a value.
+
+        """
+
+        # replace entries that would be nan (from trying to
+        # doubly exclude read differences) with the global fits.
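+        # E.g. if diffs2use[3, j] == 0, countrate_oneomit[3, j] is simply
+        # replaced by the global fit countrate[j] below.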
+
+        omit = diffs2use == 0
+        ones = np.ones(diffs2use.shape)
+
+        self.countrate_oneomit[omit] = (self.countrate*ones)[omit]
+        self.chisq_oneomit[omit] = (self.chisq*ones)[omit]
+        self.uncert_oneomit[omit] = (self.uncert*ones)[omit]
+
+        omit = diffs2use[1:] == 0
+
+        self.countrate_twoomit[omit] = (self.countrate_oneomit[:-1])[omit]
+        self.chisq_twoomit[omit] = (self.chisq_oneomit[:-1])[omit]
+        self.uncert_twoomit[omit] = (self.uncert_oneomit[:-1])[omit]
+
+        omit = diffs2use[:-1] == 0
+
+        self.countrate_twoomit[omit] = (self.countrate_oneomit[1:])[omit]
+        self.chisq_twoomit[omit] = (self.chisq_oneomit[1:])[omit]
+        self.uncert_twoomit[omit] = (self.uncert_oneomit[1:])[omit]
+
+
+def fit_ramps(diffs, Cov, sig, countrateguess=None, diffs2use=None,
+              detect_jumps=False, resetval=0, resetsig=np.inf, rescale=True,
+              dn_scale=10):
+
+    """
+    Function fit_ramps. Fits ramps to read differences using the
+    covariance matrix for the read differences as given by the diagonal
+    elements and the off-diagonal elements.
+
+    Arguments:
+    1. diffs [resultant differences, shape (ndiffs, npix)]
+    2. Cov [class Covar, holds the covariance matrix information]
+    3. sig [read noise, 1D array, shape (npix)]
+
+    Optional Arguments:
+    4. countrateguess [array of shape (npix): count rates to be used
+                       to estimate the covariance matrix. Default None,
+                       in which case the average difference will be used,
+                       replacing negative means with zeros.]
+    5. diffs2use [shape (ndiffs, npix), boolean mask of whether to use
+                  each resultant difference for each pixel. Default
+                  None]
+    6. detect_jumps [run the jump detection machinery leaving out
+                     single and consecutive resultant differences.
+                     Default False]
+    7. resetval [float or array of shape (npix): priors on the reset
+                 values. Default 0. Irrelevant unless
+                 Cov.pedestal is True.]
+    8. resetsig [float or array of shape (npix): uncertainties on the
+                 reset values. Default np.inf, i.e., reset values
+                 have flat priors. Irrelevant unless Cov.pedestal
+                 is True.]
+    9. rescale [boolean, scale the covariance matrix internally to avoid
+                possible overflow/underflow problems for long ramps.
+                Slightly increases computational cost. Default
+                True.]
+    10. dn_scale [int, number of steps between renormalizations of the
+                  running determinant when rescaling the covariance
+                  matrix. Default 10.]
+
+    Returns:
+    Class Ramp_Result holding the best-fit count rates, uncertainties,
+    chi squared values, variances, and weights
+
+    """
+
+    if diffs2use is None:
+        diffs2use = np.ones(diffs.shape, np.uint8)
+
+    if countrateguess is None:
+        # initial guess for count rate is the average of the unmasked
+        # resultant differences unless otherwise specified.
+        if Cov.pedestal:
+            countrateguess = np.sum((diffs*diffs2use)[1:], axis=0)/np.sum(diffs2use[1:], axis=0)
+        else:
+            countrateguess = np.sum((diffs*diffs2use), axis=0)/np.sum(diffs2use, axis=0)
+        countrateguess *= countrateguess > 0
+
+    # Elements of the covariance matrix
+
+    alpha_phnoise = countrateguess*Cov.alpha_phnoise[:, np.newaxis]
+    alpha_readnoise = sig**2*Cov.alpha_readnoise[:, np.newaxis]
+    alpha = alpha_phnoise + alpha_readnoise
+    beta_phnoise = countrateguess*Cov.beta_phnoise[:, np.newaxis]
+    beta_readnoise = sig**2*Cov.beta_readnoise[:, np.newaxis]
+    beta = beta_phnoise + beta_readnoise
+
+    # Mask resultant differences that should be ignored. This is half
+    # of what we need to do to mask these resultant differences; the
+    # rest comes later.
+
+    d = diffs*diffs2use
+    beta = beta*diffs2use[1:]*diffs2use[:-1]
+    ndiffs, npix = alpha.shape
+
+    # Rescale the covariance matrix to a determinant of 1 to
+    # avoid possible overflow/underflow. The uncertainty and chi
+    # squared value will need to be scaled back later.
Note that + # theta[-1] is the determinant of the covariance matrix. + # + # The method below uses the fact that if all alpha and beta + # are multiplied by f, theta[i] is multiplied by f**i. Keep + # a running track of these factors to construct the scale at + # the end, and keep scaling throughout so that we never risk + # overflow or underflow. + + if rescale: + theta = np.ones((ndiffs + 1, npix)) + theta[1] = alpha[0] + + scale = theta[0]*1 + for i in range(2, ndiffs + 1): + + theta[i] = alpha[i - 1]/scale*theta[i - 1] - beta[i - 2]**2/scale**2*theta[i - 2] + + # Scaling every ten steps is safe for alpha up to 1e20 + # or so and incurs a negligible computational cost for + # the fractional power. + + if i % int(dn_scale) == 0 or i == ndiffs: + f = theta[i]**(1/i) + scale *= f + theta[i - 1] /= theta[i]/f + theta[i - 2] /= theta[i]/f**2 + theta[i] = 1 + else: + scale = 1 + + alpha /= scale + beta /= scale + + # All definitions and formulas here are in the paper. + + theta = np.ones((ndiffs + 1, npix)) + theta[1] = alpha[0] + for i in range(2, ndiffs + 1): + theta[i] = alpha[i - 1]*theta[i - 1] - beta[i - 2]**2*theta[i - 2] + + #print(np.mean(theta[-1]), np.std(theta[-1])) + + phi = np.ones((ndiffs + 1, npix)) + phi[ndiffs - 1] = alpha[ndiffs - 1] + for i in range(ndiffs - 2, -1, -1): + phi[i] = alpha[i]*phi[i + 1] - beta[i]**2*phi[i + 2] + + sgn = np.ones((ndiffs, npix)) + sgn[::2] = -1 + + Phi = np.zeros((ndiffs, npix)) + for i in range(ndiffs - 2, -1, -1): + Phi[i] = Phi[i + 1]*beta[i] + sgn[i + 1]*beta[i]*phi[i + 2] + + # This one is defined later in the paper and is used for jump + # detection and pedestal fitting. + + PhiD = np.zeros((ndiffs, npix)) + for i in range(ndiffs - 2, -1, -1): + PhiD[i] = (PhiD[i + 1] + sgn[i + 1]*d[i + 1]*phi[i + 2])*beta[i] + + Theta = np.zeros((ndiffs, npix)) + Theta[0] = -theta[0] + for i in range(1, ndiffs): + Theta[i] = Theta[i - 1]*beta[i - 1] + sgn[i]*theta[i] + + ThetaD = np.zeros((ndiffs + 1, npix)) + ThetaD[1] = -d[0]*theta[0] + for i in range(1, ndiffs): + ThetaD[i + 1] = beta[i - 1]*ThetaD[i] + sgn[i]*d[i]*theta[i] + + beta_extended = np.ones((ndiffs, npix)) + beta_extended[1:] = beta + + # C' and B' in the paper + + dC = sgn/theta[ndiffs]*(phi[1:]*Theta + theta[:-1]*Phi) + dC *= diffs2use + + dB = sgn/theta[ndiffs]*(phi[1:]*ThetaD[1:] + theta[:-1]*PhiD) + + # {\cal A}, {\cal B}, {\cal C} in the paper + + A = 2*np.sum(d*sgn/theta[-1]*beta_extended*phi[1:]*ThetaD[:-1], axis=0) + A += np.sum(d**2*theta[:-1]*phi[1:]/theta[ndiffs], axis=0) + + B = np.sum(d*dC, axis=0) + C = np.sum(dC, axis=0) + + r = Ramp_Result() + + # Finally, save the best-fit count rate, chi squared, uncertainty + # in the count rate, and the weights used to combine the + # resultants. + + if not Cov.pedestal: + r.countrate = B/C + r.chisq = (A - B**2/C)/scale + r.uncert = np.sqrt(scale/C) + r.weights = dC/C + r.var_poisson = np.sum(r.weights**2*alpha_phnoise, axis=0) + r.var_poisson += 2*np.sum(r.weights[1:]*r.weights[:-1]*beta_phnoise, axis=0) + r.var_rdnoise = np.sum(r.weights**2*alpha_readnoise, axis=0) + r.var_rdnoise += 2*np.sum(r.weights[1:]*r.weights[:-1]*beta_readnoise, axis=0) + + # If we are computing the pedestal, then we use the other formulas + # in the paper. + + else: + dt = Cov.mean_t[0] + Cinv_11 = theta[0]*phi[1]/theta[ndiffs] + + # Calculate the pedestal and slope using the equations in the paper. + # Do not compute weights for this case. 
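+        # With the default resetsig = np.inf the prior terms below vanish
+        # and the pedestal is effectively unconstrained.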
+ + b = dB[0]*C*dt - B*dC[0]*dt + dt**2*C*resetval/resetsig**2 + b /= C*Cinv_11 - dC[0]**2 + dt**2*C/resetsig**2 + a = B/C - b*dC[0]/C/dt + r.pedestal = b + r.countrate = a + r.chisq = A + a**2*C + b**2/dt**2*Cinv_11 + r.chisq += - 2*b/dt*dB[0] - 2*a*B + 2*a*b/dt*dC[0] + r.chisq /= scale + + # elements of the inverse covariance matrix + M = [C, dC[0]/dt, Cinv_11/dt**2 + 1/resetsig**2] + detM = M[0]*M[-1] - M[1]**2 + r.uncert = np.sqrt(scale*M[-1]/detM) + r.uncert_pedestal = np.sqrt(scale*M[0]/detM) + r.covar_countrate_pedestal = -scale*M[1]/detM + + # The code below computes the best chi squared, best-fit slope, + # and its uncertainty leaving out each resultant difference in + # turn. There are ndiffs possible differences that can be + # omitted. + # + # Then do it omitting two consecutive reads. There are ndiffs-1 + # possible pairs of adjacent reads that can be omitted. + # + # This approach would need to be modified if also fitting the + # pedestal, so that condition currently triggers an error. The + # modifications would make the equations significantly more + # complicated; the matrix equations to be solved by hand would be + # larger. + + if detect_jumps: + + # The algorithms below do not work if we are computing the + # pedestal here. + + if Cov.pedestal: + raise ValueError("Cannot use jump detection algorithm when fitting pedestals.") + + # Diagonal elements of the inverse covariance matrix + + Cinv_diag = theta[:-1]*phi[1:]/theta[ndiffs] + Cinv_diag *= diffs2use + + # Off-diagonal elements of the inverse covariance matrix + # one spot above and below for the case of two adjacent + # differences to be masked + + Cinv_offdiag = -beta*theta[:-2]*phi[2:]/theta[ndiffs] + + # Equations in the paper: best-fit a, b + # + # Catch warnings in case there are masked resultant + # differences, since these will be overwritten later. No need + # to warn about division by zero here. + + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + a = (Cinv_diag*B - dB*dC)/(C*Cinv_diag - dC**2) + b = (dB - a*dC)/Cinv_diag + + r.countrate_oneomit = a + r.jumpval_oneomit = b + + # Use the best-fit a, b to get chi squared + + r.chisq_oneomit = A + a**2*C - 2*a*B + b**2*Cinv_diag - 2*b*dB + 2*a*b*dC + # invert the covariance matrix of a, b to get the uncertainty on a + r.uncert_oneomit = np.sqrt(Cinv_diag/(C*Cinv_diag - dC**2)) + r.jumpsig_oneomit = np.sqrt(C/(C*Cinv_diag - dC**2)) + + r.chisq_oneomit /= scale + r.uncert_oneomit *= np.sqrt(scale) + r.jumpsig_oneomit *= np.sqrt(scale) + + # Now for two omissions in a row. This is more work. Again, + # all equations are in the paper. I first define three + # factors that will be used more than once to save a bit of + # computational effort. 
+ + cpj_fac = dC[:-1]**2 - C*Cinv_diag[:-1] + cjck_fac = dC[:-1]*dC[1:] - C*Cinv_offdiag + bcpj_fac = B*dC[:-1] - dB[:-1]*C + + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + # best-fit a, b, c + c = (bcpj_fac/cpj_fac - (B*dC[1:] - dB[1:]*C)/cjck_fac) + c /= cjck_fac/cpj_fac - (dC[1:]**2 - C*Cinv_diag[1:])/cjck_fac + b = (bcpj_fac - c*cjck_fac)/cpj_fac + a = (B - b*dC[:-1] - c*dC[1:])/C + r.countrate_twoomit = a + + # best-fit chi squared + r.chisq_twoomit = A + a**2*C + b**2*Cinv_diag[:-1] + c**2*Cinv_diag[1:] + r.chisq_twoomit -= 2*a*B + 2*b*dB[:-1] + 2*c*dB[1:] + r.chisq_twoomit += 2*a*b*dC[:-1] + 2*a*c*dC[1:] + 2*b*c*Cinv_offdiag + r.chisq_twoomit /= scale + + # uncertainty on the slope from inverting the (a, b, c) + # covariance matrix + fac = Cinv_diag[1:]*Cinv_diag[:-1] - Cinv_offdiag**2 + term2 = dC[:-1]*(dC[:-1]*Cinv_diag[1:] - Cinv_offdiag*dC[1:]) + term3 = dC[1:]*(dC[:-1]*Cinv_offdiag - Cinv_diag[:-1]*dC[1:]) + r.uncert_twoomit = np.sqrt(fac/(C*fac - term2 + term3)) + r.uncert_twoomit *= np.sqrt(scale) + + r.fill_masked_reads(diffs2use) + + return r + + +def mask_jumps(diffs, Cov, sig, threshold_oneomit=20.25, + threshold_twoomit=23.8, diffs2use=None): + + """ + + Function mask_jumps implements a likelihood-based, iterative jump + detection algorithm. + + Arguments: + 1. diffs [resultant differences] + 2. Cov [class Covar, holds the covariance matrix information. Must + be based on differences alone (i.e. without the pedestal)] + 3. sig [read noise, 1D array] + Optional arguments: + 4. threshold_oneomit [float, minimum chisq improvement to exclude + a single resultant difference. Default 20.25, + i.e., 4.5 sigma] + 5. threshold_twoomit [float, minimum chisq improvement to exclude + two sequential resultant differences. + Default 23.8, i.e., 4.5 sigma] + 6. diffs2use [a 2D array of the same shape as d, one for resultant + differences that appear ok and zero for resultant + differences flagged as contaminated. These flagged + differences will be ignored throughout jump detection, + which will only flag additional differences and + overwrite the data in this array. Default None] + + Returns: + 1. diffs2use [a 2D array of the same shape as d, one for resultant + differences that appear ok and zero for resultant + differences flagged as contaminated.] + 2. countrates [a 1D array of the count rates after masking the pixels + and resultants in diffs2use.] + + """ + + if Cov.pedestal: + raise ValueError("Cannot mask jumps with a Covar class that includes a pedestal fit.") + + # Force a copy of the input array for more efficient memory access. + + d = diffs*1 + + # We can use one-omit searches only where the reads immediately + # preceding and following have just one read. If a readout + # pattern has more than one read per resultant but significant + # gaps between resultants then a one-omit search might still be a + # good idea even with multiple-read resultants. + + oneomit_ok = Cov.Nreads[1:]*Cov.Nreads[:-1] >= 1 + oneomit_ok[0] = oneomit_ok[-1] = True + + # Other than that, we need to omit two. If a resultant has more + # than two reads, we need to omit both differences containing it + # (one pair of omissions in the differences). + + twoomit_ok = Cov.Nreads[1:-1] > 1 + + # This is the array to return: one for resultant differences to + # use, zero for resultant differences to ignore. + + if diffs2use is None: + diffs2use = np.ones(d.shape, np.uint8) + + # We need to estimate the covariance matrix. 
I'll use the median + # here for now to limit problems with the count rate in reads with + # jumps (which is what we are looking for) since we'll be using + # likelihoods and chi squared; getting the covariance matrix + # reasonably close to correct is important. + + countrateguess = np.median(d, axis=0)[np.newaxis, :] + countrateguess *= countrateguess > 0 + + # boolean arrays to be used later + recheck = np.ones(d.shape[1]) == 1 + dropped = np.ones(d.shape[1]) == 0 + + for j in range(d.shape[0]): + + # No need for indexing on the first pass. + if j == 0: + result = fit_ramps(d, Cov, sig, countrateguess=countrateguess, + diffs2use=diffs2use, detect_jumps=True) + # Also save the count rates so that we can use them later + # for debiasing. + countrate = result.countrate*1. + else: + result = fit_ramps(d[:, recheck], Cov, sig[recheck], + countrateguess=countrateguess[:, recheck], + diffs2use=diffs2use[:, recheck], + detect_jumps=True) + + # Chi squared improvements + + dchisq_two = result.chisq - result.chisq_twoomit + dchisq_one = result.chisq - result.chisq_oneomit + + # We want the largest chi squared difference + + best_dchisq_one = np.amax(dchisq_one*oneomit_ok[:, np.newaxis], axis=0) + best_dchisq_two = np.amax(dchisq_two*twoomit_ok[:, np.newaxis], axis=0) + + # Is the best improvement from dropping one resultant + # difference or two? Two drops will always offer more + # improvement than one so penalize them by the respective + # thresholds. Then find the chi squared improvement + # corresponding to dropping either one or two reads, whichever + # is better, if either exceeded the threshold. + + onedropbetter = (best_dchisq_one - threshold_oneomit > best_dchisq_two - threshold_twoomit) + + best_dchisq = best_dchisq_one*(best_dchisq_one > threshold_oneomit)*onedropbetter + best_dchisq += best_dchisq_two*(best_dchisq_two > threshold_twoomit)*(~onedropbetter) + + # If nothing exceeded the threshold set the improvement to + # NaN so that dchisq==best_dchisq is guaranteed to be False. + + best_dchisq[best_dchisq == 0] = np.nan + + # Now make the masks for which resultant difference(s) to + # drop, count the number of ramps affected, and drop them. + # If no ramps were affected break the loop. + + dropone = dchisq_one == best_dchisq + droptwo = dchisq_two == best_dchisq + + drop = np.any([np.sum(dropone, axis=0), + np.sum(droptwo, axis=0)], axis=0) + + if np.sum(drop) == 0: + break + + # Store the updated counts with omitted reads + + new_cts = np.zeros(np.sum(recheck)) + i_d1 = np.sum(dropone, axis=0) > 0 + new_cts[i_d1] = np.sum(result.countrate_oneomit*dropone, axis=0)[i_d1] + i_d2 = np.sum(droptwo, axis=0) > 0 + new_cts[i_d2] = np.sum(result.countrate_twoomit*droptwo, axis=0)[i_d2] + + # zero out count rates with drops and add their new values back in + + countrate[recheck] *= drop == 0 + countrate[recheck] += new_cts + + # Drop the read (set diffs2use=0) if the boolean array is True. + + diffs2use[:, recheck] *= ~dropone + diffs2use[:-1, recheck] *= ~droptwo + diffs2use[1:, recheck] *= ~droptwo + + # No need to repeat this on the entire ramp, only re-search + # ramps that had a resultant difference dropped this time. + + dropped[:] = False + dropped[recheck] = drop + recheck[:] = dropped + + # Do not try to search for bad resultants if we have already + # given up on all but one, two, or three resultant differences + # in the ramp. If there are only two left we have no way of + # choosing which one is "good". 
If there are three left we + # run into trouble in case we need to discard two. + + recheck[np.sum(diffs2use, axis=0) <= 3] = False + + return diffs2use, countrate + + + +def getramps(countrate, sig, readtimes, nramps=10): + + """ + Function getramps: make some synthetic ramps + Arguments: + 1. countrate [electrons/time] + 2. sig [single read read noise, electrons] + 3. readtimes [list of values or lists for the times of reads. If + a list of lists, times for reads that are averaged + together to produce a resultant.] + Optional Arguments: + 4. nramps [number of ramps desired, default 10] + + Returns: + 1. counts [electrons in each read, shape (nreads, nramps)] + """ + + t_last = 0 + counts = np.zeros((len(readtimes), nramps)) # resultant values to return + counts_total = np.zeros(nramps) # Running total of electrons + + for k in range(len(readtimes)): + t = readtimes[k] + counts_last = counts_total*1. + counts_resultant = counts_last*0 + + if hasattr(t, "__len__"): + for i in range(len(t)): + lam = countrate*(t[i] - t_last) # expected number of electrons + counts_total += np.random.poisson(lam, nramps) + counts[k] += counts_total/len(t) + t_last = t[i] + + # add read noise + counts[k] += np.random.randn(nramps)*sig/np.sqrt(len(t)) + + else: + if t_last is None: + t_last = t + lam = countrate*(t - t_last) # expected number of electrons + counts_total += np.random.poisson(lam, nramps) + counts[k] = counts_total + t_last = t + + # add read noise + counts[k] += np.random.randn(nramps)*sig + + return counts + diff --git a/spaceKLIP/imagetools.py b/spaceKLIP/imagetools.py index b57faf0c..93ff71c3 100644 --- a/spaceKLIP/imagetools.py +++ b/spaceKLIP/imagetools.py @@ -497,22 +497,26 @@ def subtract_median(self, subdir : str, optional Name of the directory where the data products shall be saved. The default is 'medsub'. - method : str + method : str, optional 'robust' for a robust median after masking out bright stars, - 'sigma_clipped' for another version of robust median using astropy sigma_clipped_stats on the whole image, - 'border' for robust median on the outer border region only, to ignore the bright stellar PSF in the center, - or 'simple' for a simple np.nanmedian - sigma : float - number of standard deviations to use for the clipping limit in sigma_clipped_stats, if - the robust option is selected. - borderwidth : int - number of pixels to use when defining the outer border region, if the border option is selected. - Default is to use the outermost 32 pixels around all sides of the image. - + 'sigma_clipped' for another version of robust median using astropy + sigma_clipped_stats on the whole image, + 'border' for robust median on the outer border region only, to + ignore the bright stellar PSF in the center, + or 'simple' for a simple np.nanmedian. + sigma : float, optional + number of standard deviations to use for the clipping limit in + sigma_clipped_stats, if the robust option is selected. + borderwidth : int, optional + number of pixels to use when defining the outer border region, if + the border option is selected. Default is to use the outermost 32 + pixels around all sides of the image. + Returns ------- - None, but writes out new files to subdir and updates database. - """ + None. + + """ # Set output directory. output_dir = os.path.join(self.database.output_dir, subdir) @@ -595,6 +599,140 @@ def subtract_median(self, # Update spaceKLIP database. 
             self.database.update_obs(key, j, fitsfile, maskfile)
+
+
+    def subtract_background_godoy(self,
+                                  types=['SCI', 'REF'],
+                                  subdir='bgsub'):
+
+        """
+        Subtract the corresponding background observations from the SCI and REF
+        data in the spaceKLIP database using a method developed by Nico Godoy.
+
+        Parameters
+        ----------
+        types : list of str
+            File types to run the subtraction over.
+
+        subdir : str, optional
+            Name of the directory where the data products shall be saved. The
+            default is 'bgsub'.
+
+        Returns
+        -------
+        None.
+
+        """
+
+        # Set output directory.
+        output_dir = os.path.join(self.database.output_dir, subdir)
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+
+        # Loop through concatenations.
+        for i, key in enumerate(self.database.obs.keys()):
+
+            # Find science, reference, and background files.
+            ww_sci = np.where(self.database.obs[key]['TYPE'] == 'SCI')[0]
+            ww_ref = np.where(self.database.obs[key]['TYPE'] == 'REF')[0]
+            ww_sci_bg = np.where(self.database.obs[key]['TYPE'] == 'SCI_BG')[0]
+            ww_ref_bg = np.where(self.database.obs[key]['TYPE'] == 'REF_BG')[0]
+
+            # Loop over science and reference files
+            for typ in types:
+                if typ == 'SCI':
+                    ww, ww_bg = ww_sci, ww_sci_bg
+                elif typ == 'REF':
+                    ww, ww_bg = ww_ref, ww_ref_bg
+
+                # Gather background files.
+                if len(ww_bg) == 0:
+                    raise UserWarning('Could not find any background files.')
+                else:
+                    bg_data, bg_erro, bg_pxdq = [], [], []
+                    for j in ww_bg:
+                        # Read background file.
+                        fitsfile = self.database.obs[key]['FITSFILE'][j]
+                        data, erro, pxdq, head_pri, head_sci, is2d, imshifts, maskoffs = ut.read_obs(fitsfile)
+
+                        # Collect the background frames.
+                        bg_data += [data]
+                        bg_erro += [erro]
+                        bg_pxdq += [pxdq]
+                    bg_data, bg_erro, bg_pxdq = np.array(bg_data), np.array(bg_erro), np.array(bg_pxdq)
+
+                    # If multiple files, take the median. Otherwise, carry on.
+                    if bg_data.ndim == 4:
+                        bg_data = np.nanmedian(bg_data, axis=0)
+
+                # Loop over individual files
+                for j in ww:
+                    # Read FITS file.
+                    fitsfile = self.database.obs[key]['FITSFILE'][j]
+                    data, erro, pxdq, head_pri, head_sci, is2d, imshifts, maskoffs = ut.read_obs(fitsfile)
+
+                    # Subtract the background per frame
+                    data -= bg_data
+
+                    # Loop over integrations
+                    data_bg_sub = np.empty_like(data)
+                    for k in range(data.shape[0]):
+                        # Subtract median of corresponding background frame from the frame
+                        bg_submed = bg_data[k,:,:] - np.nanmedian(bg_data[k,:,:])
+                        # Do the same for the data (that's already background subtracted)
+                        data_submed = data[k,:,:] - np.nanmedian(data[k,:,:])
+
+                        # Specify sections for initial guess
+                        # sect1 = data_submed[108:118,12:62]/bg_submed[108:118,12:62]
+                        # sect2 = data_submed[93:106,152:207]/bg_submed[93:106,152:207]
+                        sect1 = data_submed[112:118,4:10]/bg_submed[112:118,4:10]
+                        sect2 = data_submed[95:101,207:212]/bg_submed[95:101,207:212]
+
+                        # Reshape into 1d arrays and concatenate
+                        s1 = sect1.reshape(1,sect1.shape[0]*sect1.shape[1])
+                        s2 = sect2.reshape(1,sect2.shape[0]*sect2.shape[1])
+                        s12 = np.concatenate((s1[0,:],s2[0,:]))
+
+                        # Take median of concatenated array
+                        cte = np.nanmedian(s12)
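+
+                        # The minimisation below fits 100x the background
+                        # scale factor so that the free parameter is of
+                        # order unity; the result is divided by 100 again
+                        # when the scale is extracted.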
+
+                        # Use filter to determine mask for estimating the BG
+                        # scaling; currently only implemented for the MIRI
+                        # FQPM filters.
+                        filt = self.database.obs[key]['FILTER'][j]
+                        if filt not in ['F1065C', 'F1140C', 'F1550C']:
+                            raise NotImplementedError('Godoy subtraction is only supported for MIRI FQPMs at this time!')
+                        else:
+                            bgmaskbase = os.path.split(os.path.abspath(__file__))[0]
+                            bgmaskfile = os.path.join(bgmaskbase, 'resources/miri_bg_masks/godoy_mask_{}.fits'.format(filt.lower()))
+
+                        # Run minimisation function, 'res' will tell us if there is any residual
+                        # background that wasn't removed in the initial attempt. I.e. do we
+                        # need to subtract a little bit more or less?
+                        res = minimize(ut.bg_minimize,
+                                       x0=cte*100,
+                                       args=(data_submed, bg_submed, bgmaskfile),
+                                       method='L-BFGS-B',
+                                       tol=1e-7)
+
+                        # Extract scale factor for the background from res
+                        scale = res.x/100
+
+                        # Scale the background, and now subtract this correction from the original
+                        # background subtracted data
+                        data_improved_bgsub = data_submed - bg_submed*scale
+
+                        # Subtract median of residual frame to remove any residual median offset
+                        data_bg_sub[k] = data_improved_bgsub - np.nanmedian(data_improved_bgsub)
+
+                    # Write FITS file and PSF mask.
+                    fitsfile = ut.write_obs(fitsfile, output_dir, data_bg_sub, erro, pxdq, head_pri, head_sci, is2d, imshifts, maskoffs)
+
+                    # Update spaceKLIP database.
+                    self.database.update_obs(key, j, fitsfile)
+
     def subtract_background(self,
                             nints_per_med=None,
@@ -1360,11 +1498,13 @@ def blur_frames(self,
 
         Parameters
         ----------
-        fact : 'auto' or float or dict of list of float or None, optional
+        fact : 'auto' or 'fix23' or float or dict of list of float or None, optional
             FWHM (pix) of the Gaussian filter. If 'auto', will compute the
             FWHM automatically based on the Nyquist sampling criterion for
             discrete data, which is FWHM = lambda / 2.3D, where D = 5.2 m for NIRCam
-            coronagraphy and D = 6.5 m otherwise. If dict of list of float,
+            coronagraphy and D = 6.5 m otherwise. If 'fix23', will always blur
+            the data with a Gaussian kernel of FWHM = 2.3 pix, so that even bad
+            pixels no longer cause Fourier ripples. If dict of list of float,
             then the dictionary keys must match the keys of the observations
             database, and the number of entries in the lists must match the
             number of observations in the corresponding concatenation. Then, a
@@ -1422,10 +1562,21 @@
                     raise UserWarning('Data originates from unknown telescope')
                 if fact_temp is not None:
                     if str(fact_temp) == 'auto':
-                        wave_min = self.database.obs[key]['CWAVEL'][j] - self.database.obs[key]['DWAVEL'][j]
-                        nyquist = wave_min * 1e-6 / diam * 180. / np.pi * 3600. / 2.3  # see, e.g., Pawley 2006
-                        fact_temp = self.database.obs[key]['PIXSCALE'][j] / nyquist
+                        wave_min = self.database.obs[key]['CWAVEL'][j] - self.database.obs[key]['DWAVEL'][j]  # micron
+                        fwhm_current = wave_min * 1e-6 / diam * 180. / np.pi * 3600. / self.database.obs[key]['PIXSCALE'][j]  # pix
+                        fwhm_desired = 2.3  # pix; see, e.g., Pawley 2006
+                        fwhm_desired *= 1.5  # go to 1.5 times the theoretically required blurring to safely avoid numerical ringing effects
+                        fact_temp = np.sqrt(fwhm_desired**2 - fwhm_current**2)
                         fact_temp /= np.sqrt(8. * np.log(2.))  # fix from Marshall
+                    if str(fact_temp) == 'fix23':
+                        fwhm_current = 1.  # pix
+                        fwhm_desired = 2.3  # pix; see, e.g., Pawley 2006
+                        fact_temp = np.sqrt(fwhm_desired**2 - fwhm_current**2)
+                        fact_temp /= np.sqrt(8. * np.log(2.))  # fix from Marshall
+                    if np.isnan(fact_temp):
+                        fact_temp = None
+                        log.info('  --> Frame blurring: skipped')
+                        continue
                     log.info('  --> Frame blurring: factor = %.3f' % fact_temp)
                     for k in range(data.shape[0]):
                         data[k] = gaussian_filter(data[k], fact_temp)
@@ -1460,7 +1611,7 @@ def hpf(self,
 
         Parameters
         ----------
-        fact : 'auto' or float or dict of list of float or None, optional
+        size : 'auto' or float or dict of list of float or None, optional
             FWHM (pix) of the Gaussian filter. If 'auto', will compute the
             FWHM automatically based on the Nyquist sampling criterion for
             discrete data, which is FWHM = lambda / 2.3D, where D = 5.2 m for NIRCam
@@ -1503,7 +1654,7 @@ def hpf(self,
             mask = ut.read_msk(maskfile)
 
             # Skip file types that are not in the list of types.
-            fact_temp = None
+            size_temp = None
             if self.database.obs[key]['TYPE'][j] in types:
 
                 # High-pass filter frames.
@@ -1512,7 +1663,7 @@ def hpf(self,
                    try:
                        size_temp = size[key]
                    except:
-                        raise NotImplementedError()
+                        size_temp = float(size)
                    if size_temp is not None:
                        log.info('  --> Frame filtering: HPF FWHM = %.2f pix' % size_temp)
                        fourier_sigma_size = (data.shape[1] / size_temp) / (2. * np.sqrt(2. * np.log(2.)))
@@ -1525,7 +1676,7 @@ def hpf(self,
                 if size_temp is None:
                     pass
                 else:
-                    head_pri['HPFSIZE'] = fact_temp
+                    head_pri['HPFSIZE'] = size_temp
                 fitsfile = ut.write_obs(fitsfile, output_dir, data, erro, pxdq, head_pri, head_sci, is2d, imshifts, maskoffs)
                 maskfile = ut.write_msk(maskfile, mask, fitsfile)
@@ -1858,9 +2009,11 @@ def update_nircam_centers(self):
     def recenter_frames(self,
                         method='fourier',
                         subpix_first_sci_only=False,
+                        first_sci_only=True,
                         spectral_type='G2V',
                         shft_exp=1,
                         kwargs={},
+                        highpass=False,
                        subdir='recentered'):
        """
        Recenter frames so that the host star position is data.shape // 2. For
@@ -1882,6 +2035,10 @@ def recenter_frames(self,
            data to avoid another interpolation step if the 'align_frames'
            routine is run subsequently. Only applicable to non-coronagraphic
            data. The default is False.
+        first_sci_only : bool, optional
+            If True, only determine the star position from the first SCI file
+            in each concatenation and apply the same shift to all other
+            files. If False, recenter each SCI file individually. Only
+            applicable to NIRCam coronagraphy. The default is True.
        spectral_type : str, optional
            Host star spectral type for the WebbPSF model used to determine
            the star position behind the coronagraphic mask. The default is 'G2V'.
@@ -1950,17 +2107,19 @@ def recenter_frames(self,
                        # For the first SCI frame, get the star position
                        # and the shift between the star and coronagraphic
                        # mask position.
-                        if j == ww_sci[0] and k == 0:
+
+                        if (not first_sci_only or j == ww_sci[0]) and k == 0:
                            xc, yc, xshift, yshift = self.find_nircam_centers(data0=data.copy(),
                                                                              key=key,
                                                                              j=j,
                                                                              shft_exp=shft_exp,
                                                                              spectral_type=spectral_type,
                                                                              date=head_pri['DATE-BEG'],
-                                                                              output_dir=output_dir)
-
+                                                                              output_dir=output_dir,
+                                                                              highpass=highpass)
+
                        # Apply the same shift to all SCI and REF frames.
-                        shifts += [np.array([-(xc - data.shape[-1]//2), -(yc - data.shape[-2]//2)])]
+                        shifts += [np.array([-(xc - (data.shape[-1] - 1.) / 2.), -(yc - (data.shape[-2] - 1.)
/ 2.)])] maskoffs_temp += [np.array([xshift, yshift])] data[k] = ut.imshift(data[k], [shifts[k][0], shifts[k][1]], method=method, kwargs=kwargs) erro[k] = ut.imshift(erro[k], [shifts[k][0], shifts[k][1]], method=method, kwargs=kwargs) @@ -1969,9 +2128,9 @@ def recenter_frames(self, mask = spline_shift(mask, [shifts[k][1], shifts[k][0]], order=0, mode='constant', cval=np.nanmedian(mask)) xoffset = self.database.obs[key]['XOFFSET'][j] - self.database.obs[key]['XOFFSET'][ww_sci[0]] # arcsec yoffset = self.database.obs[key]['YOFFSET'][j] - self.database.obs[key]['YOFFSET'][ww_sci[0]] # arcsec - crpix1 = data.shape[-1]//2 + 1 # 1-indexed - crpix2 = data.shape[-2]//2 + 1 # 1-indexed - + crpix1 = (data.shape[-1] - 1.) / 2. + 1. # 1-indexed + crpix2 = (data.shape[-2] - 1.) / 2. + 1. # 1-indexed + # MIRI coronagraphy. elif self.database.obs[key]['EXP_TYPE'][j] in ['MIR_4QPM', 'MIR_LYOT']: log.warning(' --> Recenter frames: not implemented for MIRI coronagraphy, skipped') @@ -2089,7 +2248,8 @@ def find_nircam_centers(self, output_dir=None, fov_pix=65, oversample=2, - use_coeff=False): + use_coeff=False, + highpass=False): """ Find the star position behind the coronagraphic mask using a WebbPSF model. @@ -2163,6 +2323,13 @@ def find_nircam_centers(self, xoff = (crpix1 + 1) - xsciref yoff = (crpix2 + 1) - ysciref model_psf = psf.gen_psf_idl((0, 0), coord_frame='idl', return_oversample=False, quick=True) + if not isinstance(highpass, bool): + highpass = float(highpass) + fourier_sigma_size = (model_psf.shape[0] / highpass) / (2. * np.sqrt(2. * np.log(2.))) + model_psf = parallelized.high_pass_filter_imgs(np.array([model_psf]), numthreads=None, filtersize=fourier_sigma_size)[0] + else: + if highpass: + raise NotImplementedError() if not np.isnan(self.database.obs[key]['BLURFWHM'][j]): gauss_sigma = self.database.obs[key]['BLURFWHM'][j] / np.sqrt(8. * np.log(2.)) model_psf = gaussian_filter(model_psf, gauss_sigma) @@ -2245,7 +2412,9 @@ def find_nircam_centers(self, ax[2].legend(loc='upper right', fontsize=12) ax[2].set_title('Scene overview (1-indexed)') plt.tight_layout() - output_file = os.path.join(output_dir, key + '_recenter.pdf') + output_file = os.path.split(self.database.obs[key]['FITSFILE'][j])[1] + output_file = output_file.replace('.fits', '.pdf') + output_file = os.path.join(output_dir, output_file) plt.savefig(output_file) log.info(f" Plot saved in {output_file}") # plt.show() @@ -2261,6 +2430,8 @@ def align_frames(self, mask_override=None, msk_shp=8, shft_exp=1, + align_to_file=None, + scale_prior=False, kwargs={}, subdir='aligned'): """ @@ -2279,6 +2450,14 @@ def align_frames(self, Shape (height or radius, or [inner radius, outer radius]) for custom mask invoked by "mask_override" shft_exp : float, optional Take image to the given power before cross correlating for shifts, default is 1. For instance, 1/2 helps align nircam bar/narrow data (or other data with weird speckles) + align_to_file : str, optional + Path to FITS file to which all images shall be aligned. Needs to be + a file with the same observational setup as all concatenations in + the spaceKLIP database. Hence, this can only be applied to one + observational setup at a time. The default is None. + scale_prior : bool, optional + If True, tries to find a better prior for the scale factor instead + of simply using 1. The default is False. kwargs : dict, optional Keyword arguments for the scipy.ndimage.shift routine. The default is {}. 
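
A minimal usage sketch of the new align_to_file and scale_prior options
(hypothetical paths; assumes a spaceKLIP Database has been set up in the
usual way):

    import glob
    from spaceKLIP import database, imagetools

    db = database.Database(output_dir='./spaceklip/')
    db.read_jwst_s012_data(datapaths=sorted(glob.glob('./stage2/*calints.fits')))
    it = imagetools.ImageTools(db)
    # Align every frame to an external reference image instead of the first
    # science frame; scale_prior estimates the flux-scale starting value
    # from the masked border region instead of assuming 1.
    it.align_frames(align_to_file='path/to/reference_cal.fits',
                    scale_prior=True,
                    subdir='aligned')
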
@@ -2347,6 +2526,13 @@ def create_rec_mask(h, w, center=None, z=None): ww_all = np.append(ww_sci, ww_ref) # Loop through FITS files. + if align_to_file is not None: + try: + ref_image = pyfits.getdata(align_to_file, 'SCI') + except: + ref_image = pyfits.getdata(align_to_file, 0) + if ref_image.ndim == 3: + ref_image = np.nanmedian(ref_image, axis=0) shifts_all = [] for j in ww_all: @@ -2370,8 +2556,8 @@ def create_rec_mask(h, w, center=None, z=None): elif mask is None: mask_temp = np.ones_like(data[0]) else: - mask_temp = mask - + mask_temp = mask.copy() + # Align frames. head, tail = os.path.split(fitsfile) log.info(' --> Align frames: ' + tail) @@ -2382,7 +2568,8 @@ def create_rec_mask(h, w, center=None, z=None): # Take the first science frame as reference frame. if j == ww_sci[0] and k == 0: - ref_image = data[k].copy() + if align_to_file is None: + ref_image = data[k].copy() pp = np.array([0., 0., 1.]) xoffset = self.database.obs[key]['XOFFSET'][j] #arcsec yoffset = self.database.obs[key]['YOFFSET'][j] #arcsec @@ -2392,7 +2579,7 @@ def create_rec_mask(h, w, center=None, z=None): # Align all other SCI and REF frames to the first science # frame. - else: + if align_to_file is not None or j != ww_sci[0] or k != 0: # Calculate shifts relative to first frame, work in pixels xfirst = crpix1 + (xoffset/pxsc) xoff_curr_pix = self.database.obs[key]['XOFFSET'][j]/self.database.obs[key]['PIXSCALE'][j] @@ -2404,12 +2591,27 @@ def create_rec_mask(h, w, center=None, z=None): ycurrent = self.database.obs[key]['CRPIX2'][j] + yoff_curr_pix yshift = yfirst - ycurrent - p0 = np.array([xshift, yshift, 1.]) - # p0 = np.array([((crpix1 + xoffset) - (self.database.obs[key]['CRPIX1'][j] + self.database.obs[key]['XOFFSET'][j])) / self.database.obs[key]['PIXSCALE'][j], ((crpix2 + yoffset) - (self.database.obs[key]['CRPIX2'][j] + self.database.obs[key]['YOFFSET'][j])) / self.database.obs[key]['PIXSCALE'][j], 1.]) + if scale_prior: + ww = mask < 0.5 + sh = mask.shape + bw = 100 + ww[:bw, :] = 0. + ww[:, :bw] = 0. + ww[sh[0] - bw:, :] = 0. + ww[:, sh[1] - bw:] = 0. + # plt.imshow(ww, origin='lower') + # plt.show() + scale = np.nanmedian(np.true_divide(ref_image, data[k])[ww]) + if shft_exp != 1: + scale = np.power(np.abs(scale), shft_exp) + p0 = np.array([xshift, yshift, scale]) + else: + p0 = np.array([xshift, yshift, 1.]) + # Fix for weird numerical behaviour if shifts are small # but not exactly zero. if (np.abs(xshift) < 1e-3) and (np.abs(yshift) < 1e-3): - p0 = np.array([0., 0., 1.]) + p0 = np.array([0., 0., p0[-1]]) if align_algo == 'leastsq': if shft_exp != 1: args = (np.power(np.abs(data[k]), shft_exp), np.power(np.abs(ref_image), shft_exp), mask_temp, method, kwargs) @@ -2426,14 +2628,13 @@ def create_rec_mask(h, w, center=None, z=None): # Append shifts to array and apply shift to image # using defined method. 
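                    # pp holds (xshift, yshift, scale) as returned by the
                    # alignment fit; the shifts are in pixels.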
                    shifts += [np.array([pp[0], pp[1], pp[2]])]
-                    if j != ww_sci[0] or k != 0:
+                    if align_to_file is not None or j != ww_sci[0] or k != 0:
                        data[k] = ut.imshift(data[k], [shifts[k][0], shifts[k][1]], method=method, kwargs=kwargs)
                        erro[k] = ut.imshift(erro[k], [shifts[k][0], shifts[k][1]], method=method, kwargs=kwargs)
                shifts = np.array(shifts)
                if mask is not None:
-                    if j != ww_sci[0]:
+                    if align_to_file is not None or j != ww_sci[0]:
                        temp = np.median(shifts, axis=0)
-                        # mask = ut.imshift(mask, [temp[0], temp[1]], method=method, kwargs=kwargs)
                        mask = spline_shift(mask, [temp[1], temp[0]], order=0, mode='constant', cval=np.nanmedian(mask))
                shifts_all += [shifts]
                if imshifts is not None:
@@ -2478,7 +2679,7 @@
            fig = plt.figure(figsize=(6.4, 4.8))
            ax = plt.gca()
            for index, j in enumerate(ww_sci):
-                ax.scatter(shifts_all[index][:, 0] * self.database.obs[key]['PIXSCALE'][j] * 1000, 
+                ax.scatter(shifts_all[index][:, 0] * self.database.obs[key]['PIXSCALE'][j] * 1000,
                           shifts_all[index][:, 1] * self.database.obs[key]['PIXSCALE'][j] * 1000,
                           s=5, color=colors[index%len(colors)], marker='o',
                           label='PA = %.0f deg' % self.database.obs[key]['ROLL_REF'][j])
@@ -2557,3 +2758,4 @@
            log.info(f"  Plot saved in {output_file}")
 
        plt.close(fig)
+
diff --git a/spaceKLIP/pyklippipeline.py b/spaceKLIP/pyklippipeline.py
index 807b9220..82eb13d6 100644
--- a/spaceKLIP/pyklippipeline.py
+++ b/spaceKLIP/pyklippipeline.py
@@ -22,6 +22,7 @@
 from pyklip.instruments.JWST import JWSTData
 from pyklip.klip import _rotate_wcs_hdr
 from spaceKLIP.psf import get_transmission
+from spaceKLIP.utils import pop_pxar_kw
 
 import logging
 log = logging.getLogger(__name__)
@@ -106,6 +107,8 @@
                kwargs_temp['save_rolls'] = False
            else:
                kwargs_temp['save_ints'] = kwargs_temp['save_rolls']
+            if 'highpass' not in kwargs_temp.keys():
+                kwargs_temp['highpass'] = False
 
        # Set output directory.
        output_dir = os.path.join(database.output_dir, subdir)
@@ -127,20 +130,26 @@
        if 'maxnumbasis' not in kwargs_temp.keys() or kwargs_temp['maxnumbasis'] is None:
            kwargs_temp['maxnumbasis'] = maxnumbasis
 
-        # Initialize pyKLIP dataset.
-        dataset = JWSTData(filepaths, psflib_filepaths)
-        kwargs_temp['dataset'] = dataset
-        kwargs_temp['aligned_center'] = dataset._centers[0]
-        kwargs_temp['psf_library'] = dataset.psflib
-
        # Run KLIP subtraction.
        for mode in kwargs['mode']:
+
+            # Initialize pyKLIP dataset.
+            pop_pxar_kw(np.append(filepaths, psflib_filepaths))
+            dataset = JWSTData(filepaths, psflib_filepaths, highpass=kwargs_temp['highpass'])
+            kwargs_temp['dataset'] = dataset
+            kwargs_temp['aligned_center'] = dataset._centers[0]
+            kwargs_temp['psf_library'] = dataset.psflib
+            kwargs_temp['mode'] = mode
+
+            # Can run pyKLIP multiple times on the same dataset with different
+            # annuli and subsections.
            for annu in kwargs['annuli']:
                for subs in kwargs['subsections']:
                    log.info('  --> pyKLIP: mode = ' + mode + ', annuli = ' + str(annu) + ', subsections = ' + str(subs))
                    fileprefix = mode + '_NANNU' + str(annu) + '_NSUBS' + str(subs) + '_' + key
+
+                    # Add/update remaining keywords.
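+                    # E.g. fileprefix = 'ADI_NANNU1_NSUBS1_' + key for an
+                    # ADI reduction with one annulus and one subsection.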
kwargs_temp['fileprefix'] = fileprefix - kwargs_temp['mode'] = mode kwargs_temp['annuli'] = annu kwargs_temp['subsections'] = subs kwargs_temp_temp = kwargs_temp.copy() @@ -175,6 +184,10 @@ def run_obs(database, hdul[0].header['APERNAME'] = database.obs[key]['APERNAME'][ww_sci[0]] hdul[0].header['PPS_APER'] = database.obs[key]['PPS_APER'][ww_sci[0]] hdul[0].header['PIXSCALE'] = database.obs[key]['PIXSCALE'][ww_sci[0]] + try: + hdul[0].header['PIXAR_SR'] = database.obs[key]['PIXAR_SR'][ww_sci[0]] + except: + pass hdul[0].header['MODE'] = mode hdul[0].header['ANNULI'] = annu hdul[0].header['SUBSECTS'] = subs diff --git a/spaceKLIP/resources/crpix_jarron.json b/spaceKLIP/resources/crpix_jarron.json index aaad79f1..df452d30 100644 --- a/spaceKLIP/resources/crpix_jarron.json +++ b/spaceKLIP/resources/crpix_jarron.json @@ -11,7 +11,7 @@ "NRCB5_MASK335R": [160.1, 161.4], "NRCA2_MASK430R": [295.91, 160.88], "NRCA5_MASK430R": [151.3, 175.2], - "NRCA5_MASKLWB": [151.3, 175.2], + "NRCA5_MASKLWB": [151.3, 175.2], "NRCA2_FULL_MASK430R": [2023.91, 1519.88], "NRCA5_FULL_MASK430R": [964.3, 1676.2], "NRCB5_MASK430R": [160.5, 161.0] diff --git a/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1065c.fits b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1065c.fits new file mode 100644 index 00000000..f95c11a2 Binary files /dev/null and b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1065c.fits differ diff --git a/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1140c.fits b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1140c.fits new file mode 100644 index 00000000..dd1aa9ca Binary files /dev/null and b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1140c.fits differ diff --git a/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1550c.fits b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1550c.fits new file mode 100644 index 00000000..baa62fe3 Binary files /dev/null and b/spaceKLIP/resources/miri_bg_masks/godoy_mask_f1550c.fits differ diff --git a/spaceKLIP/resources/svo_filter_table.dat b/spaceKLIP/resources/svo_filter_table.dat new file mode 100644 index 00000000..7f63f807 --- /dev/null +++ b/spaceKLIP/resources/svo_filter_table.dat @@ -0,0 +1,30 @@ +FilterProfileService filterID WavelengthUnit WavelengthUCD PhotSystem DetectorType Band Instrument Facility ProfileReference CalibrationReference Description Comments WavelengthRef WavelengthMean WavelengthEff WavelengthMin WavelengthMax WidthEff WavelengthCen WavelengthPivot WavelengthPeak WavelengthPhot FWHM Fsun PhotCalID MagSys ZeroPoint ZeroPointUnit Mag0 ZeroPointType AsinhSoft TrasmissionCurve +ivo://svo/fps JWST/NIRCam.F070W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F070W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 7039.1194650654 7088.3009369996 6988.4272768359 6048.1970523246 7927.0738659178 1212.8399166581 7099.1873443748 7039.1194650654 7691.5 7022.060805287 1430.8105961315 140.01772043307 JWST/NIRCam.F070W/Vega Vega 2768.4045696982 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F070W +ivo://svo/fps JWST/NIRCam.F090W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F090W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 9021.5288283376 9083.3952700859 8984.9812166695 7881.875216583 10243.081731613 1772.7418859059 9007.2449821231 9021.5288283376 9905.5 9026.3413455784 2088.0551132236 89.25825936013 JWST/NIRCam.F090W/Vega Vega 2244.9507457345 Jy 0.0 
Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F090W +ivo://svo/fps JWST/NIRCam.F115W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F115W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 11542.608442048 11623.884998948 11433.622503939 9975.6025121473 13058.39727876 2055.1323897044 11494.311055172 11542.608442048 12652.0 11488.863330367 2644.2460160212 53.032797973932 JWST/NIRCam.F115W/Vega Vega 1746.1178757573 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F115W +ivo://svo/fps JWST/NIRCam.F140M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F140M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 14053.232536752 14074.468174513 14023.618737895 13042.245240637 15058.576852755 1367.4001675902 14055.471891322 14053.232536752 14643.5 14037.805056945 1471.7453241243 34.081684124249 JWST/NIRCam.F140M/Vega Vega 1288.6489399126 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F140M +ivo://svo/fps JWST/NIRCam.F150W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F150W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 15007.438586148 15104.226607667 14872.561806926 13041.194700122 16948.894918849 2890.4253488344 15010.691851699 15007.438586148 16547.9 14937.56637456 3349.3001304998 28.954011369422 JWST/NIRCam.F150W/Vega Vega 1172.057562686 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F150W +ivo://svo/fps JWST/NIRCam.F162M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F162M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 16272.470828066 16296.590457303 16243.330063859 15126.159604123 17439.174281297 1626.2931600626 16279.44469614 16272.470828066 16954.0 16259.497388533 1710.0509900877 23.379801780551 JWST/NIRCam.F162M/Vega Vega 1023.0385411698 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F162M +ivo://svo/fps JWST/NIRCam.F164N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F164N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 16445.362304472 16445.953951204 16446.179233607 16171.410441496 16717.719150987 200.10135928626 16443.915873312 16445.362304472 16454.0 16446.588351547 178.80546638652 22.757987154832 JWST/NIRCam.F164N/Vega Vega 985.57767369197 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F164N +ivo://svo/fps JWST/NIRCam.F150W2 Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F150W2 filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 16592.114851269 17865.566963247 14793.713184492 9774.7126497213 23946.869516933 9389.1567466481 18162.989131619 16592.114851269 21320.0 15665.709458518 10382.434508848 21.401406494905 JWST/NIRCam.F150W2/Vega Vega 1149.9431838877 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F150W2 +ivo://svo/fps JWST/NIRCam.F182M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F182M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 18451.671760304 
18494.295108627 18388.832224161 16959.531571562 20010.965690062 2250.8124936959 18451.438545284 18451.671760304 19515.0 18417.178838828 2450.0535142807 15.346268463819 JWST/NIRCam.F182M/Vega Vega 844.94329283811 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F182M +ivo://svo/fps JWST/NIRCam.F187N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F187N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 18738.986047596 18739.646084758 18737.223679202 18445.2839062 19029.975310351 236.68645056505 18738.831571338 18738.986047596 18732.0 18737.681771247 211.39649471002 14.240182596606 JWST/NIRCam.F187N/Vega Vega 794.84815933828 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F187N +ivo://svo/fps JWST/NIRCam.F200W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F200W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 19886.483516935 20028.146492527 19680.4102017 17249.082485184 22596.641972191 4190.389872561 19920.758215355 19886.483516935 22044.0 19774.84108433 4689.4034095684 11.914876030378 JWST/NIRCam.F200W/Vega Vega 757.65380461946 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F200W +ivo://svo/fps JWST/NIRCam.F210M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F210M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 20954.513115415 20982.217923787 20908.350068007 19618.544672231 22337.285421049 2055.3790735177 20970.264415414 20954.513115415 21396.0 20926.742039461 2092.9556602331 9.8109724045601 JWST/NIRCam.F210M/Vega Vega 688.03251394796 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F210M +ivo://svo/fps JWST/NIRCam.F212N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F212N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 21213.18248472 21213.973774212 21211.927274831 20900.932977763 21524.994634265 274.27440340689 21216.958037587 21213.18248472 21233.0 21212.45522847 246.02475633297 9.4024271302203 JWST/NIRCam.F212N/Vega Vega 674.83167374035 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F212N +ivo://svo/fps JWST/NIRCam.F250M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F250M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 25032.325766786 25049.391569476 25005.80077427 23935.493008153 26177.908427773 1782.9583911921 25035.347980395 25032.325766786 25496.5 25017.183531601 1824.9614459531 5.0551974588382 JWST/NIRCam.F250M/Vega Vega 504.65113524699 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F250M +ivo://svo/fps JWST/NIRCam.F277W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F277W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 27617.398037931 27844.642741698 27278.576094025 23673.124318244 32203.222514802 6614.6091124143 27762.830081588 27617.398037931 28991.3 27428.886303298 7064.6352975589 3.5910862170117 JWST/NIRCam.F277W/Vega Vega 430.1399020248 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F277W +ivo://svo/fps JWST/NIRCam.F300M Angstrom 
em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F300M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 29891.21134832 29940.444469418 29818.319259518 27703.546039978 32505.920390405 3255.5781627185 29930.663423494 29891.21134832 29051.5 29850.934233443 3276.8424330832 2.6519147041501 JWST/NIRCam.F300M/Vega Vega 369.64763495866 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F300M +ivo://svo/fps JWST/NIRCam.F323N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F323N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 32368.534888242 32369.286671264 32367.304317143 32046.288196528 32761.073117067 384.88993856761 32359.768130498 32368.534888242 32348.5 32367.804453319 386.35931499885 1.9586665955606 JWST/NIRCam.F323N/Vega Vega 320.94274667646 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F323N +ivo://svo/fps JWST/NIRCam.F322W2 Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F322W2 filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 32319.755057761 33334.921422542 30731.382702291 23851.44946962 41234.693242206 11332.545275066 32537.813005081 32319.755057761 39415.4 31420.399418885 15196.730756689 2.0989606942145 JWST/NIRCam.F322W2/Vega Vega 343.30232528434 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F322W2 +ivo://svo/fps JWST/NIRCam.F335M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F335M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 33620.673080026 33675.235591391 33537.232322948 31203.356330494 36442.229352719 3389.4183845554 33593.663526404 33620.673080026 34942.8 33573.673638078 3576.0001863291 1.6975152259093 JWST/NIRCam.F335M/Vega Vega 298.90992108194 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F335M +ivo://svo/fps JWST/NIRCam.F356W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F356W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 35683.619450209 35934.4889657 35287.040491722 30732.911625808 40801.258504962 7239.2951464142 35652.157449383 35683.619450209 38604.7 35456.170831334 8326.8074712862 1.36754014167 JWST/NIRCam.F356W/Vega Vega 271.3926286675 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F356W +ivo://svo/fps JWST/NIRCam.F360M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F360M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 36241.760882275 36298.097588999 36148.930726857 33260.335540832 39037.393933034 3584.7956145675 36217.330681244 36241.760882275 37456.1 36186.621637174 3855.305234812 1.2716930639146 JWST/NIRCam.F360M/Vega Vega 260.30648962313 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F360M +ivo://svo/fps JWST/NIRCam.F405N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F405N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 40516.525496821 40517.390303609 40515.758914405 40097.867457833 40966.099971704 454.87184788608 40514.332602433 40516.525496821 40523.4 40516.343119312 
460.05555259099 0.81999477112789 JWST/NIRCam.F405N/Vega Vega 206.96939908611 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F405N +ivo://svo/fps JWST/NIRCam.F410M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F410M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 40822.380379117 40886.543870603 40723.184168815 37763.560510417 44048.409052071 4262.8579023938 40848.595772652 40822.380379117 42050.3 40766.07870333 4351.6499114491 0.80371529679638 JWST/NIRCam.F410M/Vega Vega 208.75045059896 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F410M +ivo://svo/fps JWST/NIRCam.F430M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F430M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 42812.575646101 42829.393603083 42784.78806412 41227.683101414 44448.787236365 2295.1444214116 42832.58018141 42812.575646101 42482.7 42795.894458171 2315.3069368744 0.6488094267775 JWST/NIRCam.F430M/Vega Vega 190.69659340759 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F430M +ivo://svo/fps JWST/NIRCam.F444W Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F444W filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 44043.150837738 44393.515120525 43504.264673627 38039.572043804 50995.5 10676.002928393 44405.491515008 44043.150837738 43523.2 43732.035994545 11144.052434142 0.57820726511534 JWST/NIRCam.F444W/Vega Vega 184.1022219016 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F444W +ivo://svo/fps JWST/NIRCam.F460M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F460M filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 46299.277451031 46315.567661685 46269.863826257 44652.640201372 48146.405666279 2308.9976097615 46317.872804161 46299.277451031 45860.8 46280.765417962 2329.5607509154 0.43536278814704 JWST/NIRCam.F460M/Vega Vega 164.1099180149 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F460M +ivo://svo/fps JWST/NIRCam.F466N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F466N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 46544.303736584 46545.305761355 46540.477222667 46021.350197247 47042.621884854 535.40523943323 46548.339825337 46544.303736584 46563.4 46541.156644092 520.35830830914 0.42613770545148 JWST/NIRCam.F466N/Vega Vega 157.77306515275 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F466N +ivo://svo/fps JWST/NIRCam.F470N Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F470N filter" "includes NIRCam optics, DBS, QE and JWST Optical Telescope Element" 47077.912373555 47078.819812928 47077.839401996 46553.98481001 47566.821551396 510.27235458608 47081.607869655 47077.912373555 47076.9 47078.439899243 494.58838389155 0.41566639580181 JWST/NIRCam.F470N/Vega Vega 159.49947070015 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F470N +ivo://svo/fps JWST/NIRCam.F480M Angstrom em.wl NIRCam 1 "" NIRCam JWST https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters "" "NIRCam F480M filter" "includes NIRCam optics, 
DBS, QE and JWST Optical Telescope Element" 48181.945913954 48213.27199619 48139.105123907 45820.0235722 50919.019359765 3141.3708247124 48224.877085394 48181.945913954 47117.4 48159.715297841 3197.4526630828 0.38335792706566 JWST/NIRCam.F480M/Vega Vega 153.22388044927 Jy 0.0 Pogson 0.0 http://svo2.cab.inta-csic.es//theory/fps/fps.php?ID=JWST/NIRCam.F480M
diff --git a/spaceKLIP/resources/transmissions/JWST_MIRI_F1065C_transmission_webbpsf-ext_v2.fits b/spaceKLIP/resources/transmissions/JWST_MIRI_F1065C_transmission_webbpsf-ext_v2.fits
index b38c2da9..a58fdaa6 100644
Binary files a/spaceKLIP/resources/transmissions/JWST_MIRI_F1065C_transmission_webbpsf-ext_v2.fits and b/spaceKLIP/resources/transmissions/JWST_MIRI_F1065C_transmission_webbpsf-ext_v2.fits differ
diff --git a/spaceKLIP/resources/transmissions/JWST_MIRI_F1140C_transmission_webbpsf-ext_v2.fits b/spaceKLIP/resources/transmissions/JWST_MIRI_F1140C_transmission_webbpsf-ext_v2.fits
index beeb8f7a..fe402b37 100644
Binary files a/spaceKLIP/resources/transmissions/JWST_MIRI_F1140C_transmission_webbpsf-ext_v2.fits and b/spaceKLIP/resources/transmissions/JWST_MIRI_F1140C_transmission_webbpsf-ext_v2.fits differ
diff --git a/spaceKLIP/resources/transmissions/JWST_MIRI_F1550C_transmission_webbpsf-ext_v2.fits b/spaceKLIP/resources/transmissions/JWST_MIRI_F1550C_transmission_webbpsf-ext_v2.fits
index 2561fe73..30e6ad0c 100644
Binary files a/spaceKLIP/resources/transmissions/JWST_MIRI_F1550C_transmission_webbpsf-ext_v2.fits and b/spaceKLIP/resources/transmissions/JWST_MIRI_F1550C_transmission_webbpsf-ext_v2.fits differ
diff --git a/spaceKLIP/starphot.py b/spaceKLIP/starphot.py
index 4360f255..203ab40b 100644
--- a/spaceKLIP/starphot.py
+++ b/spaceKLIP/starphot.py
@@ -19,6 +19,7 @@
 
 import astropy.units as u
+from astropy.table import Table
 from astroquery.svo_fps import SvoFps
 from synphot import Observation, SourceSpectrum, SpectralElement
 from synphot.models import Empirical1D
 
@@ -165,7 +166,16 @@ def get_stellar_magnitudes(starfile,
     # http://svo2.cab.inta-csic.es/theory/fps/
     filts = []
     zeros = []
-    filter_list = SvoFps.get_filter_list(facility='JWST', instrument=instrume)
+    path = os.path.abspath(__file__)
+    path = os.path.dirname(path)
+    path = os.path.join(path, 'resources/svo_filter_table.dat')
+    try:
+        filter_list = SvoFps.get_filter_list(facility='JWST', instrument=instrume)
+        filter_list.write(path, format='ascii', overwrite=True)
+    except Exception:
+        log.warning('Query to the SVO Filter Profile Service failed or timed out. Using the cached filter table instead.')
+        filter_list = Table.read(path, format='ascii')
+
     for i in range(len(filter_list)):
         filts += [filter_list['filterID'][i].split('.')[-1]]
         zeros += [filter_list['ZeroPoint'][i]]
diff --git a/spaceKLIP/tests/test_database.py b/spaceKLIP/tests/test_database.py
index 9f100ad5..e68e871c 100644
--- a/spaceKLIP/tests/test_database.py
+++ b/spaceKLIP/tests/test_database.py
@@ -5,7 +5,6 @@
 
 import pytest
 
-
 testdatapath = os.getenv('SPACEKLIP_TEST_DATA_PATH')
 testpath = os.path.dirname(os.path.abspath(__file__))
 
diff --git a/spaceKLIP/utils.py b/spaceKLIP/utils.py
index c0698683..a0c3af31 100644
--- a/spaceKLIP/utils.py
+++ b/spaceKLIP/utils.py
@@ -20,6 +20,7 @@
 from scipy.ndimage import fourier_shift, gaussian_filter
 from scipy.ndimage import shift as spline_shift
 
+import pysiaf
 from webbpsf_ext.imreg_tools import get_coron_apname as nircam_apname
 from webbpsf_ext.image_manip import expand_mask
 
@@ -123,8 +124,14 @@ def read_obs(fitsfile,
     # Read FITS file.
     hdul = pyfits.open(fitsfile)
     data = hdul['SCI'].data
-    erro = hdul['ERR'].data
-    pxdq = hdul['DQ'].data
+    try:
+        erro = hdul['ERR'].data
+    except KeyError:
+        erro = np.sqrt(data)
+    try:
+        pxdq = hdul['DQ'].data
+    except KeyError:
+        pxdq = np.zeros_like(data, dtype=int)
     head_pri = hdul[0].header
     head_sci = hdul['SCI'].header
     is2d = False
@@ -384,10 +391,10 @@ def write_fitpsf_images(fitpsf,
                 pri.header[key] = 'NONE'
             else:
                 pri.header[key] = row[key]
+    res = pyfits.ImageHDU(residual_image, name='RES')
     sci = pyfits.ImageHDU(fitpsf.data_stamp, name='SCI')
     mod = pyfits.ImageHDU(fm_bestfit, name='MOD')
-    res = pyfits.ImageHDU(residual_image, name='RES')
-    hdul = pyfits.HDUList([pri, sci, res, mod])
+    hdul = pyfits.HDUList([pri, res, sci, mod])
     hdul.writeto(fitsfile, output_verify='fix', overwrite=True)
     pass
 
@@ -819,7 +826,7 @@ def get_filter_info(instrument, timeout=1, do_svo=True, return_more=False):
         'MIRI' : webbpsf.MIRI,
     }
     inst = inst_func[iname_upper]()
-    filter_list = inst.filter_list 
+    filter_list = inst.filter_list
 
     wave, weff = ({}, {})
     if do_svo:
@@ -1026,6 +1033,32 @@ def cube_outlier_detection(data, sigma_cut=10, nint_min=10):
 
     return indbad
+
+def bg_minimize(par, X, Y, bgmaskfile):
+    """Simple minimisation function for Godoy background subtraction.
+
+    Parameters
+    ----------
+    par : float
+        Scale factor for the background array, in percent.
+    X : ndarray
+        Science / reference image
+    Y : ndarray
+        Background image
+    bgmaskfile : str
+        File which provides a mask to select which pixels
+        to compare during minimisation
+
+    Returns
+    -------
+    Sum of the absolute residuals between the masked pixels of X and Y.
+    """
+    mask = pyfits.getdata(bgmaskfile)
+    indices = np.where(mask == 1)
+    X0 = X[indices]
+    Y0 = Y[indices]
+    Z0 = X0 - Y0*par/100
+    return np.nansum(np.sqrt(Z0**2))
+
 def interpret_dq_value(dq_value):
     """Interpret DQ value using DQ definition
 
@@ -1107,3 +1140,38 @@ def get_dqmask(dqarr, bitvalues):
             dqmask = dqmask | (dqarr & bitval)
 
     return dqmask
+
+def pop_pxar_kw(filepaths):
+    """
+
+    Populate the PIXAR_A2 SCI header keyword, which is required by pyKLIP, in
+    case it is not already present.
+
+    Parameters
+    ----------
+    filepaths : list or array
+        File paths of the FITS files whose headers shall be checked.
+    """
+
+    for filepath in filepaths:
+        try:
+            pxar = pyfits.getheader(filepath, 'SCI')['PIXAR_A2']
+        except KeyError:
+            hdul = pyfits.open(filepath)
+            siaf_nrc = pysiaf.Siaf('NIRCam')
+            siaf_nis = pysiaf.Siaf('NIRISS')
+            siaf_mir = pysiaf.Siaf('MIRI')
+            if hdul[0].header['INSTRUME'] == 'NIRCAM':
+                ap = siaf_nrc[hdul[0].header['APERNAME']]
+            elif hdul[0].header['INSTRUME'] == 'NIRISS':
+                ap = siaf_nis[hdul[0].header['APERNAME']]
+            elif hdul[0].header['INSTRUME'] == 'MIRI':
+                ap = siaf_mir[hdul[0].header['APERNAME']]
+            else:
+                raise UserWarning('Data originates from unknown JWST instrument')
+            pix_scale = (ap.XSciScale + ap.YSciScale) / 2.
+            hdul['SCI'].header['PIXAR_A2'] = pix_scale**2
+            hdul.writeto(filepath, output_verify='fix', overwrite=True)
+            hdul.close()
+
+    pass
diff --git a/tox.ini b/tox.ini
index 2c049beb..1ec36cc3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,7 +19,10 @@ conda deps =
     astroquery
 commands_pre=
     pip install webbpsf_ext
-    pip install git+https://bitbucket.org/pyKLIP/pyklip.git@jwst
+    pip install pyklip
+setenv =
+    CRDS_PATH = {homedir}/crds_cache
+    CRDS_SERVER_URL = https://jwst-crds.stsci.edu
 commands=
     test: pytest {posargs}
     cov: pytest {posargs} --cov-config=pyproject.toml --cov-report=xml --cov=spaceklip spaceklip/tests/
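
For reference, a minimal usage sketch of the new bg_minimize helper in spaceKLIP/utils.py above. The science and background file names here are hypothetical placeholders, and driving the helper with scipy.optimize.minimize_scalar is one reasonable pattern under those assumptions, not necessarily how the pipeline itself calls it::

    import astropy.io.fits as pyfits
    from scipy.optimize import minimize_scalar

    from spaceKLIP.utils import bg_minimize

    # Hypothetical science and background frames of matching shape.
    sci = pyfits.getdata('sci_f1140c_calints.fits')
    bkg = pyfits.getdata('bg_f1140c_calints.fits')
    maskfile = 'spaceKLIP/resources/miri_bg_masks/godoy_mask_f1140c.fits'

    # bg_minimize scales the background in percent, so scan 0-200 %.
    res = minimize_scalar(bg_minimize, bounds=(0., 200.), method='bounded',
                          args=(sci, bkg, maskfile))
    sci_bgsub = sci - bkg * res.x / 100.  # apply the best-fit scaling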
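
Likewise, a standalone sketch of the SIAF fallback that pop_pxar_kw applies when a SCI header lacks PIXAR_A2; the aperture name below is only an illustrative choice::

    import pysiaf

    # Average the SIAF X/Y science-frame pixel scales and square the result to
    # obtain the pixel area in arcsec^2, mirroring the fallback in pop_pxar_kw.
    ap = pysiaf.Siaf('NIRCam')['NRCA5_MASK335R']
    pix_scale = (ap.XSciScale + ap.YSciScale) / 2.  # arcsec per pixel
    print('PIXAR_A2 =', pix_scale**2, 'arcsec^2')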