Skip to content

Commit

Permalink
Merge pull request #15 from radicamc/towards-eclipse-fit
Browse files Browse the repository at this point in the history
Towards eclipse fit
  • Loading branch information
radicamc authored Jun 1, 2023
2 parents 13cac09 + cfe6bb4 commit 6dd6a33
Show file tree
Hide file tree
Showing 7 changed files with 77 additions and 42 deletions.
1 change: 0 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ Alternatively, Stages 1 to 3 can be run at once via the ```run_DMS.py``` script.
3. Once happy with the input parameters, enter ```python run_DMS.py run_DMS.yaml``` in the terminal.

To use the light curve fitting capabilities, simply follow the same procedure with the fit_lightcurves.py and .yaml files.
Currently only transit light curve fits are supported, with eclipse fitting in development.

## Citations
If you make use of this code in your work, please cite [Radica et al. (2023)](https://ui.adsabs.harvard.edu/abs/2023arXiv230517001R/abstract) and [Feinstein et al. (2023)](https://ui.adsabs.harvard.edu/abs/2023Natur.614..670F/abstract).
Expand Down
5 changes: 5 additions & 0 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
# Changelog
All notable changes to this project will be documented in this file.

### [1.1.1] -- 2023-06-01
#### Added
- Support for eclipse fitting.
- Misc. bug fixes.

### [1.1.0] -- 2023-05-04
#### Added
- Compatibility with jwst v1.8.5.
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from setuptools import setup

setup(name='supreme_spoon',
version='1.1.0',
version='1.1.1',
license='MIT',
author='Michael Radica',
author_email='[email protected]',
Expand Down
75 changes: 49 additions & 26 deletions supreme_spoon/fit_lightcurves.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,8 @@
'theta0_SOSS': r'$\theta_0$', 'theta1_SOSS': r'$\theta_1$',
'theta2_SOSS': r'$\theta_2$',
'GP_sigma_SOSS': r'$GP \sigma$',
'GP_rho_SOSS': r'$GP \rho$', 'rho': r'$\rho$'}
'GP_rho_SOSS': r'$GP \rho$', 'rho': r'$\rho$',
't_secondary_p1': r'$T_{sec}$', 'fp_p1': r'$F_p/F_*$'}

# === Get Detrending Quantities ===
# Get time axis
Expand All @@ -98,6 +99,15 @@
for i, key in enumerate(config['lm_parameters']):
lm_param = lm_data[key]
lm_quantities[:, i] = (lm_param - np.mean(lm_param)) / np.sqrt(np.var(lm_param))
# Eclipses must fit for a baseline, which is done via the linear detrending.
# So add this term to the fits if not already included.
if config['lm_file'] is None and config['occultation_type'] == 'eclipse':
lm_quantities = np.zeros((len(t), 1))
lm_quantities[:, 0] = np.ones_like(t)
config['params'].append('theta0_SOSS')
config['dists'].append('uniform')
config['hyperps'].append([-10, 10])

# Quantity on which to train GP.
if config['gp_file'] is not None:
gp_data = pd.read_csv(config['gp_file'], comment='#')
Expand Down Expand Up @@ -168,13 +178,14 @@
priors[param] = {}
priors[param]['distribution'] = dist
priors[param]['hyperparameters'] = hyperp
# Interpolate LD coefficients from stellar models.
if order == 1 and config['ldcoef_file_o1'] is not None:
q1, q2 = stage4.read_ld_coefs(config['ldcoef_file_o1'], wave_low,
wave_up)
if order == 2 and config['ldcoef_file_o2'] is not None:
q1, q2 = stage4.read_ld_coefs(config['ldcoef_file_o2'], wave_low,
wave_up)
# For transit fits, interpolate LD coefficients from stellar models.
if config['occultation_type'] == 'transit':
if order == 1 and config['ldcoef_file_o1'] is not None:
q1, q2 = stage4.read_ld_coefs(config['ldcoef_file_o1'], wave_low,
wave_up)
if order == 2 and config['ldcoef_file_o2'] is not None:
q1, q2 = stage4.read_ld_coefs(config['ldcoef_file_o2'], wave_low,
wave_up)

# Pack fitting arrays and priors into dictionaries.
data_dict, prior_dict = {}, {}
Expand All @@ -194,16 +205,17 @@

# Prior dictionaries.
prior_dict[thisbin] = copy.deepcopy(priors)
# Update the LD prior for this bin if available.
# Set prior width to 0.2 around the model value - based on findings of
# Patel & Espinoza 2022.
if config['ldcoef_file_o1'] is not None or config['ldcoef_file_o2'] is not None:
if np.isfinite(q1[wavebin]):
prior_dict[thisbin]['q1_SOSS']['distribution'] = 'truncatednormal'
prior_dict[thisbin]['q1_SOSS']['hyperparameters'] = [q1[wavebin], 0.2, 0.0, 1.0]
if np.isfinite(q2[wavebin]):
prior_dict[thisbin]['q2_SOSS']['distribution'] = 'truncatednormal'
prior_dict[thisbin]['q2_SOSS']['hyperparameters'] = [q2[wavebin], 0.2, 0.0, 1.0]
# For transit only; update the LD prior for this bin if available.
if config['occultation_type'] == 'transit':
# Set prior width to 0.2 around the model value - based on
# findings of Patel & Espinoza 2022.
if config['ldcoef_file_o1'] is not None or config['ldcoef_file_o2'] is not None:
if np.isfinite(q1[wavebin]):
prior_dict[thisbin]['q1_SOSS']['distribution'] = 'truncatednormal'
prior_dict[thisbin]['q1_SOSS']['hyperparameters'] = [q1[wavebin], 0.2, 0.0, 1.0]
if np.isfinite(q2[wavebin]):
prior_dict[thisbin]['q2_SOSS']['distribution'] = 'truncatednormal'
prior_dict[thisbin]['q2_SOSS']['hyperparameters'] = [q2[wavebin], 0.2, 0.0, 1.0]

# === Do the Fit ===
# Fit each light curve
Expand Down Expand Up @@ -235,11 +247,17 @@
order_results['dppm_err'].append(np.nan)
# If not skipped, append median and 1-sigma bounds.
else:
pp = fit_results[wavebin].posteriors['posterior_samples']['p_p1']
md, up, lw = juliet.utils.get_quantiles(pp)
order_results['dppm'].append((md**2)*1e6)
err_low = (md**2 - lw**2)*1e6
err_up = (up**2 - md**2)*1e6
post_samples = fit_results[wavebin].posteriors['posterior_samples']
if config['occultation_type'] == 'transit':
md, up, lw = juliet.utils.get_quantiles(post_samples['p_p1'])
order_results['dppm'].append((md**2)*1e6)
err_low = (md**2 - lw**2)*1e6
err_up = (up**2 - md**2)*1e6
else:
md, up, lw = juliet.utils.get_quantiles(post_samples['fp_p1'])
order_results['dppm'].append(md*1e6)
err_low = (md - lw)*1e6
err_up = (up - md)*1e6
order_results['dppm_err'].append(np.mean([err_up, err_low]))

# Make summary plots.
Expand Down Expand Up @@ -343,7 +361,11 @@
infile_header = fits.getheader(config['infile'], 0)
extract_type = infile_header['METHOD']
target = infile_header['TARGET'] + config['planet_letter']
filename = target + '_NIRISS_SOSS_transmission_spectrum' + fit_suffix + '.csv'
if config['occultation_type'] == 'transit':
spec_type = 'transmission'
else:
spec_type = 'emission'
filename = target + '_NIRISS_SOSS_' + spec_type + '_spectrum' + fit_suffix + '.csv'
# Get fit metadata.
# Include fixed parameter values.
fit_metadata = '#\n# Fit Metadata\n'
Expand All @@ -370,7 +392,8 @@
outdir, filename=filename, target=target,
extraction_type=extract_type,
resolution=config['res'],
fit_meta=fit_metadata)
fancyprint('Transmission spectrum saved to {}'.format(outdir+filename))
fit_meta=fit_metadata,
occultation_type=config['occultation_type'])
fancyprint('{0} spectrum saved to {1}'.format(spec_type, outdir+filename))

fancyprint('Done')
1 change: 1 addition & 0 deletions supreme_spoon/fit_lightcurves.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ planet_letter : 'b'

# ===== Fit Priors + Parameters =====
# Fitting priors in juliet format.
# For eclipse fits, do not pass q1_SOSS or q2_SOSS, and instead pass t_secondary_p1 and fp_p1.
params : ['P_p1', 't0_p1', 'p_p1', 'b_p1',
'q1_SOSS', 'q2_SOSS', 'ecc_p1', 'omega_p1', 'a_p1',
'mdilution_SOSS', 'mflux_SOSS', 'sigma_w_SOSS']
Expand Down
17 changes: 9 additions & 8 deletions supreme_spoon/stage1.py
Original file line number Diff line number Diff line change
Expand Up @@ -693,27 +693,28 @@ def jumpstep_in_time(datafile, window=10, thresh=10, fileroot=None,
for i in tqdm(range(nints)):
# Create a stack of the integrations before and after the current
# integration.
up = np.min([nints, i + window + 1])
up = np.min([nints, i + window])
low = np.max([0, i - window])
stack = np.concatenate([cube[low:i], cube[(i + 1):up]])
# Get median and standard deviation of the stack.
local_med = np.nanmedian(stack, axis=0)
local_std = np.nanstd(stack, axis=0)
for g in range(ngroups):
# Find deviant pixels.
ii = np.where(np.abs(cube[i, g] - local_med[g]) >= thresh * local_std[g])
ii = np.abs(cube[i, g] - local_med[g]) >= thresh * local_std[g]
# If ngroup<=2, replace the pixel with the stack median so that a
# ramp can still be fit.
if g < 2:
if ngroups <= 2:
# Do not want to interpolate pixels which are flagged for
# another reason, so only select good pixels or those which
# are flagged for jumps.
jj = np.where((dqcube[i, g][ii] == 0) | (dqcube[i, g][ii] == 4))
jj = (dqcube[i, g] == 0) | (dqcube[i, g] == 4)
# Replace these pixels with the stack median and remove the
# dq flag.
cube[i, g][ii][jj] = local_med[g][ii][jj]
dqcube[i, g][ii][jj] = 0
interp += len(jj[0])
replace = ii & jj
cube[i, g][replace] = local_med[g][replace]
dqcube[i, g][replace] = 0
interp += np.sum(replace)
# If ngroup>2, flag the pixel as having a jump.
else:
# Want to ignore pixels which are already flagged for a jump.
Expand Down Expand Up @@ -903,7 +904,7 @@ def oneoverfstep(datafiles, baseline_ints, even_odd_rows=True,
# doesn't hurt to do it again.
dc = np.zeros_like(sub)
# For group-level corrections.
if np.ndim(currentfile.data == 4):
if np.ndim(currentfile.data) == 4:
dc[:, ::2] = bn.nanmedian(sub[:, ::2], axis=1)[:, None, :]
dc[:, 1::2] = bn.nanmedian(sub[:, 1::2], axis=1)[:, None, :]
# For integration-level corrections.
Expand Down
18 changes: 12 additions & 6 deletions supreme_spoon/stage4.py
Original file line number Diff line number Diff line change
Expand Up @@ -513,8 +513,8 @@ def run_juliet(priors, t_lc, y_lc, yerr_lc, out_folder,

def save_transmission_spectrum(wave, wave_err, dppm, dppm_err, order, outdir,
filename, target, extraction_type, resolution,
fit_meta=''):
"""Write a transmission spectrum to file.
fit_meta='', occultation_type='transit'):
"""Write a transmission/emission spectrum to file.
Parameters
----------
Expand All @@ -523,9 +523,9 @@ def save_transmission_spectrum(wave, wave_err, dppm, dppm_err, order, outdir,
wave_err : array-like[float]
Bin half-widths for each wavelength bin.
dppm : array-like[float]
Transit depth in each bin.
Transit/eclipse depth in each bin.
dppm_err : array-like[float]
Error on the transit depth in each bin.
Error on the transit/eclipse depth in each bin.
order : array-like[int]
SOSS order corresponding to each bin.
outdir : str
Expand All @@ -540,6 +540,8 @@ def save_transmission_spectrum(wave, wave_err, dppm, dppm_err, order, outdir,
Spectral resolution of spectrum.
fit_meta: str
Fitting metadata.
occultation_type : str
Type of occultation; either 'transit' or 'eclipse'.
"""

# Pack the quantities into a dictionary.
Expand All @@ -565,8 +567,12 @@ def save_transmission_spectrum(wave, wave_err, dppm, dppm_err, order, outdir,
f.write(fit_meta)
f.write('# Column wave: Central wavelength of bin (micron)\n')
f.write('# Column wave_err: Wavelength bin halfwidth (micron)\n')
f.write('# Column dppm: (Rp/R*)^2 (ppm)\n')
f.write('# Column dppm_err: Error in (Rp/R*)^2 (ppm)\n')
if occultation_type == 'transit':
f.write('# Column dppm: (Rp/R*)^2 (ppm)\n')
f.write('# Column dppm_err: Error in (Rp/R*)^2 (ppm)\n')
else:
f.write('# Column dppm: (Fp/F*) (ppm)\n')
f.write('# Column dppm_err: Error in (Fp/F*) (ppm)\n')
f.write('# Column order: SOSS diffraction order\n')
f.write('#\n')
df.to_csv(f, index=False)
Expand Down

0 comments on commit 6dd6a33

Please sign in to comment.