diff --git a/bin/live/pycbc_live_combine_single_fits b/bin/live/pycbc_live_combine_single_significance_fits similarity index 88% rename from bin/live/pycbc_live_combine_single_fits rename to bin/live/pycbc_live_combine_single_significance_fits index c2847a1e774..47e183c9e7d 100644 --- a/bin/live/pycbc_live_combine_single_fits +++ b/bin/live/pycbc_live_combine_single_significance_fits @@ -154,31 +154,43 @@ for ifo in args.ifos: logging.info(ifo) fout_ifo = fout.create_group(ifo) l_times = np.array(live_times[ifo]) - fout_ifo.attrs['live_time'] = l_times.sum() + total_time = l_times.sum() + fout_ifo.attrs['live_time'] = total_time fout_ifo['separate_fits/live_times'] = l_times[ad_order] fout_ifo['separate_fits/start_time'] = trigger_file_starts[ad_order] fout_ifo['separate_fits/end_time'] = trigger_file_ends[ad_order] - for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo], - counts_bin[ifo], bu, bl)): - a, c, u, l = a_c_u_l + for counter, (a, c) in enumerate(zip(alphas_bin[ifo], + counts_bin[ifo])): # Sort alpha and counts by date a = np.array(a)[ad_order] c = np.array(c)[ad_order] - invalphan = c / a - mean_alpha = c.mean() / invalphan.mean() - cons_alpha = np.percentile(a, 100 - args.conservative_percentile) + + # ignore anything with the 'invalid' salient values + valid = c > 0 + + fout_ifo[f'separate_fits/bin_{counter:d}/fit_coeff'] = a + fout_ifo[f'separate_fits/bin_{counter:d}/counts'] = c + + if not any(valid): + cons_alphas_out[ifo][counter] = np.nan + alphas_out[ifo][counter] = np.nan + cons_counts_out[ifo][counter] = np.nan + counts_out[ifo][counter] = np.nan + continue + + invalphan = c[valid] / a[valid] + mean_alpha = c[valid].mean() / invalphan.mean() + cons_alpha = np.percentile(a[valid], 100 - args.conservative_percentile) cons_alphas_out[ifo][counter] = cons_alpha alphas_out[ifo][counter] = mean_alpha + # To get the count values, we need to convert to rates and back again - r = c / l_times[ad_order] + r = c[valid]/ l_times[ad_order][valid] cons_rate = np.percentile(r, args.conservative_percentile) - cons_counts_out[ifo][counter] = cons_rate * l_times[ad_order].sum() - counts_out[ifo][counter] = np.mean(r) * l_times[ad_order].sum() - - fout_ifo[f'separate_fits/bin_{counter:d}/fit_coeff'] = a - fout_ifo[f'separate_fits/bin_{counter:d}/counts'] = c + cons_counts_out[ifo][counter] = cons_rate * total_time + counts_out[ifo][counter] = np.mean(r) * total_time # Output the mean average values fout_ifo['mean/fit_coeff'] = alphas_out[ifo] @@ -188,17 +200,18 @@ for ifo in args.ifos: fout_ifo['conservative/fit_coeff'] = cons_alphas_out[ifo] fout_ifo['conservative/counts'] = cons_counts_out[ifo] - # Take some averages for plotting and summary values - overall_invalphan = counts_out[ifo] / alphas_out[ifo] - overall_meanalpha = counts_out[ifo].mean() / overall_invalphan.mean() - # For the fixed version, we just set this to 1 fout_ifo['fixed/counts'] = [1] * len(counts_out[ifo]) fout_ifo['fixed/fit_coeff'] = [0] * len(alphas_out[ifo]) + # Take some averages for plotting and summary values + overall_invalphan = counts_out[ifo] / alphas_out[ifo] + overall_meanalpha = np.nanmean(counts_out[ifo]) \ + / np.nanmean(overall_invalphan) + # Add some useful info to the output file fout_ifo.attrs['mean_alpha'] = overall_meanalpha - fout_ifo.attrs['total_counts'] = counts_out[ifo].sum() + fout_ifo.attrs['total_counts'] = np.nansum(counts_out[ifo]) fout.close() diff --git a/bin/live/pycbc_live_plot_combined_single_fits b/bin/live/pycbc_live_plot_combined_single_significance_fits similarity index 91% 
rename from bin/live/pycbc_live_plot_combined_single_fits rename to bin/live/pycbc_live_plot_combined_single_significance_fits index 5cb729959cd..89dc3781e06 100644 --- a/bin/live/pycbc_live_plot_combined_single_fits +++ b/bin/live/pycbc_live_plot_combined_single_significance_fits @@ -140,7 +140,8 @@ for ifo in ifos: continue l_times = separate_times[ifo] - rate = counts / l_times + with np.errstate(divide='ignore'): + rate = counts / l_times ma = mean_alpha[ifo][i] ca = cons_alpha[ifo][i] @@ -151,7 +152,8 @@ for ifo in ifos: bin_colour = plt.get_cmap(args.colormap)(bin_prop) bin_label = f"duration {bl:.2f}-{bu:.2f}" alpha_lines += ax_alpha.plot(separate_starts[ifo], alphas, c=bin_colour, - label=bin_label) + label=bin_label, marker='.', + markersize=10) alpha_lines.append(ax_alpha.axhline(ma, label="total fit = %.2f" % ma, c=bin_colour, linestyle='--',)) @@ -161,14 +163,22 @@ for ifo in ifos: label=alpha_lab)) count_lines += ax_count.plot(separate_starts[ifo], rate, c=bin_colour, - label=bin_label) + label=bin_label, marker='.', + markersize=10) + + if mr < 1e-3: + mlab = f"mean = {mr:.3e}" + clab = f"{conservative_percentile:d}th %ile = {cr:.3e}" + else: + mlab = f"mean = {mr:.3f}" + clab = f"{conservative_percentile:d}th %ile = {cr:.3f}" + count_lines.append(ax_count.axhline(mr, c=bin_colour, linestyle='--', - label=f"mean = {mr:.3f}")) - count_lab = f"{conservative_percentile:d}th %ile = {cr:.3f}" + label=mlab)) count_lines.append(ax_count.axhline(cr, c=bin_colour, linestyle=':', - label=count_lab)) + label=clab)) alpha_labels = [l.get_label() for l in alpha_lines] ax_alpha.legend(alpha_lines, alpha_labels, loc='lower center', diff --git a/bin/live/pycbc_live_plot_single_trigger_fits b/bin/live/pycbc_live_plot_single_significance_fits similarity index 100% rename from bin/live/pycbc_live_plot_single_trigger_fits rename to bin/live/pycbc_live_plot_single_significance_fits diff --git a/bin/live/pycbc_live_single_trigger_fits b/bin/live/pycbc_live_single_significance_fits similarity index 100% rename from bin/live/pycbc_live_single_trigger_fits rename to bin/live/pycbc_live_single_significance_fits diff --git a/bin/live/pycbc_live_supervise_single_significance_fits b/bin/live/pycbc_live_supervise_single_significance_fits new file mode 100755 index 00000000000..bb7c072159c --- /dev/null +++ b/bin/live/pycbc_live_supervise_single_significance_fits @@ -0,0 +1,517 @@ +#!/usr/bin/env python + +"""Supervise the periodic re-fitting of PyCBC Live single-detector triggers, +and the associated plots. +""" + +import re +import logging +import argparse +from datetime import datetime, timedelta +from dateutil.relativedelta import relativedelta +import time +import copy +import os +import shutil +import subprocess +import numpy as np +import lal +import pycbc +import h5py + + +def symlink(target, link_name): + """Create a symbolic link replacing the destination and checking for + errors. 
+ """ + cp = subprocess.run([ + 'ln', '-sf', target, link_name + ]) + if cp.returncode: + raise subprocess.SubprocessError( + f"Could not link plot {target} to {link_name}" + ) + + +def dict_to_args(opts_dict): + """ + Convert an option dictionary into a list to be used by subprocess.run + """ + dargs = [] + for option in opts_dict.keys(): + dargs.append('--' + option.strip()) + value = opts_dict[option] + if len(value.split()) > 1: + # value is a list, append individually + for v in value.split(): + dargs.append(v) + elif not value: + # option is a flag, do nothing + continue + else: + # Single value option - easy enough + dargs.append(value) + return dargs + + +def mail_volunteers_error(controls, mail_body_lines, subject): + """ + Email a list of people, defined by mail-volunteers-file + To be used for errors or unusual occurences + """ + with open(controls['mail-volunteers-file'], 'r') as mail_volunteers_file: + volunteers = [volunteer.strip() for volunteer in + mail_volunteers_file.readlines()] + logging.info("Emailing %s with warnings", ' '.join(volunteers)) + mail_command = [ + 'mail', + '-s', + subject + ] + mail_command += volunteers + mail_body = '\n'.join(mail_body_lines) + subprocess.run(mail_command, input=mail_body, text=True) + + +def check_trigger_files(filenames, test_options, controls): + """ + Check that the fit coefficients meet criteria set + """ + coeff_upper_limit = float(test_options['upper-limit-coefficient']) + coeff_lower_limit = float(test_options['lower-limit-coefficient']) + warnings = [] + warning_files = [] + for filename in filenames: + warnings_thisfile = [] + with h5py.File(filename, 'r') as trff: + ifos = [k for k in trff.keys() if not k.startswith('bins')] + fit_coeffs = {ifo: trff[ifo]['fit_coeff'][:] for ifo in ifos} + bins_upper = trff['bins_upper'][:] + bins_lower = trff['bins_lower'][:] + # Which bins have at least *some* triggers within the limit + use_bins = bins_lower > float(test_options['duration-bin-lower-limit']) + for ifo in ifos: + coeffs_above = fit_coeffs[ifo][use_bins] > coeff_upper_limit + coeffs_below = fit_coeffs[ifo][use_bins] < coeff_lower_limit + if not any(coeffs_above) and not any(coeffs_below): + continue + # Problem - the fit coefficient is outside the limits + for bl, bu, fc in zip(bins_lower[use_bins], bins_upper[use_bins], + fit_coeffs[ifo][use_bins]): + if fc < coeff_lower_limit or fc > coeff_upper_limit: + warnings_thisfile.append( + f"WARNING - {ifo} fit coefficient {fc:.3f} in bin " + f"{bl}-{bu} outwith limits " + f"{coeff_lower_limit}-{coeff_upper_limit}" + ) + if warnings_thisfile: + warning_files.append(filename) + warnings.append(warnings_thisfile) + + if warnings: + # Some coefficients are outside the range + # Add the fact that this check failed in the logs + logging.warning("Extreme daily fits values found:") + mail_body_lines = ["Extreme daily fits values found:"] + for filename, filewarnings in zip(warning_files, warnings): + logging.warning(filename) + mail_body_lines.append(f"Values in {filename}") + for fw in filewarnings: + logging.warning(" " + fw) + mail_body_lines.append(" " + fw) + mail_volunteers_error(controls, mail_body_lines, + 'PyCBC Live single trigger fits extreme value(s)') + + +def run_and_error(command_arguments): + """ + Wrapper around subprocess.run to catch errors and send emails if required + """ + logging.info("Running " + " ".join(command_arguments)) + command_output = subprocess.run(command_arguments, capture_output=True) + if command_output.returncode: + error_contents = [' 
'.join(command_arguments), + command_output.stderr.decode()] + mail_volunteers_error(controls, error_contents, + f"PyCBC live could not run {command_arguments[0]}") + err_msg = f"Could not run {command_arguments[0]}" + raise subprocess.SubprocessError(err_msg) + + +# These are the option used to control the supervision, and will not be passed +# to the subprocesses +control_options = [ + "check-daily-output", + "combined-days", + "mail-volunteers-file", + "output-directory", + "output-id-str", + "public-dir", + "replay-duration", + "replay-start-time", + "submit-dir", + "trfits-format", + "true-start-time", + "variable-trigger-fits", +] + +# these are options which can be taken by both the daily fit code and the +# combined fitting code +options_both = ['ifos', 'verbose'] + +# These options are only for the daily fit code +daily_fit_options = [ + 'cluster', + 'duration-bin-edges', + 'duration-bin-spacing', + 'duration-from-bank', + 'file-identifier', + 'fit-function', + 'fit-threshold', + 'num-duration-bins', + 'prune-loudest', + 'prune-stat-threshold', + 'prune-window', + 'sngl-ranking', + 'template-cuts', + 'top-directory', + 'trigger-cuts', +] + +combined_fit_options = [ + 'conservative-percentile', +] + +coeff_test_options = [ + 'duration-bin-lower-limit', + 'lower-limit-coefficient', + 'upper-limit-coefficient', +] + +all_options = control_options + options_both + daily_fit_options \ + + combined_fit_options + coeff_test_options + + +def do_fitting(args, day_dt, day_str): + """ + Perform the fits as specified + """ + # Read in the config file and pack into appropriate dictionaries + daily_options = {} + combined_options = {} + test_options = {} + controls = {} + + with open(args.config_file, 'r') as conf_file: + all_lines = conf_file.readlines() + + for line in all_lines: + # Ignore whitespace and comments + line = line.strip() + if not line: + continue + if line.startswith(';'): + continue + + option, value = line.split('=') + option = option.strip() + value = value.strip() + + # If it is a control option, add to the controls dictionary + if option in control_options: + controls[option] = value + + # If the option is not to control the input, then it is passed + # straight to the executable + if option in daily_fit_options or option in options_both: + daily_options[option] = value + + if option in options_both or option in combined_fit_options: + combined_options[option] = value + + if option in coeff_test_options: + test_options[option] = value + + if option not in all_options: + logging.warning("Option %s unrecognised, ignoring", option) + + # The main output directory will have a date subdirectory which we + # put the output into + output_dir = os.path.join(controls['output-directory'], day_str) + subprocess.run(['mkdir', '-p', output_dir]) + if 'public-dir' in controls: + public_dir = os.path.join(controls['public-dir'], *day_str.split('_')) + subprocess.run(['mkdir', '-p', public_dir]) + + if not args.combine_only: + ##### DAILY FITTING ##### + daily_options['analysis-date'] = day_str + file_id_str = f'{day_str}' + if 'output-id-str' in controls: + file_id_str += f"-{controls['output-id-str']}" + out_fname = f'{file_id_str}-TRIGGER-FITS.hdf' + daily_options['output'] = os.path.join(output_dir, out_fname) + daily_args = ['pycbc_live_single_significance_fits'] + daily_args += dict_to_args(daily_options) + + run_and_error(daily_args) + + # Add plotting for daily fits, and linking to the public directory + logging.info("Plotting daily fits") + daily_plot_output = 
os.path.join(output_dir, + '{ifo}-' + f'{out_fname[:-3]}png') + daily_plot_arguments = [ + 'pycbc_live_plot_single_significance_fits', + '--trigger-fits-file', + daily_options['output'], + '--output-plot-name-format', + daily_plot_output, + '--log-colormap' + ] + run_and_error(daily_plot_arguments) + + # Link the plots to the public-dir if wanted + if 'public-dir' in controls: + daily_plot_outputs = [daily_plot_output.format(ifo=ifo) for ifo in + daily_options['ifos'].split()] + logging.info("Linking daily fits plots") + for dpo in daily_plot_outputs: + symlink(dpo, public_dir) + + if args.daily_only: + if 'check-daily-output' in controls: + logging.info( + "Checking that fit coefficients above %s for bins above %ss", + test_options['lower-limit-coefficient'], + test_options['duration-bin-lower-limit'] + ) + check_trigger_files( + [daily_options['output']], + test_options, + controls + ) + logging.info('Done') + exit() + + ##### COMBINED FITTING ##### + combined_days = int(controls['combined-days']) + if 'replay-start-time' in controls: + replay_start_time = int(controls['replay-start-time']) + true_start_time = int(controls['true-start-time']) + replay_duration = int(controls['replay-duration']) + rep_start_utc = lal.GPSToUTC(replay_start_time)[0:6] + + dt_replay_start = datetime( + year=rep_start_utc[0], + month=rep_start_utc[1], + day=rep_start_utc[2], + hour=rep_start_utc[3], + minute=rep_start_utc[4], + second=rep_start_utc[5] + ) + + td = (day_dt - dt_replay_start).total_seconds() + + # Time since the start of this replay + time_since_replay = np.remainder(td, replay_duration) + + # Add this on to the original start time to get the current time of + # the replay data + true_utc = lal.GPSToUTC(true_start_time)[0:6] + dt_true_start = datetime( + year=true_utc[0], + month=true_utc[1], + day=true_utc[2], + hour=true_utc[3], + minute=true_utc[4], + second=true_utc[5] + ) + + # Original time of the data being replayed right now + current_date = dt_true_start + timedelta(seconds=time_since_replay) + else: + current_date = day_dt + + date_test = current_date + timedelta(days=1) + + logging.info("Finding trigger fit files for combination") + if 'check-daily-output' in controls: + logging.info( + "Checking all files that fit coefficients above %s for bins " + "above %ss", + test_options['lower-limit-coefficient'], + test_options['duration-bin-lower-limit'] + ) + + trfits_files = [] + missed_files = 0 + found_files = 0 + while found_files < combined_days and missed_files < 10: + # Loop through the possible file locations and see if the file exists + date_test -= timedelta(days=1) + date_out = date_test.strftime("%Y_%m_%d") + trfits_filename = controls['trfits-format'].format(date=date_out) + # Check that the file exists: + if not os.path.exists(trfits_filename): + missed_files += 1 + logging.info(f"File {trfits_filename} does not exist - skipping") + continue + if not len(trfits_files): + end_date = date_out + # This is now the oldest file + first_date = date_out + # reset the "missed files" counter, and add to the "found files" + missed_files = 0 + found_files += 1 + trfits_files.append(trfits_filename) + + if 'check-daily-output' in controls: + check_trigger_files(trfits_files, test_options, controls) + + if missed_files == 10: + # If more than 10 days between files, something wrong with analysis. 
+ # warn and use fewer files - 10 here is chosen to be an unusual amount + # of time for the analysis to be down in standard operation + logging.warning('More than 10 days between files, only using ' + f'{found_files} files for combination!') + + file_id_str = f'{first_date}-{end_date}' + if 'output-id-str' in controls: + file_id_str += f"-{controls['output-id-str']}" + out_fname = f'{file_id_str}-TRIGGER_FITS_COMBINED' + combined_options['output'] = os.path.join(output_dir, out_fname + '.hdf') + + if not trfits_files: + raise ValueError("No files meet the criteria") + + combined_options['trfits-files'] = ' '.join(trfits_files) + + combined_args = ['pycbc_live_combine_single_significance_fits'] + combined_args += dict_to_args(combined_options) + + run_and_error(combined_args) + + logging.info('Copying combined fits file to local filesystem') + try: + shutil.copyfile( + combined_options['output'], + controls['variable-trigger-fits'] + ) + except Exception as e: + mail_volunteers_error( + controls, + [str(e)], + "PyCBC live could not copy to variable trigger fits file" + ) + raise e + + + logging.info( + "%s updated to link to %s", + controls['variable-trigger-fits'], + combined_options['output'] + ) + + logging.info("Plotting combined fits") + # Add plotting for combined fits, and linking to the public directory + combined_plot_output = os.path.join(output_dir, + f"{{ifo}}-{out_fname}-{{type}}.png") + combined_plot_arguments = [ + 'pycbc_live_plot_combined_single_significance_fits', + '--combined-fits-file', + combined_options['output'], + '--output-plot-name-format', + combined_plot_output, + '--log-colormap' + ] + + run_and_error(combined_plot_arguments) + + combined_plot_outputs = [ + combined_plot_output.format(ifo=ifo, type='fit_coeffs') for ifo in + combined_options['ifos'].split() + ] + combined_plot_outputs += [ + combined_plot_output.format(ifo=ifo, type='counts') for ifo in + combined_options['ifos'].split() + ] + + # Link the plots to the public-dir if wanted + if 'public-dir' in controls: + logging.info("Linking combined fits") + for cpo in combined_plot_outputs: + symlink(cpo, public_dir) + + logging.info('Done') + + +def wait_for_utc_time(target_str): + """Wait until the UTC time is as given by `target_str`, in HH:MM:SS format. + """ + target_hour, target_minute, target_second = map(int, target_str.split(':')) + now = datetime.utcnow() + # for today's target, take now and replace the time + target_today = now + relativedelta( + hour=target_hour, minute=target_minute, second=target_second + ) + # for tomorrow's target, take now, add one day, and replace the time + target_tomorrow = now + relativedelta( + days=1, hour=target_hour, minute=target_minute, second=target_second + ) + next_target = target_today if now <= target_today else target_tomorrow + sleep_seconds = (next_target - now).total_seconds() + logging.info('Waiting %.0f s', sleep_seconds) + time.sleep(sleep_seconds) + + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument('--config-file', required=True) +parser.add_argument( + '--date', + help='Date to analyse, if not given, will analyse yesterday (UTC). ' + 'Format YYYY_MM_DD. Do not use if using --run-daily-at.' +) +parser.add_argument( + '--combine-only', + action='store_true', + help="Only do the combination of singles fit files." +) +parser.add_argument( + '--daily-only', + action='store_true', + help="Only do the daily singles fitting." 
+) +parser.add_argument( + '--run-daily-at', + metavar='HH:MM:SS', + help='Stay running and repeat the fitting daily at the given UTC hour.' +) +args = parser.parse_args() + +pycbc.init_logging(True) + +if args.run_daily_at is not None and args.date is not None: + parser.error('Cannot take --run-daily-at and --date at the same time') + +if args.run_daily_at is not None: + # keep running and repeat the fitting every day at the given hour + if not re.match('[0-9][0-9]:[0-9][0-9]:[0-9][0-9]', args.run_daily_at): + parser.error('--run-daily-at takes a UTC time in the format HH:MM:SS') + logging.info('Starting in daily run mode') + while True: + wait_for_utc_time(args.run_daily_at) + logging.info('==== Time to update the single fits, waking up ====') + # Get the date string for yesterday's triggers + day_dt = datetime.utcnow() - timedelta(days=1) + day_str = day_dt.strftime('%Y_%m_%d') + do_fitting(args, day_dt, day_str) +else: + # run just once + if args.date: + day_str = args.date + day_dt = datetime.strptime(args.date, '%Y_%m_%d') + else: + # Get the date string for yesterday's triggers + day_dt = datetime.utcnow() - timedelta(days=1) + day_str = day_dt.strftime('%Y_%m_%d') + do_fitting(args, day_dt, day_str) diff --git a/bin/pycbc_live b/bin/pycbc_live index 7cfb3af8290..9a310a2692a 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -104,6 +104,7 @@ class LiveEventManager(object): self.padata = livepau.PAstroData(args.p_astro_spec, args.bank_file) self.use_date_prefix = args.day_hour_output_prefix self.ifar_upload_threshold = args.ifar_upload_threshold + self.pvalue_lookback_time = args.pvalue_lookback_time self.pvalue_livetime = args.pvalue_combination_livetime self.gracedb_server = args.gracedb_server self.gracedb_search = args.gracedb_search @@ -221,7 +222,8 @@ class LiveEventManager(object): self.data_readers[ifo], self.bank, template_id, - coinc_times + coinc_times, + lookback=self.pvalue_lookback_time ) if pvalue_info is None: continue @@ -802,6 +804,20 @@ class LiveEventManager(object): store_psd[ifo].save(fname, group='%s/psd' % ifo) +def check_max_length(args, waveforms): + """Check that the `--max-length` option is sufficient to accomodate the + longest template in the bank and the PSD estimation options. + """ + lengths = numpy.array([1.0 / wf.delta_f for wf in waveforms]) + psd_len = args.psd_segment_length * (args.psd_samples // 2 + 1) + max_length = max(lengths.max() + args.pvalue_lookback_time, psd_len) + if max_length > args.max_length: + raise ValueError( + '--max-length is too short for this template bank. ' + f'Use at least {max_length}.' 
+ ) + + parser = argparse.ArgumentParser(description=__doc__) pycbc.waveform.bank.add_approximant_arg(parser) parser.add_argument('--verbose', action='store_true') @@ -962,6 +978,10 @@ parser.add_argument('--enable-background-estimation', default=False, action='sto parser.add_argument('--ifar-double-followup-threshold', type=float, required=True, help='Inverse-FAR threshold to followup double coincs with' 'additional detectors') +parser.add_argument('--pvalue-lookback-time', type=float, default=150, + metavar='SECONDS', + help='Lookback time for the calculation of the p-value in ' + 'followup detectors.') parser.add_argument('--pvalue-combination-livetime', type=float, required=True, help="Livetime used for p-value combination with followup " "detectors, in years") @@ -1106,13 +1126,10 @@ with ctx: print(e) exit() - maxlen = args.psd_segment_length * (args.psd_samples // 2 + 1) if evnt.rank > 0: bank.table.sort(order='mchirp') waveforms = list(bank[evnt.rank-1::evnt.size-1]) - lengths = numpy.array([1.0 / wf.delta_f for wf in waveforms]) - psd_len = args.psd_segment_length * (args.psd_samples // 2 + 1) - maxlen = max(lengths.max(), psd_len) + check_max_length(args, waveforms) mf = LiveBatchMatchedFilter(waveforms, args.snr_threshold, args.chisq_bins, sg_chisq, snr_abort_threshold=args.snr_abort_threshold, @@ -1134,11 +1151,8 @@ with ctx: # Initialize the data readers for all detectors. For rank 0, we need data # from all detectors, including the localization-only ones. For higher # ranks, we only need the detectors that can generate candidates. - if args.max_length is not None: - maxlen = args.max_length - maxlen = int(maxlen) data_reader = { - ifo: StrainBuffer.from_cli(ifo, args, maxlen) + ifo: StrainBuffer.from_cli(ifo, args) for ifo in (evnt.ifos if evnt.rank == 0 else evnt.trigg_ifos) } evnt.data_readers = data_reader diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index 5094a02dc62..188d636aceb 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -27,9 +27,14 @@ from pycbc.live import snr_optimizer parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument('-v', '--verbose', action='count', default=0, + help='Add verbosity to logging. Adding the option ' + 'multiple times makes logging progressively ' + 'more verbose, e.g. --verbose or -v provides ' + 'logging at the info level, but -vv or ' + '--verbose --verbose provides debug logging.') parser.add_argument('--version', action='version', version=version.git_verbose_msg) -parser.add_argument('--verbose', action='store_true') parser.add_argument('--params-file', required=True, help='Location of the attributes file created by PyCBC ' 'Live') @@ -45,12 +50,12 @@ parser.add_argument('--psd-files', type=str, nargs='+', 'by PyCBC Live.') parser.add_argument('--approximant', required=True, help='Waveform approximant string.') -parser.add_argument('--snr-threshold', default=4.0, +parser.add_argument('--snr-threshold', type=float, default=4.0, help='If the SNR in ifo X is below this threshold do not ' 'consider it part of the coincidence. Not implemented') -parser.add_argument('--chirp-time-f-lower', default=20., +parser.add_argument('--chirp-time-f-lower', type=float, default=20., help='Starting frequency for chirp time window (Hz).') -parser.add_argument('--chirp-time-window', default=2., +parser.add_argument('--chirp-time-window', type=float, default=2., help='Chirp time window (s).') parser.add_argument('--gracedb-server', metavar='URL', help='URL of GraceDB server API for uploading events. 
' diff --git a/examples/live/run.sh b/examples/live/run.sh index 8857c5d996e..3c523d6bf00 100755 --- a/examples/live/run.sh +++ b/examples/live/run.sh @@ -131,7 +131,7 @@ python -m mpi4py `which pycbc_live` \ --sample-rate 2048 \ --enable-bank-start-frequency \ --low-frequency-cutoff ${f_min} \ ---max-length 256 \ +--max-length 512 \ --approximant "SPAtmplt:mtotal<4" "SEOBNRv4_ROM:else" \ --chisq-bins "0.72*get_freq('fSEOBNRv4Peak',params.mass1,params.mass2,params.spin1z,params.spin2z)**0.7" \ --snr-abort-threshold 500 \ diff --git a/pycbc/events/single.py b/pycbc/events/single.py index f41b2dfb131..f176840c4b4 100644 --- a/pycbc/events/single.py +++ b/pycbc/events/single.py @@ -223,8 +223,17 @@ def calculate_ifar(self, sngl_ranking, duration): rate = rates[dur_bin] coeff = coeffs[dur_bin] + if np.isnan(coeff) or np.isnan(rate): + logger.warning( + "Single trigger fits are not valid - singles " + "cannot be assessed for this detector at this time." + ) + return None + rate_louder = rate * fits.cum_fit('exponential', [sngl_ranking], coeff, thresh)[0] + # apply a trials factor of the number of duration bins rate_louder *= len(rates) + return conv.sec_to_year(1. / rate_louder) diff --git a/pycbc/filter/matchedfilter.py b/pycbc/filter/matchedfilter.py index 616289f9c08..c14a7104220 100644 --- a/pycbc/filter/matchedfilter.py +++ b/pycbc/filter/matchedfilter.py @@ -1804,7 +1804,50 @@ def followup_event_significance(ifo, data_reader, bank, to determine if the SNR in the first detector has a significant peak in the on-source window. The significance is given in terms of a p-value. See Dal Canton et al. 2021 (https://arxiv.org/abs/2008.07494) - for details. + for details. A portion of the SNR time series around the on-source window + is also returned for use in BAYESTAR. + + If the calculation cannot be carried out, for example because `ifo` is + not in observing mode at the requested time, then None is returned. + Otherwise, the dict contains the following keys. `snr_series` is a + TimeSeries object with the SNR time series for BAYESTAR. `peak_time` is the + time of maximum SNR in the on-source window. `pvalue` is the p-value for + the maximum on-source SNR compared to the off-source realizations. + `pvalue_saturated` is a bool indicating whether the p-value is limited by + the number of off-source realizations, i.e. whether the maximum on-source + SNR is larger than all the off-source ones. `sigma2` is the SNR + normalization (squared) for the given template and detector. + + Parameters + ---------- + ifo: str + Which detector is being used for the calculation. + data_reader: StrainBuffer + StrainBuffer object providing the data for the given detector. + bank: LiveFilterBank + Template bank object providing the template related quantities. + template_id: int + Index of the template in the bank. + coinc_times: dict + Dictionary keyed by detector names reporting the coalescence times of + a candidate measured at the different detectors. Used to define the + on-source window of the candidate in `ifo`. + coinc_threshold: float + Nominal statistical uncertainty in `coinc_times`; expands the + on-source window by twice the given amount. + lookback: float + Nominal amount of time to use for the calculation of the onsource and + offsource SNR time series. The actual time may be reduced depending on + the duration of the template and the strain buffer in the data reader + (if so, a warning is logged). + duration: float + Duration of the SNR time series to be reported to BAYESTAR. 
+ + Returns + ------- + followup_info: dict or None + Results of the followup calculation (see above) or None if `ifo` did + not have usable data. """ from pycbc.waveform import get_waveform_filter_length_in_time tmplt = bank.table[template_id] @@ -1830,17 +1873,63 @@ def followup_event_significance(ifo, data_reader, bank, onsource_start -= coinc_threshold onsource_end += coinc_threshold - # Calculate how much time needed to calculate significance - trim_pad = (data_reader.trim_padding * data_reader.strain.delta_t) - bdur = int(lookback + 2.0 * trim_pad + length_in_time) - if bdur > data_reader.strain.duration * .75: - bdur = data_reader.strain.duration * .75 + # Calculate how much time is needed to calculate the significance. + # At the minimum, we need enough time to include the lookback, plus time + # that we will throw away because of corruption from finite-duration filter + # responses (this is equal to the nominal padding plus the template + # duration). Next, for efficiency, we round the resulting duration up to + # align it with one of the frequency resolutions preferred by the template + # bank. And finally, the resulting duration must fit into the strain buffer + # available in the data reader, so we check that. + trim_pad = data_reader.trim_padding * data_reader.strain.delta_t + buffer_duration = lookback + 2 * trim_pad + length_in_time + buffer_samples = bank.round_up(int(buffer_duration * bank.sample_rate)) + max_safe_buffer_samples = int( + 0.9 * data_reader.strain.duration * bank.sample_rate + ) + if buffer_samples > max_safe_buffer_samples: + buffer_samples = max_safe_buffer_samples + new_lookback = ( + buffer_samples / bank.sample_rate - (2 * trim_pad + length_in_time) + ) + # Require a minimum lookback time of twice the onsource window or SNR + # time series (whichever is longer) so we have enough data for the + # onsource window, the SNR time series, and at least a few background + # samples + min_required_lookback = 2 * max(onsource_end - onsource_start, duration) + if new_lookback > min_required_lookback: + logging.warning( + 'Strain buffer too short for a lookback time of %f s, ' + 'reducing lookback to %f s', + lookback, + new_lookback + ) + else: + logging.error( + 'Strain buffer too short to compute the followup SNR time ' + 'series for template %d, will not use %s for followup. 
' + 'Either use shorter templates, or raise --max-length.', + template_id, + ifo + ) + return None + buffer_duration = buffer_samples / bank.sample_rate # Require all strain be valid within lookback time if data_reader.state is not None: - state_start_time = data_reader.strain.end_time \ - - data_reader.reduced_pad * data_reader.strain.delta_t - bdur - if not data_reader.state.is_extent_valid(state_start_time, bdur): + state_start_time = ( + data_reader.strain.end_time + - data_reader.reduced_pad * data_reader.strain.delta_t + - buffer_duration + ) + if not data_reader.state.is_extent_valid( + state_start_time, buffer_duration + ): + logging.info( + '%s strain buffer contains invalid data during lookback, ' + 'will not use for followup', + ifo + ) return None # We won't require that all DQ checks be valid for now, except at @@ -1849,16 +1938,23 @@ def followup_event_significance(ifo, data_reader, bank, dq_start_time = onsource_start - duration / 2.0 dq_duration = onsource_end - onsource_start + duration if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration): + logging.info( + '%s DQ buffer indicates invalid data during onsource window, ' + 'will not use for followup', + ifo + ) return None - # Calculate SNR time series for this duration - htilde = bank.get_template(template_id, min_buffer=bdur) + # Calculate SNR time series for the entire lookback duration + htilde = bank.get_template( + template_id, delta_f=bank.sample_rate / float(buffer_samples) + ) stilde = data_reader.overwhitened_data(htilde.delta_f) sigma2 = htilde.sigmasq(stilde.psd) snr, _, norm = matched_filter_core(htilde, stilde, h_norm=sigma2) - # Find peak in on-source and determine p-value + # Find peak SNR in on-source and determine p-value onsrc = snr.time_slice(onsource_start, onsource_end) peak = onsrc.abs_arg_max() peak_time = peak * snr.delta_t + onsrc.start_time diff --git a/pycbc/psd/variation.py b/pycbc/psd/variation.py index 538eac83ed2..e8aaa547aa2 100644 --- a/pycbc/psd/variation.py +++ b/pycbc/psd/variation.py @@ -42,7 +42,7 @@ def create_full_filt(freqs, filt, plong, srate, psd_duration): fweight = norm * fweight fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong) fwhiten[0] = 0. - full_filt = sig.hann(int(psd_duration * srate)) * numpy.roll( + full_filt = sig.windows.hann(int(psd_duration * srate)) * numpy.roll( irfft(fwhiten * fweight), int(psd_duration / 2) * srate) return full_filt diff --git a/pycbc/results/psd.py b/pycbc/results/psd.py index 103671ccd03..3a69c9e7d2d 100644 --- a/pycbc/results/psd.py +++ b/pycbc/results/psd.py @@ -30,13 +30,12 @@ from pycbc import DYN_RANGE_FAC -def generate_asd_plot(psddict, output_filename): +def generate_asd_plot(psddict, output_filename, f_min=10.): """ Generate an ASD plot as used for upload to GraceDB. 
Parameters ---------- - psddict: dictionary A dictionary keyed on ifo containing the PSDs as FrequencySeries objects @@ -44,22 +43,32 @@ def generate_asd_plot(psddict, output_filename): output_filename: string The filename for the plot to be saved to + f_min: float + Minimum frequency at which anything should be plotted + Returns ------- None """ from matplotlib import pyplot as plt asd_fig, asd_ax = plt.subplots(1) + asd_min = [1E-24] # Default minimum to plot + for ifo in sorted(psddict.keys()): curr_psd = psddict[ifo] - # Can't plot log(0) so start from point 1 - asd_ax.loglog(curr_psd.sample_frequencies[1:], - curr_psd[1:] ** 0.5 / DYN_RANGE_FAC, - c=ifo_color(ifo), label=ifo) + freqs = curr_psd.sample_frequencies + physical = (freqs >= f_min) # Ignore lower frequencies + asd_to_plot = curr_psd[physical] ** 0.5 / DYN_RANGE_FAC + asd_min.append(min(asd_to_plot)) + asd_ax.loglog(freqs[physical], + asd_to_plot, + c=ifo_color(ifo), + label=ifo) + asd_ax.grid(True) asd_ax.legend() - asd_ax.set_xlim([10, 1300]) - asd_ax.set_ylim([3E-24, 1E-20]) + asd_ax.set_xlim([f_min, 1300]) + asd_ax.set_ylim([min(asd_min), 1E-20]) asd_ax.set_xlabel('Frequency (Hz)') asd_ax.set_ylabel('ASD') asd_fig.savefig(output_filename) diff --git a/pycbc/results/render.py b/pycbc/results/render.py index d10c4989e6d..3f58c4bf6fb 100644 --- a/pycbc/results/render.py +++ b/pycbc/results/render.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright (C) 2015 Christopher M. Biwer # # This program is free software; you can redistribute it and/or modify it diff --git a/pycbc/results/versioning.py b/pycbc/results/versioning.py index 678df9c6fe0..104fac7a5fe 100644 --- a/pycbc/results/versioning.py +++ b/pycbc/results/versioning.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright (C) 2015 Ian Harry # # This program is free software; you can redistribute it and/or modify it @@ -22,8 +20,9 @@ import urllib.parse from pycbc.results import save_fig_with_metadata, html_escape -import lal, lalframe -import pycbc.version, glue.git_version +import lal +import lalframe +import pycbc.version def get_library_version_info(): """This will return a list of dictionaries containing versioning @@ -90,19 +89,6 @@ def add_info_new_version(info_dct, curr_module, extra_str): pass library_list.append(lalsimulationinfo) - glueinfo = {} - glueinfo['Name'] = 'LSCSoft-Glue' - glueinfo['ID'] = glue.git_version.id - glueinfo['Status'] = glue.git_version.status - glueinfo['Version'] = glue.git_version.version - glueinfo['Tag'] = glue.git_version.tag - glueinfo['Author'] = glue.git_version.author - glueinfo['Builder'] = glue.git_version.builder - glueinfo['Branch'] = glue.git_version.branch - glueinfo['Committer'] = glue.git_version.committer - glueinfo['Date'] = glue.git_version.date - library_list.append(glueinfo) - pycbcinfo = {} pycbcinfo['Name'] = 'PyCBC' pycbcinfo['ID'] = pycbc.version.version diff --git a/pycbc/strain/strain.py b/pycbc/strain/strain.py index 2793012d565..05b8cc64e6a 100644 --- a/pycbc/strain/strain.py +++ b/pycbc/strain/strain.py @@ -1418,8 +1418,8 @@ def execute_cached_ifft(*args, **kwargs): class StrainBuffer(pycbc.frame.DataBuffer): def __init__(self, frame_src, channel_name, start_time, - max_buffer=512, - sample_rate=4096, + max_buffer, + sample_rate, low_frequency_cutoff=20, highpass_frequency=15.0, highpass_reduction=200.0, @@ -1460,9 +1460,9 @@ def __init__(self, frame_src, channel_name, start_time, Name of the channel to read from the frame files start_time: Time to start reading from. 
- max_buffer: {int, 512}, Optional - Length of the buffer in seconds - sample_rate: {int, 2048}, Optional + max_buffer: int + Length of the strain buffer in seconds. + sample_rate: int, Optional Rate in Hz to sample the data. low_frequency_cutoff: {float, 20}, Optional The low frequency cutoff to use for inverse spectrum truncation @@ -1534,7 +1534,7 @@ def __init__(self, frame_src, channel_name, start_time, filesystem. """ super(StrainBuffer, self).__init__(frame_src, channel_name, start_time, - max_buffer=32, + max_buffer=max_buffer, force_update_cache=force_update_cache, increment_update_cache=increment_update_cache) @@ -1950,7 +1950,7 @@ def advance(self, blocksize, timeout=10): return self.wait_duration <= 0 @classmethod - def from_cli(cls, ifo, args, maxlen): + def from_cli(cls, ifo, args): """Initialize a StrainBuffer object (data reader) for a particular detector. """ @@ -1982,34 +1982,38 @@ def from_cli(cls, ifo, args, maxlen): frame_src = [args.frame_src[ifo]] strain_channel = ':'.join([ifo, args.channel_name[ifo]]) - return cls(frame_src, strain_channel, - args.start_time, max_buffer=maxlen * 2, - state_channel=state_channel, - data_quality_channel=dq_channel, - idq_channel=idq_channel, - idq_state_channel=idq_state_channel, - idq_threshold=args.idq_threshold, - sample_rate=args.sample_rate, - low_frequency_cutoff=args.low_frequency_cutoff, - highpass_frequency=args.highpass_frequency, - highpass_reduction=args.highpass_reduction, - highpass_bandwidth=args.highpass_bandwidth, - psd_samples=args.psd_samples, - trim_padding=args.trim_padding, - psd_segment_length=args.psd_segment_length, - psd_inverse_length=args.psd_inverse_length, - autogating_threshold=args.autogating_threshold, - autogating_cluster=args.autogating_cluster, - autogating_pad=args.autogating_pad, - autogating_width=args.autogating_width, - autogating_taper=args.autogating_taper, - autogating_duration=args.autogating_duration, - autogating_psd_segment_length=args.autogating_psd_segment_length, - autogating_psd_stride=args.autogating_psd_stride, - psd_abort_difference=args.psd_abort_difference, - psd_recalculate_difference=args.psd_recalculate_difference, - force_update_cache=args.force_update_cache, - increment_update_cache=args.increment_update_cache[ifo], - analyze_flags=analyze_flags, - data_quality_flags=dq_flags, - dq_padding=args.data_quality_padding) + return cls( + frame_src, + strain_channel, + args.start_time, + max_buffer=args.max_length, + state_channel=state_channel, + data_quality_channel=dq_channel, + idq_channel=idq_channel, + idq_state_channel=idq_state_channel, + idq_threshold=args.idq_threshold, + sample_rate=args.sample_rate, + low_frequency_cutoff=args.low_frequency_cutoff, + highpass_frequency=args.highpass_frequency, + highpass_reduction=args.highpass_reduction, + highpass_bandwidth=args.highpass_bandwidth, + psd_samples=args.psd_samples, + trim_padding=args.trim_padding, + psd_segment_length=args.psd_segment_length, + psd_inverse_length=args.psd_inverse_length, + autogating_threshold=args.autogating_threshold, + autogating_cluster=args.autogating_cluster, + autogating_pad=args.autogating_pad, + autogating_width=args.autogating_width, + autogating_taper=args.autogating_taper, + autogating_duration=args.autogating_duration, + autogating_psd_segment_length=args.autogating_psd_segment_length, + autogating_psd_stride=args.autogating_psd_stride, + psd_abort_difference=args.psd_abort_difference, + psd_recalculate_difference=args.psd_recalculate_difference, + 
force_update_cache=args.force_update_cache, + increment_update_cache=args.increment_update_cache[ifo], + analyze_flags=analyze_flags, + data_quality_flags=dq_flags, + dq_padding=args.data_quality_padding + ) diff --git a/pycbc/types/timeseries.py b/pycbc/types/timeseries.py index 6242f9f418c..d25af89ded6 100644 --- a/pycbc/types/timeseries.py +++ b/pycbc/types/timeseries.py @@ -188,10 +188,10 @@ def time_slice(self, start, end, mode='floor'): start_idx = float(start - self.start_time) * self.sample_rate end_idx = float(end - self.start_time) * self.sample_rate - if _numpy.isclose(start_idx, round(start_idx)): + if _numpy.isclose(start_idx, round(start_idx), rtol=0, atol=1E-3): start_idx = round(start_idx) - if _numpy.isclose(end_idx, round(end_idx)): + if _numpy.isclose(end_idx, round(end_idx), rtol=0, atol=1E-3): end_idx = round(end_idx) if mode == 'floor': diff --git a/pycbc/waveform/bank.py b/pycbc/waveform/bank.py index 0761ba10d95..12f456bfa1e 100644 --- a/pycbc/waveform/bank.py +++ b/pycbc/waveform/bank.py @@ -582,34 +582,64 @@ def __getitem__(self, index): return self.get_template(index) - def get_template(self, index, min_buffer=None): + def freq_resolution_for_template(self, index): + """Compute the correct resolution for a frequency series that contains + a given template in the bank. + """ + from pycbc.waveform.waveform import props + + time_duration = self.minimum_buffer + time_duration += 0.5 + params = props(self.table[index]) + params.pop('approximant') approximant = self.approximant(index) - f_end = self.end_frequency(index) - flow = self.table[index].f_lower + waveform_duration = pycbc.waveform.get_waveform_filter_length_in_time( + approximant, **params + ) + if waveform_duration is None: + raise RuntimeError( + 'Template waveform {approximant} not recognized!' + ) + time_duration += waveform_duration + td_samples = self.round_up(time_duration * self.sample_rate) + return self.sample_rate / float(td_samples) + + def get_template(self, index, delta_f=None): + """Calculate and return the frequency-domain waveform for the template + with the given index. The frequency resolution can optionally be given. - # Determine the length of time of the filter, rounded up to - # nearest power of two - if min_buffer is None: - min_buffer = self.minimum_buffer - min_buffer += 0.5 + Parameters + ---------- + index: int + Index of the template in the bank. + delta_f: float, optional + Resolution of the resulting frequency series. If not given, it is + calculated from the time duration of the template. - from pycbc.waveform.waveform import props - p = props(self.table[index]) - p.pop('approximant') - buff_size = pycbc.waveform.get_waveform_filter_length_in_time(approximant, **p) - if not buff_size: - raise RuntimeError('Template waveform %s not recognized!' % approximant) + Returns + ------- + htilde: FrequencySeries + Template waveform in the frequency domain. 
+ """ + approximant = self.approximant(index) + f_end = self.end_frequency(index) + flow = self.table[index].f_lower - tlen = self.round_up((buff_size + min_buffer) * self.sample_rate) - flen = int(tlen / 2 + 1) + if delta_f is None: + delta_f = self.freq_resolution_for_template(index) - delta_f = self.sample_rate / float(tlen) + flen = int(self.sample_rate / (2 * delta_f) + 1) if f_end is None or f_end >= (flen * delta_f): f_end = (flen - 1) * delta_f - logging.info("Generating %s, %ss, %i, starting from %s Hz", - approximant, 1.0 / delta_f, index, flow) + logging.info( + "Generating %s, duration %s s, index %i, starting from %s Hz", + approximant, + 1.0 / delta_f, + index, + flow + ) # Get the waveform filter distance = 1.0 / DYN_RANGE_FAC diff --git a/pycbc/workflow/core.py b/pycbc/workflow/core.py index d0bdcccba2a..cc8cf5d2e6d 100644 --- a/pycbc/workflow/core.py +++ b/pycbc/workflow/core.py @@ -36,7 +36,6 @@ import lal import lal.utils import Pegasus.api # Try and move this into pegasus_workflow -from glue import lal as gluelal from ligo import segments from ligo.lw import lsctables, ligolw from ligo.lw import utils as ligolw_utils @@ -1523,6 +1522,8 @@ def convert_to_lal_cache(self): """ Return all files in this object as a glue.lal.Cache object """ + from glue import lal as gluelal + lal_cache = gluelal.Cache([]) for entry in self: try: diff --git a/pycbc/workflow/datafind.py b/pycbc/workflow/datafind.py index 9ccf09289b9..b6de44868a5 100644 --- a/pycbc/workflow/datafind.py +++ b/pycbc/workflow/datafind.py @@ -33,7 +33,7 @@ import logging from ligo import segments from ligo.lw import utils, table -from glue import lal +from gwdatafind import find_urls as find_frame_urls from pycbc.workflow.core import SegFile, File, FileList, make_analysis_dir from pycbc.frame import datafind_connection from pycbc.io.ligolw import LIGOLWContentHandler @@ -686,6 +686,8 @@ def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None): datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline. """ + from glue import lal + if tags is None: tags = [] @@ -820,6 +822,8 @@ def get_missing_segs_from_frame_file_cache(datafindcaches): missingFrames: Dict. of ifo keyed lal.Cache instances The list of missing frames """ + from glue import lal + missingFrameSegs = {} missingFrames = {} for cache in datafindcaches: @@ -974,6 +978,8 @@ def run_datafind_instance(cp, outputDir, connection, observatory, frameType, Cache file listing all of the datafind output files for use later in the pipeline. """ + from glue import lal + if tags is None: tags = [] diff --git a/setup.py b/setup.py index 12f6c69ff75..1e59cda491c 100755 --- a/setup.py +++ b/setup.py @@ -122,7 +122,7 @@ def __getattr__(self, attr): vinfo = _version_helper.generate_git_version_info() except: vinfo = vdummy() - vinfo.version = '2.1.5' + vinfo.version = '2.1.6' vinfo.release = 'True' with open('pycbc/version.py', 'wb') as f:
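
A note on the `--max-length` change above: the new `check_max_length()` in `bin/pycbc_live` requires the strain buffer to hold both the longest template plus the p-value lookback time and the PSD estimation stretch, which is why the example `run.sh` raises `--max-length` from 256 to 512. The sketch below is a minimal standalone illustration of that requirement, not part of the patch; the function name `minimum_max_length` and the numeric values are hypothetical examples chosen only to show the arithmetic.

```python
# Minimal sketch (assumption: mirrors the formula used by check_max_length
# in the patch; names and numbers below are hypothetical, not from the PR).
import numpy as np

def minimum_max_length(template_durations, psd_segment_length,
                       psd_samples, pvalue_lookback_time):
    """Smallest --max-length (seconds) accommodating the longest template
    plus the p-value lookback, and the PSD estimation segments."""
    longest_template = np.max(template_durations)
    psd_len = psd_segment_length * (psd_samples // 2 + 1)
    return max(longest_template + pvalue_lookback_time, psd_len)

# Hypothetical example: 180 s longest template, 16 s PSD segments,
# 30 PSD samples, 150 s lookback (the new --pvalue-lookback-time default)
# -> at least 330 s of buffer, so a 256 s --max-length would be rejected.
print(minimum_max_length([64.0, 180.0], 16, 30, 150.0))
```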