From 730fe3a60f27c949704ae3c9457451e3136ec800 Mon Sep 17 00:00:00 2001
From: enrdrigo
Date: Mon, 22 Jan 2024 18:52:00 +0100
Subject: [PATCH] latest changes of the code.

---
 sportran/current/current.py |  30 ++-
 sportran/md/bayes.py        | 443 ++++++++++++++++++++++++++++++++++--
 2 files changed, 443 insertions(+), 30 deletions(-)

diff --git a/sportran/current/current.py b/sportran/current/current.py
index 1a76f64..d82812f 100644
--- a/sportran/current/current.py
+++ b/sportran/current/current.py
@@ -357,19 +357,33 @@ def bayesian_analysis(self, model, n_parameters,
                           backend = 'chain.h5',
                           burn_in = None,
                           thin = None,
-                          mask = None):
-
-        self.bayes = BayesFilter(self.cospectrum, model, n_parameters, self.N_EQUIV_COMPONENTS,
-                                 is_restart = is_restart,
+                          mask = None,
+                          log_like = 'off',
+                          parallel = False,
+                          ncpus = 1):
+        if parallel:
+            self.bayes = BayesFilter_parallel(self.cospectrum, model, n_parameters, self.N_EQUIV_COMPONENTS,
+                                              is_restart = is_restart,
+                                              n_steps = n_steps,
+                                              backend = backend,
+                                              burn_in = burn_in,
+                                              thin = thin,
+                                              ncpus = ncpus,
+                                              mask = mask)
+        else:
+            self.bayes = BayesFilter(self.cospectrum, model, n_parameters, self.N_EQUIV_COMPONENTS,
+                                     is_restart = is_restart,
                                      n_steps = n_steps,
                                      backend = backend,
                                      burn_in = burn_in,
                                      thin = thin,
                                      mask = mask)
+        self.bayes.run_mcmc(log_like=log_like)
 
-        self.bayes.run_mcmc()
-        self.offdiag = self.bayes.parameters_mean[0]
-        self.offdiag_std = self.bayes.parameters_std[0]
+        self.offdiag = self.bayes.parameters_mean[0] * self.bayes.factor
+        self.offdiag_std = self.bayes.parameters_std[0] * self.bayes.factor
 
         self.bayesian_log = \
             '-----------------------------------------------------\n' +\
@@ -379,6 +393,8 @@ def bayesian_analysis(self, model, n_parameters,
             ' L_01 = {:18f} +/- {:10f}\n'.format(self.offdiag, self.offdiag_std) +\
             '-----------------------------------------------------\n'
         log.write_log(self.bayesian_log)
+        with open('bayesian_analysis_{}'.format(n_parameters), 'w+') as g:
+            g.write('{}\t{}\n'.format(self.offdiag, self.offdiag_std))
 
     def cepstral_analysis(self, aic_type='aic', aic_Kmin_corrfactor=1.0, manual_cutoffK=None):
         """
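For context, a minimal sketch of how the extended entry point might be called after this patch. This is not code from the patch: `j` (a Current object with a computed cospectrum) and `model` (the spline-basis callable consumed by BayesFilter) are placeholders.

```python
# Hypothetical driver, assuming `j` is a sportran Current object and
# `model` is the spline-basis callable expected by BayesFilter.
j.bayesian_analysis(model=model,
                    n_parameters=20,
                    n_steps=20000,
                    backend='chain.h5',
                    log_like='off',   # 'off' -> Variance-Gamma likelihood, 'normal' -> Gaussian
                    parallel=True,    # dispatch to BayesFilter_parallel
                    ncpus=4)
# L_01 estimate and error bar, already rescaled by bayes.factor:
print(j.offdiag, j.offdiag_std)
```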
diff --git a/sportran/md/bayes.py b/sportran/md/bayes.py
index 1f72f44..5e55927 100644
--- a/sportran/md/bayes.py
+++ b/sportran/md/bayes.py
@@ -7,6 +7,8 @@
 from .cepstral import dct_coefficients, dct_filter_psd, dct_filter_tau, CepstralFilter, multicomp_cepstral_parameters
 from .tools.filter import runavefilter
 from sportran.utils import log
+from multiprocessing import Pool
+import time
 
 __all__ = ['BayesFilter']
 EULER_GAMMA = 0.57721566490153286060651209008240243104215933593992   # Euler-Mascheroni constant
@@ -93,7 +95,9 @@ def run_mcmc(self,
                  is_restart = None,
                  mask = None,
                  filename = None,
-                 n_walkers = None):
+                 n_walkers = None,
+                 log_like = 'off'):
 
         # Initialize the parameters if undefined
         if n_parameters is None:
@@ -109,7 +113,6 @@
 
         # Initialize the parameters for cepstral analysis
         ck_THEORY_var, psd_THEORY_mean = multicomp_cepstral_parameters(self.spectrum.shape[2], self.n_components)
-
         # Cepstral analysis for the diagonal elements
         cepf1 = CepstralFilter(np.log(self.spectrum[0,0].real), ck_theory_var = ck_THEORY_var, psd_theory_mean = psd_THEORY_mean, aic_type = 'aic')
         cepf1.scan_filter_tau(cutoffK = None, aic_Kmin_corrfactor = 1.0)
@@ -118,7 +121,6 @@
         self.sigma1 = cepf1.psd[0]
         self.sigma2 = cepf2.psd[0]
-
         nu = 2
         ell = self.n_components
         # Define noisy data
@@ -126,6 +128,8 @@
             noisy_data = (self.spectrum.real[0,1]/np.sqrt(cepf1.psd*cepf2.psd))[mask]
         else:
             noisy_data = (self.spectrum.real[0,1]/np.sqrt(cepf1.psd*cepf2.psd))
+
+        self.factor = np.sqrt(cepf1.psd[0]/cepf2.psd[0])
 
         # Define initial points for the MCMC
         try:
@@ -149,6 +153,9 @@
         omega = np.arange(noisy_data.size)
         omega_fixed = omega[args]
+
+        self.omega = omega
+        self.omega_fixed = omega_fixed
 
         # Set up the backend
         # Don't forget to clear it in case the file already exists
@@ -157,10 +164,16 @@
             backend.reset(n_walkers, n_parameters)
 
         # Initialize the sampler
-        sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
-                                        self.log_posterior_offdiag,
-                                        args=(omega, omega_fixed, noisy_data, nu, ell),
-                                        backend = backend)
+        if log_like == 'off':
+            sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
+                                            self.log_posterior_offdiag,
+                                            args=(omega, omega_fixed, noisy_data, nu, ell),
+                                            backend=backend)
+        elif log_like == 'normal':
+            sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
+                                            self.log_posterior_normal,
+                                            args=(omega, omega_fixed, noisy_data, nu, ell),
+                                            backend=backend)
 
         # Run MCMC
@@ -173,24 +186,82 @@
         # Now we'll sample for up to max_n steps
         if is_restart:
-            coord = self.sampler.get_chain()[-1]
+            coord = backend.get_chain()[-1]
+            good_idx = None
+            tau = backend.get_autocorr_time(discard=100)
+            burn_in = int(2 * np.max(tau))
+            thin = np.max([1, int(0.5 * np.min(tau))])
+            log.write_log('MCMC autocorrelation time = {}'.format(tau))
+            if good_idx is None:
+                samples = backend.get_chain(discard=burn_in, flat=True, thin=thin)
+            else:
+                samples = backend.get_chain(discard=burn_in, flat=True, thin=thin)[:, good_idx]
+            n_parameters_ = samples.shape[1]
+            self.n_parameters = n_parameters_
+
+            # Compute marginalized errors
+            rho = []
+            rho_min = []
+            rho_max = []
+            log.write_log('MCMC iterations = {}'.format(sampler.iteration))
+            self.acceptance_fraction = np.mean(sampler.acceptance_fraction)
+            for i in range(self.n_parameters):
+                mcmc = np.percentile(samples[:, i], [16, 50, 84])
+                q = np.diff(mcmc)
+                rho.append(mcmc[1])
+                rho_min.append(mcmc[1] - q[0])
+                rho_max.append(mcmc[1] + q[1])
+            # The estimated parameters are the values of rho as a function of frequency
+            self.parameters_mean = np.array(rho)
+            self.parameters_args = args
+            self.parameters_std = 0.5 * (np.array(rho_max) - np.array(rho_min))
+            self.sampler = sampler
+            self.noisy_data = noisy_data
+            if log_like == 'normal':
+                self.aic = 2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                           - 2 * self.n_parameters
+            elif log_like == 'off':
+                self.aic = 2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                           - 2 * self.n_parameters
+            return
         else:
             coord = np.copy(p0)
 
+        todo = True
+        disc = 0
-        for sample in sampler.sample(coord, iterations = n_steps, progress = True):
-            # Only check convergence every 100 steps
-            if sampler.iteration % 500:
+        for sample in sampler.sample(coord, iterations = n_steps, progress = True, store = True):
+            # Only check convergence every 250 steps
+            if sampler.iteration % 250:
                 continue
 
             # Compute the autocorrelation time so far
             # Using tol=0 means that we'll always get an estimate even
             # if it isn't trustworthy
-            tau = sampler.get_autocorr_time(tol=0)
+            tau = sampler.get_autocorr_time(tol=0, discard=disc)
             autocorr[index] = np.mean(tau)
             index += 1
 
+            # Look for the smallest discard window after which the
+            # autocorrelation-time estimate stabilizes (within 5%)
+            if todo and sampler.iteration > 1000:
+                s_old = np.ones(n_parameters)
+                for i in range(100, int(sampler.iteration / 2) + 1, 100):
+                    s = sampler.get_autocorr_time(tol=0, discard=i)
+                    if np.all(abs((s - s_old) / s) * 100 < 5):
+                        disc = i
+                        todo = False
+                        break
+                    s_old = s
+
             # Check convergence
-            converged = np.all(tau * 100 < sampler.iteration)
-            converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
+            converged = np.all(tau * 300 < sampler.iteration)
+            converged &= np.all(np.abs(old_tau - tau) / tau < 0.005)
             if converged:
                 break
             old_tau = tau
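The sampling loop above follows the convergence-monitoring pattern from the emcee documentation: sample in chunks, estimate the integrated autocorrelation time tau with `tol=0`, and stop once the chain is much longer than tau and the tau estimate has stabilized. A self-contained toy version (standard-normal posterior; the thresholds mirror the ones used in this patch):

```python
import numpy as np
import emcee

def log_prob(x):
    # Toy posterior: independent standard normals.
    return -0.5 * np.sum(x**2)

ndim, n_walkers, max_n = 4, 16, 20000
sampler = emcee.EnsembleSampler(n_walkers, ndim, log_prob)
coords = np.random.normal(size=(n_walkers, ndim))

old_tau = np.inf
for _ in sampler.sample(coords, iterations=max_n, progress=False):
    if sampler.iteration % 250:                   # check every 250 steps
        continue
    tau = sampler.get_autocorr_time(tol=0)        # tol=0: always return an estimate
    converged = np.all(tau * 300 < sampler.iteration)          # chain >> 300 * tau
    converged &= np.all(np.abs(old_tau - tau) / tau < 0.005)   # tau stable to 0.5%
    if converged:
        break
    old_tau = tau
```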
@@ -199,9 +270,9 @@
         # Compute chains auto-correlation time to estimate convergence
         # If AutocorrError, probably the chain is too short. You can still use ~2*max(tau) as burn_in
         good_idx = None
         try:
-            tau = sampler.get_autocorr_time()
-            burn_in = int(2 * np.max(tau))
-            thin = np.max([1, int(0.5 * np.min(tau))])
+            tau = sampler.get_autocorr_time(discard=disc)
+            burn_in = int(6 * np.max(tau))
+            thin = np.max([1, int(1.5 * np.min(tau))])
             log.write_log('MCMC autocorrelation time = {}'.format(tau))
         except emcee.autocorr.AutocorrError:
             log.write_log('The chain is probably too short')
@@ -212,8 +283,8 @@
             good_idx = ~np.isnan(tau)
             tau = tau[good_idx]
             log.write_log('Fixed MCMC autocorrelation time = {}'.format(tau))
-            burn_in = int(2 * np.max(tau))
-            thin = np.max([1, int(0.5 * np.min(tau))])
+            burn_in = int(6 * np.max(tau))
+            thin = np.max([1, int(1.5 * np.min(tau))])
         # if self.burn_in is not None:
         #     burn_in = self.burn_in
         # else:
@@ -250,7 +321,18 @@
         self.parameters_std = 0.5*(np.array(rho_max) - np.array(rho_min))
         self.sampler = sampler
         self.noisy_data = noisy_data
-
+        if log_like == 'normal':
+            self.aic = 2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       - 2 * self.n_parameters
+            self.dic = -2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       + 4 * sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
+        elif log_like == 'off':
+            self.aic = 2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       - 2 * self.n_parameters
+            self.dic = -2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       + 4 * sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
+        with open('aic_{}'.format(self.n_parameters), 'w+') as g:
+            g.write('{}\t{}\n'.format(self.n_parameters, self.aic))
+        with open('dic_{}'.format(self.n_parameters), 'w+') as g:
+            g.write('{}\t{}\n'.format(self.n_parameters, self.dic))
 
 ################################################
 # Helper functions
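On the information criteria written out above: the textbook definitions are AIC = 2k - 2 lnL(theta_hat) and DIC = -4 E[lnL] + 2 lnL(theta_bar), while this patch stores their negatives (2 lnL - 2k, and -2 lnL + 4 E[log posterior]), so with these conventions larger stored values indicate better models. A small sketch re-stating the two scores as computed here (the function names are illustrative, not from the patch):

```python
import numpy as np

def scores(log_like_at_mean, n_parameters, mean_log_prob):
    """Illustrative re-statement of the patch's conventions.

    Both quantities are the negatives of the textbook AIC/DIC,
    so with these definitions larger is better.
    """
    aic_like = 2 * log_like_at_mean - 2 * n_parameters
    # mean_log_prob plays the role of E[log posterior], e.g. from
    # sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
    dic_like = -2 * log_like_at_mean + 4 * mean_log_prob
    return aic_like, dic_like

# e.g. with lnL(theta_hat) = -120.3, k = 10, E[log posterior] = -125.0:
print(scores(-120.3, 10, -125.0))   # (-260.6, -259.4)
```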
@@ -271,6 +353,283 @@ def run_mcmc(self,
     # np.log(sp.kv(0.5*(nu-1), np.abs(z)*one_frac_rho2))
     # return np.sum(log_pdf)
 
+    def run_mcmc_scratch(self,
+                         n_parameters=None,
+                         n_steps=None,
+                         is_restart=None,
+                         mask=None,
+                         filename=None,
+                         n_walkers=None,
+                         log_like='off'):
+
+        # Initialize the parameters if undefined
+        if n_parameters is None:
+            n_parameters = self.n_parameters
+        else:
+            self.n_parameters = n_parameters
+        if n_steps is None:
+            n_steps = self.n_steps
+        if is_restart is None:
+            is_restart = self.is_restart
+        if mask is None:
+            mask = self.mask
+        if filename is None:
+            filename = self.backend
+
+        nu = 2
+        ell = self.n_components
+        # Define noisy data
+        if mask is not None:
+            noisy_data = (self.spectrum[0,1])[mask]
+        else:
+            noisy_data = (self.spectrum[0,1])
+
+        # Define initial points for the MCMC
+        try:
+            guess_data = runavefilter(noisy_data, 1000)
+        except Exception:
+            guess_data = runavefilter(noisy_data, 100)
+
+        args = np.int32(np.linspace(0, len(noisy_data) - 1, n_parameters, endpoint=True))
+
+        # MCMC sampling
+        # The number of walkers must be larger than twice the number of parameters
+        # (and is often chosen as a power of 2)
+        if n_walkers is None:
+            n_walkers = int(2 ** np.ceil(np.log2(2 * n_parameters)))
+
+        log.write_log('MCMC with {} parameters and {} walkers'.format(n_parameters, n_walkers))
+        log.write_log(f'Running up to {n_steps} steps')
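The walker count above rounds 2 x n_parameters up to the next power of two, which satisfies emcee's requirement that the ensemble be at least twice the dimension of the parameter space. In isolation:

```python
import numpy as np

for n_parameters in (3, 10, 33):
    n_walkers = int(2 ** np.ceil(np.log2(2 * n_parameters)))
    print(n_parameters, '->', n_walkers)   # 3 -> 8, 10 -> 32, 33 -> 128
```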
+        p0 = guess_data
+        if log_like == 'normal' or log_like == 'off':
+            p0 = np.clip(p0[args][np.newaxis, :n_parameters] +
+                         np.random.normal(0, 0.1, (n_walkers, n_parameters)), -0.98, 0.98)
+        if log_like == 'diag':
+            p0 = np.clip(p0[args][np.newaxis, :n_parameters] +
+                         np.random.normal(0, 10, (n_walkers, n_parameters)), 0, 1e6)
+
+        omega = np.arange(noisy_data.size)
+        omega_fixed = omega[args]
+
+        self.omega = omega
+        self.omega_fixed = omega_fixed
+
+        # Set up the backend
+        # Don't forget to clear it in case the file already exists
+        backend = emcee.backends.HDFBackend(filename)
+        if not is_restart:
+            backend.reset(n_walkers, n_parameters)
+
+        # Initialize the sampler
+        if log_like == 'off':
+            sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
+                                            self.log_posterior_offdiag,
+                                            args=(omega, omega_fixed, noisy_data, nu, ell),
+                                            backend=backend)
+        elif log_like == 'normal':
+            sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
+                                            self.log_posterior_normal,
+                                            args=(omega, omega_fixed, noisy_data, nu, ell),
+                                            backend=backend)
+        elif log_like == 'diag':
+            sampler = emcee.EnsembleSampler(n_walkers, n_parameters,
+                                            self.log_posterior_diag,
+                                            args=(omega, omega_fixed, noisy_data, ell),
+                                            backend=backend)
+
+        # Run MCMC
+        # We'll track how the average autocorrelation time estimate changes
+        index = 0
+        autocorr = np.empty(n_steps)
+
+        # This will be useful for testing convergence
+        old_tau = np.inf
+
+        # Now we'll sample for up to max_n steps
+        if is_restart:
+            coord = backend.get_chain()[-1]
+            good_idx = None
+            tau = backend.get_autocorr_time(discard=1000)
+            burn_in = int(6 * np.max(tau))
+            thin = np.max([1, int(1.5 * np.min(tau))])
+            log.write_log('MCMC autocorrelation time = {}'.format(tau))
+            if good_idx is None:
+                samples = backend.get_chain(discard=burn_in, flat=True, thin=thin)
+            else:
+                samples = backend.get_chain(discard=burn_in, flat=True, thin=thin)[:, good_idx]
+            n_parameters_ = samples.shape[1]
+            self.n_parameters = n_parameters_
+
+            # Compute marginalized errors
+            rho = []
+            rho_min = []
+            rho_max = []
+            for i in range(self.n_parameters):
+                mcmc = np.percentile(samples[:, i], [16, 50, 84])
+                q = np.diff(mcmc)
+                rho.append(mcmc[1])
+                rho_min.append(mcmc[1] - q[0])
+                rho_max.append(mcmc[1] + q[1])
+            # The estimated parameters are the values of rho as a function of frequency
+            self.parameters_mean = np.array(rho)
+            self.parameters_args = args
+            self.parameters_std = 0.5 * (np.array(rho_max) - np.array(rho_min))
+            self.sampler = sampler
+            self.noisy_data = noisy_data
+            if log_like == 'normal':
+                self.aic = 2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                           - 2 * self.n_parameters
+            elif log_like == 'off':
+                self.aic = 2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                           - 2 * self.n_parameters
+            elif log_like == 'diag':
+                self.aic = 2 * self.log_likelihood_diag(self.parameters_mean, omega, omega_fixed, noisy_data, ell) \
+                           - 2 * self.n_parameters
+            return
+        else:
+            coord = np.copy(p0)
+
+        todo = True
+        disc = 0
+        for sample in sampler.sample(coord, iterations=n_steps, progress=True, store=True):
+            # Only check convergence every 250 steps
+            if sampler.iteration % 250:
+                continue
+
+            # Periodic progress report
+            if sampler.iteration % 5000 == 0:
+                log.write_log('MCMC autocorrelation time so far = {}'.format(tau))
+
+            # Compute the autocorrelation time so far
+            # Using tol=0 means that we'll always get an estimate even
+            # if it isn't trustworthy
+            tau = sampler.get_autocorr_time(tol=0, discard=disc)
+            autocorr[index] = np.mean(tau)
+            index += 1
+
+            # Check convergence
+            converged = np.all(tau * 300 < sampler.iteration)
+            converged &= np.all(np.abs(old_tau - tau) / tau < 0.005)
+
+            # Look for the smallest discard window after which the
+            # autocorrelation-time estimate stabilizes (within 2%)
+            if todo and sampler.iteration % 500 == 0 and sampler.iteration > 1000:
+                s_old = np.ones(n_parameters)
+                for i in range(100, int(sampler.iteration / 2) + 1, 100):
+                    s = sampler.get_autocorr_time(tol=0, discard=i)
+                    if np.all(abs((s - s_old) / s) * 100 < 2):
+                        disc = i
+                        todo = False
+                        break
+                    s_old = s
+            if converged:
+                break
+            old_tau = tau
+
+        # Compute chains auto-correlation time to estimate convergence
+        # If AutocorrError, probably the chain is too short. You can still use ~2*max(tau) as burn_in
+        good_idx = None
+        try:
+            log.write_log('MCMC discard = {}'.format(disc))
+            tau = sampler.get_autocorr_time(discard=disc)
+            burn_in = max(int(6 * np.max(tau)), disc)
+            thin = np.max([1, int(1.5 * np.min(tau))])
+            log.write_log('MCMC autocorrelation time = {}'.format(tau))
+        except emcee.autocorr.AutocorrError:
+            log.write_log('The chain is probably too short')
+            burn_in = max(int(sampler.iteration * 0.3), disc)
+            thin = int(np.max([int(1.5 * sampler.iteration), 10]))
+        except ValueError:
+            log.write_log(f'There is something wrong with tau: tau = {tau}')
+            good_idx = ~np.isnan(tau)
+            tau = tau[good_idx]
+            log.write_log('Fixed MCMC autocorrelation time = {}'.format(tau))
+            burn_in = max(int(2 * np.max(tau)), disc)
+            thin = np.max([1, int(1.5 * np.min(tau))])
+        # if self.burn_in is not None:
+        #     burn_in = self.burn_in
+        # else:
+        #     self.burn_in = burn_in
+        # if self.thin is not None:
+        #     thin = self.thin
+        # else:
+        #     self.thin = thin
+        log.write_log('MCMC burn in = {}; thin = {}'.format(burn_in, thin))
+
+        if good_idx is None:
+            samples = sampler.get_chain(discard=burn_in, flat=True, thin=thin)
+        else:
+            samples = sampler.get_chain(discard=burn_in, flat=True, thin=thin)[:, good_idx]
+        n_parameters_ = samples.shape[1]
+        self.n_parameters = n_parameters_
+
+        # Compute marginalized errors
+        rho = []
+        rho_min = []
+        rho_max = []
+        for i in range(self.n_parameters):
+            mcmc = np.percentile(samples[:, i], [16, 50, 84])
+            q = np.diff(mcmc)
+            rho.append(mcmc[1])
+            rho_min.append(mcmc[1] - q[0])
+            rho_max.append(mcmc[1] + q[1])
+
+        # The estimated parameters are the values of rho as a function of frequency
+        self.parameters_mean = np.array(rho)
+        self.parameters_args = args
+        self.parameters_std = 0.5 * (np.array(rho_max) - np.array(rho_min))
+        self.sampler = sampler
+        self.noisy_data = noisy_data
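The marginalized estimates above use the 16th/50th/84th percentiles of the flattened chain: the median is the point estimate and half the 16-84 spread plays the role of a 1-sigma error bar. The same computation in isolation (with stand-in samples in place of `sampler.get_chain(...)`):

```python
import numpy as np

# Stand-in for sampler.get_chain(discard=burn_in, flat=True, thin=thin)
flat_samples = np.random.normal(loc=0.3, scale=0.05, size=(5000, 3))

for i in range(flat_samples.shape[1]):
    lo, med, hi = np.percentile(flat_samples[:, i], [16, 50, 84])
    # median as point estimate; half the 16-84 spread as the error bar
    print(f'rho_{i} = {med:.4f} +/- {0.5 * (hi - lo):.4f}')
```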
+        if log_like == 'normal':
+            self.aic = 2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       - 2 * self.n_parameters
+            self.dic = -2 * self.log_likelihood_normal(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       + 4 * sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
+        elif log_like == 'off':
+            self.aic = 2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       - 2 * self.n_parameters
+            self.dic = -2 * self.log_likelihood_offdiag(self.parameters_mean, omega, omega_fixed, noisy_data, nu, ell) \
+                       + 4 * sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
+        elif log_like == 'diag':
+            self.aic = 2 * self.log_likelihood_diag(self.parameters_mean, omega, omega_fixed, noisy_data, ell) \
+                       - 2 * self.n_parameters
+            self.dic = -2 * self.log_likelihood_diag(self.parameters_mean, omega, omega_fixed, noisy_data, ell) \
+                       + 4 * sampler.get_log_prob(discard=burn_in, flat=True, thin=thin).mean()
+        with open('aic_{}'.format(self.n_parameters), 'w+') as g:
+            g.write('{}\t{}\n'.format(self.n_parameters, self.aic))
+        with open('dic_{}'.format(self.n_parameters), 'w+') as g:
+            g.write('{}\t{}\n'.format(self.n_parameters, self.dic))
 
     def log_likelihood_offdiag(self, w, omega, omega_fixed, data_, nu, ell):
         spline = self.model(omega_fixed, w)
         rho = np.clip(spline(omega), -0.98, 0.98)
@@ -286,9 +645,31 @@ def log_likelihood_offdiag(self, w, omega, omega_fixed, data_, nu, ell):
         z = data_*ell*nu
         absz = np.abs(z)
         log_pdf = _lambda*np.log(_gamma2) + _lambda_minus_half*np.log(absz) + np.log(sp.kv(_lambda_minus_half, _alpha*absz)) + \
                   _beta*z - 0.5*np.log(np.pi) - np.log(sp.gamma(_lambda)) - _lambda_minus_half*np.log(2*_alpha)
-        return np.sum(log_pdf)
+
+        res = np.sum(log_pdf)
+        return res
+
+    def log_likelihood_diag(self, w, omega, omega_fixed, data_, ell):
+        spline = self.model(omega_fixed, w)
+        rho = np.clip(spline(omega), 1e-6, 1e6)
+
+        # Data is distributed according to a Chi-squared distribution with ell degrees
+        # of freedom (notation as in Wikipedia); its expectation value is ell*rho
+        z = data_*ell/rho
+        absz = np.abs(z)
+        log_pdf = (ell/2 - 1)*np.log(absz) - absz/2 - np.log(rho)
+
+        res = np.sum(log_pdf)
+        return res
+
+    def log_likelihood_normal(self, w, omega, omega_fixed, data_, nu, ell):
+        # Gaussian pseudo-likelihood with fixed unit variance (nu and ell are unused)
+        spline = self.model(omega_fixed, w)
+        rho = np.clip(spline(omega), -0.98, 0.98)
+
+        log_pdf = -(data_ - rho)**2
+        return np.sum(log_pdf)
 
@@ -296,14 +677,30 @@
     # The log-prior function
     def log_prior_offdiag(self, w):
         # Uniform prior: log(1) = 0 inside the support, -inf outside
         if np.all((w >= -1) & (w <= 1)):
-            return 0.5
+            return 0.0
         else:
-            return 0
+            return -np.inf
+
+    # The log-prior function
+    def log_prior_diag(self, w):
+        # Uniform prior: log(1) = 0 inside the support, -inf outside
+        if np.all((w >= 1e-6) & (w <= 1e6)):
+            return 0.0
+        else:
+            return -np.inf
 
     # The log-posterior function
     def log_posterior_offdiag(self, w, omega, omega_fixed, data, nu = 6, ell = 3):
         return self.log_prior_offdiag(w) + self.log_likelihood_offdiag(w, omega, omega_fixed, data, nu, ell)
 
+    # The log-posterior function
+    def log_posterior_diag(self, w, omega, omega_fixed, data, ell = 3):
+        return self.log_prior_diag(w) + self.log_likelihood_diag(w, omega, omega_fixed, data, ell)
+
+    # The log-posterior function
+    def log_posterior_normal(self, w, omega, omega_fixed, data, nu = 6, ell = 3):
+        return self.log_prior_offdiag(w) + self.log_likelihood_normal(w, omega, omega_fixed, data, nu, ell)
+
     def initialize_cepstral_distribution(self, ck_theory_var=None, psd_theory_mean=None):
         """
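The off-diagonal likelihood above is the Variance-Gamma log-density with mu = 0, alpha = 1/(1-rho^2), beta = rho/(1-rho^2) and shape lambda = ell*nu/2 (as stated in the commented-out block), so that gamma^2 = alpha^2 - beta^2 = 1/(1-rho^2). A standalone version of the same log-pdf, written here with `gammaln` instead of `log(gamma(.))` for numerical stability and a scalar `rho` for clarity:

```python
import numpy as np
from scipy.special import kv, gammaln

def variance_gamma_logpdf(z, rho, lam):
    # Variance-Gamma with mu = 0, alpha = 1/(1-rho^2), beta = rho/(1-rho^2):
    # f(z) = gamma^(2*lam) |z|^(lam-1/2) K_(lam-1/2)(alpha*|z|) e^(beta*z)
    #        / (sqrt(pi) Gamma(lam) (2*alpha)^(lam-1/2))
    alpha = 1.0 / (1.0 - rho**2)
    beta = rho * alpha
    gamma2 = alpha**2 - beta**2          # = 1/(1-rho^2)
    absz = np.abs(z)
    return (lam * np.log(gamma2)
            + (lam - 0.5) * np.log(absz)
            + np.log(kv(lam - 0.5, alpha * absz))   # modified Bessel K
            + beta * z
            - 0.5 * np.log(np.pi)
            - gammaln(lam)
            - (lam - 0.5) * np.log(2.0 * alpha))

# Summed log-likelihood of some rescaled data points (z = ell*nu*data):
z = np.array([0.5, -0.2, 1.1])
print(np.sum(variance_gamma_logpdf(z, rho=0.3, lam=3.0)))
```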
@@ -436,4 +833,4 @@ def scan_filter_psd(self, cutoffK_LIST, correct_mean=True):
             if correct_mean:
                 self.logpsd_K_LIST[:, k] = self.logpsd_K_LIST[:, k] + self.logpsd_THEORY_mean
-                self.logtau_K_LIST[k] = self.logtau_K_LIST[k] + self.logpsd_THEORY_mean[0]
\ No newline at end of file
+                self.logtau_K_LIST[k] = self.logtau_K_LIST[k] + self.logpsd_THEORY_mean[0]
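Since each run writes an `aic_<n_parameters>` and a `dic_<n_parameters>` file (one `n<TAB>score` row each), comparing models with different numbers of parameters reduces to scanning those files; under the sign convention used in this patch, the preferred model maximizes the stored score. A possible post-processing sketch, assuming only the file layout written by `run_mcmc`:

```python
import glob
import numpy as np

# Each aic_<n> file holds one row: '<n>\t<score>'.
records = []
for fname in glob.glob('aic_*'):
    n, score = np.loadtxt(fname)
    records.append((int(n), float(score)))

# The patch stores 2*lnL - 2k (the negative of the textbook AIC): maximize it.
best_n, best_score = max(records, key=lambda r: r[1])
print(f'preferred model: {best_n} parameters (score = {best_score:.3f})')
```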