diff --git a/sportran/md/maxlike.py b/sportran/md/maxlike.py
index 2dcbbd3..9ea56b4 100644
--- a/sportran/md/maxlike.py
+++ b/sportran/md/maxlike.py
@@ -11,9 +11,12 @@ from .tools.filter import runavefilter
 
 
-EULER_GAMMA = 0.57721566490153286060651209008240243104215933593992 # Euler-Mascheroni constant
+EULER_GAMMA = (
+    0.57721566490153286060651209008240243104215933593992  # Euler-Mascheroni constant
+)
 LOG2 = np.log(2)
 
+
 class MaxLikeFilter:
     """
     Maximum-likelihood estimate of the Onsager or transport coefficient.
 
@@ -30,20 +33,22 @@ class MaxLikeFilter:
     TODO
     """
 
-    def __init__(self,
-                 data=None,
-                 model=None,
-                 n_parameters=None,
-                 n_components=None,
-                 n_currents=None,
-                 likelihood=None,
-                 solver=None,
-                 omega_fixed=None):
+    def __init__(
+        self,
+        data=None,
+        model=None,
+        n_parameters=None,
+        n_components=None,
+        n_currents=None,
+        likelihood=None,
+        solver=None,
+        omega_fixed=None,
+    ):
         """
         Initialize the MaxLikeFilter class with the provided parameters.
         """
         log.write_log("MaxLikeFilter")
-        
+
         self.data = data
         self.model = model
         self.n_parameters = n_parameters
@@ -73,37 +78,41 @@ def set_likelihood(self, likelihood):
         Set the likelihood function based on the provided likelihood type.
         """
         likelihood = likelihood.lower()
-        if likelihood == 'wishart':
+        if likelihood == "wishart":
             self.log_like = self.log_likelihood_wishart
-        elif likelihood == 'chisquare' or likelihood == 'chisquared':
+        elif likelihood == "chisquare" or likelihood == "chisquared":
             self.log_like = self.log_likelihood_diag
-        elif likelihood == 'variancegamma' or likelihood == 'variance-gamma':
+        elif likelihood == "variancegamma" or likelihood == "variance-gamma":
             self.log_like = self.log_likelihood_offdiag
         else:
-            raise NotImplementedError("Currently implemented likelihoods are: `wishart`, `chisquare`, `variance-gamma`")
+            raise NotImplementedError(
+                "Currently implemented likelihoods are: `wishart`, `chisquare`, `variance-gamma`"
+            )
 
     def __repr__(self):
-        msg = 'MaxLikeFilter:\n'
+        msg = "MaxLikeFilter:\n"
         return msg
 
-    def maxlike(self,
-                data=None,
-                model=None,
-                n_parameters=None,
-                likelihood=None,
-                solver=None,
-                mask=None,
-                n_components=None,
-                n_currents=None,
-                guess_runave_window=50,
-                omega_fixed=None,
-                write_log=True,
-                minimize_kwargs=None):
+    def maxlike(
+        self,
+        data=None,
+        model=None,
+        n_parameters=None,
+        likelihood=None,
+        solver=None,
+        mask=None,
+        n_components=None,
+        n_currents=None,
+        guess_runave_window=50,
+        omega_fixed=None,
+        write_log=True,
+        minimize_kwargs=None,
+    ):
         """
         Perform the maximum-likelihood estimation.
""" # Update instance variables if provided - + if data is not None: self.data = data self.omega = np.arange(data.shape[-1]) @@ -118,33 +127,48 @@ def maxlike(self, if n_components is not None: self.n_components = n_components if n_currents is not None: - self.n_currents= n_currents + self.n_currents = n_currents if omega_fixed is not None: self.omega_fixed = omega_fixed # Validate necessary variables - assert self.n_parameters is not None, "Number of parameters (n_parameters) must be provided" + assert ( + self.n_parameters is not None + ), "Number of parameters (n_parameters) must be provided" assert self.solver is not None, "Solver must be provided" assert self.data is not None, "`data` must be provided" assert self.log_like is not None, "Likelihood must be set" if write_log: - log.write_log('Maximum-likelihood estimate with {} parameters'.format(self.n_parameters)) + log.write_log( + "Maximum-likelihood estimate with {} parameters".format( + self.n_parameters + ) + ) ell = self.n_components if mask is not None: + self.mask = mask self.data = self.data[mask] nu = self.n_currents - + # Check data shape - assert len(self.data.shape) == 1 or len(self.data.shape) == 3, "`data` should be a 1d array (diagonal or off-diagonal estimates) or a 3d array (Wishart matrix estimate)" + assert ( + len(self.data.shape) == 1 or len(self.data.shape) == 3 + ), "`data` should be a 1d array (diagonal or off-diagonal estimates) or a 3d array (Wishart matrix estimate)" if len(self.data.shape) == 3: - assert self.likelihood == 'wishart', f"Misshaped `data` for {self.likelihood} likelihood." - assert self.data.shape[0] == self.data.shape[1], f"data.shape={self.data.shape}. `data` for a Wishart estimate must be a (n,n,N) array." + assert ( + self.likelihood == "wishart" + ), f"Misshaped `data` for {self.likelihood} likelihood." + assert ( + self.data.shape[0] == self.data.shape[1] + ), f"data.shape={self.data.shape}. `data` for a Wishart estimate must be a (n,n,N) array." else: - assert self.likelihood != 'wishart', f"Misshaped `data` for {self.likelihood} likelihood." + assert ( + self.likelihood != "wishart" + ), f"Misshaped `data` for {self.likelihood} likelihood." # Frequency omega = np.arange(self.data.shape[-1]) @@ -156,8 +180,14 @@ def maxlike(self, assert self.omega_fixed.shape[0] == n_parameters except: if write_log: - log.write_log("Spline nodes are equispaced from 0 to the Nyquist frequency.") - args = np.int32(np.linspace(0, self.data.shape[-1] - 1, self.n_parameters, endpoint=True)) + log.write_log( + "Spline nodes are equispaced from 0 to the Nyquist frequency." 
+                    )
+                args = np.int32(
+                    np.linspace(
+                        0, self.data.shape[-1] - 1, self.n_parameters, endpoint=True
+                    )
+                )
                 self.omega_fixed = omega[args]
         else:
             self.omega_fixed = omega_fixed
@@ -167,56 +197,92 @@ def maxlike(self,
         assert self.model is not None, "Model must be provided"
 
         # Initial guess for optimization
-        guess_data = self.guess_data(self.data, omega, self.omega_fixed, ell, nu, window=guess_runave_window, loglike=self.likelihood)
-        self.data = np.moveaxis(self.data, self.data.shape.index(max(self.data.shape)), 0)
+        guess_data = self.guess_data(
+            self.data,
+            omega,
+            self.omega_fixed,
+            ell,
+            nu,
+            window=guess_runave_window,
+            loglike=self.likelihood,
+        )
+        self.data = np.moveaxis(
+            self.data, self.data.shape.index(max(self.data.shape)), 0
+        )
 
         # Minimize the negative log-likelihood
         self._guess_data = guess_data
-        guess_par = guess_data #normalize_parameters(guess_data, guess_data)
+        guess_par = guess_data  # normalize_parameters(guess_data, guess_data)
         # print(guess_data - denormalize_parameters(guess_par, guess_data))
-        res = minimize(fun = self.log_like,
-                       x0 = guess_par,
-                       args = (self.model, omega, self.omega_fixed, self.data, nu, ell),
-                       method = self.solver,
-                       **minimize_kwargs)
-        
+        res = minimize(
+            fun=self.log_like,
+            x0=guess_par,
+            args=(self.model, omega, self.omega_fixed, self.data, nu, ell),
+            method=self.solver,
+            **minimize_kwargs,
+        )
+
         # Covariance of the parameters
         try:
            cov = res.hess_inv
            if write_log:
-                log.write_log(f'The {self.solver} solver features the calculation of the Hessian. The covariance matrix will be estimated through the Laplace approximation.')
+                log.write_log(
+                    f"The {self.solver} solver features the calculation of the Hessian. The covariance matrix will be estimated through the Laplace approximation."
+                )
         except:
             if write_log:
-                log.write_log(f'The {self.solver} solver does not feature the calculation of the Hessian. No covariance matrix will be output.')
+                log.write_log(
+                    f"The {self.solver} solver does not feature the calculation of the Hessian. No covariance matrix will be output."
+                )
             cov = None
-        
+
         self.parameters_mean = res.x
         # self.parameters_mean = denormalize_parameters(res.x, self._guess_data)
         if cov is not None:
             self.parameters_std = np.sqrt(cov.diagonal())
         self.optimizer_res = res
-        self.log_likelihood_value = -self.log_like(res.x, self.model, omega, self.omega_fixed, self.data, nu, ell)
+        self.log_likelihood_value = -self.log_like(
+            res.x, self.model, omega, self.omega_fixed, self.data, nu, ell
+        )
 
     ################################################
     # Helper functions
 
-    def guess_data(self, data, omega, omega_fixed, ell, nu, window=10, loglike='wishart'):
+    def guess_data(
+        self, data, omega, omega_fixed, ell, nu, window=10, loglike="wishart"
+    ):
         """
         Moving average of the input data as initial guess for the parameter estimation.
         """
         shape = data.shape
         try:
-            guess_data = np.array([runavefilter(c, window) for c in data.reshape(-1, shape[-1])]).reshape(shape)
+            guess_data = np.array(
+                [runavefilter(c, window) for c in data.reshape(-1, shape[-1])]
+            ).reshape(shape)
         except:
-            log.write_log(f'Guessing data with a running average with a {window} large window failed. Window changed to 10.')
+            log.write_log(
+                f"Guessing data with a running average with a {window} large window failed. Window changed to 10."
+            )
             window = 10
-            guess_data = np.array([runavefilter(c, window) for c in data.reshape(-1, shape[-1])]).reshape(shape)
-
-        guess_data = np.array([guess_data[..., j] for j in [np.argmin(np.abs(omega - omega_fixed[i])) for i in range(len(omega_fixed))]])
-        print(guess_data.shape)
-        if loglike == 'wishart':
-            guess_data = np.array([cholesky(g, lower=False) for g in guess_data]) #/ np.sqrt(ell)
+            guess_data = np.array(
+                [runavefilter(c, window) for c in data.reshape(-1, shape[-1])]
+            ).reshape(shape)
+
+        guess_data = np.array(
+            [
+                guess_data[..., j]
+                for j in [
+                    np.argmin(np.abs(omega - omega_fixed[i]))
+                    for i in range(len(omega_fixed))
+                ]
+            ]
+        )
+        # print(guess_data.shape)
+        if loglike == "wishart":
+            guess_data = np.array(
+                [cholesky(g, lower=False) for g in guess_data]
+            )  # / np.sqrt(ell)
 
         upper_triangle_indices = np.triu_indices(nu)
         nw = omega_fixed.shape[0]
@@ -234,15 +300,17 @@ def guess_data(self, data, omega, omega_fixed, ell, nu, window=10, loglike='wish
                 ie += 1
             guess_data = guess_params.flatten()
         else:
-            guess_params = np.zeros((nw, nu*(nu+1)//2))
+            guess_params = np.zeros((nw, nu * (nu + 1) // 2))
             for i, j in zip(*upper_triangle_indices):
                 guess_params[:, ie] = guess_data[:, i, j]
                 ie += 1
             guess_data = guess_params.flatten()
-        
+
         return guess_data
 
-    def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, eps = 1e-3):
+    def log_likelihood_wishart(
+        self, w, model, omega, omega_fixed, data_, nu, ell, eps=1e-3
+    ):
         """
         Logarithm of the Wishart probability density function.
         """
@@ -252,7 +320,7 @@ def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, e
         # Compute scale matrix from the model (symmetrize to ensure positive definiteness)
         V = scale_matrix(model, w, omega, omega_fixed, p)
         X = data_
-        
+
         if n < p:
             S = np.linalg.svd(X, hermitian=True, compute_uv=False)
             detX = np.array([np.prod(s[abs(s) > eps]) for s in S])
@@ -263,16 +331,20 @@ def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, e
 
         invV = np.linalg.inv(V)
         detV = np.linalg.det(V)
-        trinvV_X = opt_einsum.contract('wab,wba->w', invV, X)
-        
+        trinvV_X = opt_einsum.contract("wab,wba->w", invV, X)
+
         coeff_detV = -n
-        coeff_detX = n-p-1
-        
-        log_pdf = coeff_detV * np.log(detV + eps) + coeff_detX * np.log(detX + eps) - trinvV_X
+        coeff_detX = n - p - 1
+
+        log_pdf = (
+            coeff_detV * np.log(detV + eps) + coeff_detX * np.log(detX + eps) - trinvV_X
+        )
         tot = -np.sum(log_pdf)
         return tot
 
-    def log_likelihood_complex_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, eps = 1e-9):
+    def log_likelihood_complex_wishart(
+        self, w, model, omega, omega_fixed, data_, nu, ell, eps=1e-9
+    ):
         """
         Logarithm of the Complex Wishart probability density function.
""" @@ -293,10 +365,10 @@ def log_likelihood_complex_wishart(self, w, model, omega, omega_fixed, data_, nu X = data_ if n < p: - raise ValueError('n must be greater or equal than p') + raise ValueError("n must be greater or equal than p") # Singular Wishart multig = multigammaln(0.5 * n, n) - S = np.linalg.svd(X, hermitian = True, compute_uv = False) + S = np.linalg.svd(X, hermitian=True, compute_uv=False) detX = np.array([np.prod(s[abs(s) > eps]) for s in S]) else: # multig = multigammaln(0.5 * n, p) @@ -305,9 +377,9 @@ def log_likelihood_complex_wishart(self, w, model, omega, omega_fixed, data_, nu invS = np.linalg.inv(S) logdetS = np.log(np.abs(np.linalg.det(S).real)) - trinvS_X = opt_einsum.contract('wab,wba->w', invS, X).real - - log_pdf = (n-p)*logdetX - trinvS_X - n*logdetS + trinvS_X = opt_einsum.contract("wab,wba->w", invS, X).real + + log_pdf = (n - p) * logdetX - trinvS_X - n * logdetS # coeff_detV = -n # coeff_detX = n-p-1 # log_pdf = coeff_detV * np.log(detV) + coeff_detX * np.log(detX) - trinvV_X @@ -318,7 +390,7 @@ def log_likelihood_offdiag(self, w, model, omega, omega_fixed, data_, nu, ell): """ Negative of the logarithm of the Variance-Gamma probability density function. """ - print(w) + # print(w) spline = model(omega_fixed, w) rho = np.clip(spline(omega), -0.98, 0.98) _alpha = 1 / (1 - rho**2) @@ -326,18 +398,18 @@ def log_likelihood_offdiag(self, w, model, omega, omega_fixed, data_, nu, ell): _lambda = 0.5 * ell * nu _gamma2 = _alpha**2 - _beta**2 _lambda_minus_half = _lambda - 0.5 - - # Non sono più sicuro sia sensata questa definizione di z. + + # Non sono più sicuro sia sensata questa definizione di z. # Non è semplicemtente data_? AH! Forse è la stessa cosa che succede al Chi2, va moltiplicato per il numero di dof. Capire meglio e fare prove. z = data_ * nu * ell absz = np.abs(z) term1 = _lambda * np.log(_gamma2) term2 = _lambda_minus_half * np.log(absz) term3 = np.log(sp.kv(_lambda_minus_half, _alpha * absz)) - term4 = _beta * z - term5 = - _lambda_minus_half * np.log(2 * _alpha) + term4 = _beta * z + term5 = -_lambda_minus_half * np.log(2 * _alpha) # log_pdf = _lambda * np.log(_gamma2) + _lambda_minus_half * np.log(absz) + np.log(sp.kv(_lambda_minus_half, _alpha * absz)) + \ - # _beta * z - _lambda_minus_half * np.log(2 * _alpha) # + const + # _beta * z - _lambda_minus_half * np.log(2 * _alpha) # + const log_pdf = term1 + term2 + term3 + term4 + term5 return -np.sum(log_pdf) @@ -346,13 +418,13 @@ def log_likelihood_diag(self, w, model, omega, omega_fixed, data_, nu, ell): Negative of the logarithm of the Chi-squared probability density function. 
""" spline = model(omega_fixed, w) - spectrum = 2*spline(omega) + spectrum = 2 * spline(omega) # spectrum = np.clip(spline(omega), 1e-60, 1e60)/2 - dof = 2*(ell-nu+1) - z = np.abs(dof * data_ / spectrum) # This is chi-squared distributed - + dof = 2 * (ell - nu + 1) + z = np.abs(dof * data_ / spectrum) # This is chi-squared distributed + logz = np.log(z) - log_pdf = (2-dof)*logz + z + log_pdf = (2 - dof) * logz + z return np.sum(log_pdf) @@ -363,7 +435,7 @@ def log_likelihood_normal(self, w, omega, omega_fixed, data_, nu, ell): spline = self.model(omega_fixed, w) rho = np.clip(spline(omega), -0.98, 0.98) - log_pdf = -(data_ - rho)**2 + log_pdf = -((data_ - rho) ** 2) return np.sum(log_pdf) def log_prior_offdiag(self, w): @@ -372,7 +444,7 @@ def log_prior_offdiag(self, w): """ if np.all((w >= -1) & (w <= 1)): return 1 - else: + else: return -np.inf def log_prior_diag(self, w): @@ -388,53 +460,61 @@ def log_posterior_offdiag(self, w, model, omega, omega_fixed, data, nu=6, ell=3) """ Log-posterior for off-diagonal elements. """ - return self.log_prior_offdiag(w) + self.log_likelihood_offdiag(w, omega, model, omega_fixed, data, nu, ell) + return self.log_prior_offdiag(w) + self.log_likelihood_offdiag( + w, omega, model, omega_fixed, data, nu, ell + ) def log_posterior_diag(self, w, model, omega, omega_fixed, data, nu=6, ell=3): """ Log-posterior for diagonal elements. """ - return self.log_prior_diag(w) + self.log_likelihood_diag(w, model, omega, omega_fixed, data, nu, ell) + return self.log_prior_diag(w) + self.log_likelihood_diag( + w, model, omega, omega_fixed, data, nu, ell + ) def log_posterior_normal(self, w, omega, omega_fixed, data, nu=6, ell=3): """ Log-posterior for normal distribution. """ - return self.log_prior_offdiag(w) + self.log_likelihood_normal(w, omega, omega_fixed, data, nu, ell) + return self.log_prior_offdiag(w) + self.log_likelihood_normal( + w, omega, omega_fixed, data, nu, ell + ) def scale_matrix(model, w, omega, omega_fixed, n): - ''' - ''' + """ """ elements = model(omega_fixed, w)(omega) ie = 0 if elements.dtype == np.complex128: - L = np.zeros((n, n, omega.shape[0]), dtype = np.complex128) + L = np.zeros((n, n, omega.shape[0]), dtype=np.complex128) for i, j in zip(*np.triu_indices(n)): if i == j: L[i, j] = elements[:, ie] ie += 1 else: - L[i, j] = elements[:, ie] + 1j*elements[:, ie+1] + L[i, j] = elements[:, ie] + 1j * elements[:, ie + 1] ie += 2 - S = np.einsum('jiw,jkw->wik', L.conj(), L) + S = np.einsum("jiw,jkw->wik", L.conj(), L) else: L = np.zeros((n, n, omega.shape[0])) for i, j in zip(*np.triu_indices(n)): L[i, j] = elements[:, ie] ie += 1 - - S = np.einsum('jiw,jkw->wik', L, L) + + S = np.einsum("jiw,jkw->wik", L, L) return S + def normalize_parameters(p, guess): - mini, maxi = 1.2*np.abs(guess), -1.2*np.abs(guess) - return (p-mini) / (maxi-mini) + mini, maxi = 1.2 * np.abs(guess), -1.2 * np.abs(guess) + return (p - mini) / (maxi - mini) + def denormalize_parameters(p, guess): - mini, maxi = 1.2*np.abs(guess), -1.2*np.abs(guess) - return p * (maxi-mini) + mini + mini, maxi = 1.2 * np.abs(guess), -1.2 * np.abs(guess) + return p * (maxi - mini) + mini + # # Methods to perform a bayesian estimation of the transport coefficients @@ -481,7 +561,7 @@ def denormalize_parameters(p, guess): # def __init__(self): # # TODO: it's likely better to define some quantities here rather than in self.maxlike # log.write_log("MaxLikeFilter") - + # def __repr__(self): # msg = 'MaxLikeFilter:\n' #+ \ # # ' AIC type = {:}\n'.format(self.aic_type) + \ @@ -511,8 +591,8 @@ def 
 #                 omega_fixed = None,
 #                 write_log = True
 #                 ):
-        
-#         # Define internal variables for consistent notation 
+
+#         # Define internal variables for consistent notation
 #         assert n_parameters is not None
 #         self.n_parameters = n_parameters
@@ -534,13 +614,13 @@ def denormalize_parameters(p, guess):
 #             log_like = self.log_likelihood_offdiag
 #         else:
 #             raise NotImplementedError("Currently implemented likelihoods are: `wishart`, `chisquare`, `variance-gamma`")
-        
+
 #         assert data is not None, "`data` must be provided"
 #         if mask is not None:
 #             self.data = data[mask]
 #         else:
 #             self.data = data
-        
+
 #         # Check data is consistently shaped
 #         assert len(data.shape) == 1 or len(data.shape) == 3, "`data` should be a 1d array (diagonal or off-diagonal estimates) or a 3d array (Wishart matrix estimate)"
 #         if len(data.shape) == 3:
@@ -571,10 +651,10 @@ def denormalize_parameters(p, guess):
 #         # Minimize the negative log-likelihood
 #         res = minimize(fun = log_like,
-#                        x0 = guess_data, 
+#                        x0 = guess_data,
 #                        args = (model, omega, omega_fixed, data, nu, ell),
 #                        method = solver)
-        
+
 #         # Evaluate the covariance of the parameters, if the selected solver allows it
 #         try:
 #             cov = res.hess_inv
@@ -584,7 +664,7 @@
 #             if write_log:
 #                 log.write_log(f'The {solver} solver does not feature the calculation of the Hessian. No covariance matrix will be output.')
 #             cov = None
-        
+
 #         self.parameters_mean = res.x
 #         if cov is not None:
 #             self.parameters_std = np.sqrt(cov.diagonal())
@@ -602,26 +682,26 @@
 #         '''
 #         shape = data.shape
-        
+
 #         try:
 #             guess_data = np.array([runavefilter(c, window) for c in data.reshape(-1, shape[-1])]).reshape(shape)
 #         except:
 #             log.write_log(f'Guessing data with a running average with a {window} large window failed. Window changed to 10.')
 #             window = 10
 #             guess_data = np.array([runavefilter(c, window) for c in data.reshape(-1, shape[-1])]).reshape(shape)
-        
+
 #         guess_data = np.array([guess_data[...,j] for j in [np.argmin(np.abs(omega - omega_fixed[i])) for i in range(len(omega_fixed))]])
 #         if loglike == 'wishart':
 #             guess_data = np.array([cholesky(g, lower = False) for g in guess_data]) / np.sqrt(ell)
 #             guess_data = np.array([guess_data[:, 0, 0], guess_data[:, 0, 1], guess_data[:, 1, 1]]).reshape(-1)
-        
+
 #         return guess_data
-    
+
 #     def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell):
 #         '''
 #         Logarithm of the Wishart probability density function.
-#         ''' 
+#         '''
 #         n = ell
 #         p = nu
 #         multig = multigammaln(0.5*n, p)
@@ -632,12 +712,12 @@
 #         V = opt_einsum.contract('wba,wbc->wac', V, V) / n # equiv to V.T@V for each frequency
 
 #         # The argument of the PDF is the data
-#         X = data_ 
-
+#         X = data_
+
 #         # Determinant of X
 #         a, b, d = X[...,0,0], X[...,0,1], X[...,1,1]
 #         detX = a*d - b**2
-        
+
 #         # Determinant and inverse of V
 #         a, b, d = V[...,0,0], V[...,0,1], V[...,1,1]
 #         invV = (1/(a*d - b**2)*np.array([[d, -b],[-b, a]])).transpose(2,0,1)
@@ -651,7 +731,7 @@
 #         # Sum pieces of the log-likelihood
 #         log_pdf = - (0.5*(-n*p*LOG2 - n*np.log(detV) + (n-p-1)*np.log(detX) - trinvV_X) - multig)
-        
+
 #         return np.sum(log_pdf)
 
 #     def log_likelihood_offdiag(self, w, omega, omega_fixed, data_, nu, ell):
@@ -665,13 +745,13 @@
 #         _lambda = 0.5*ell*nu
 #         _gamma2 = _alpha**2 - _beta**2
 #         _lambda_minus_half = _lambda-0.5
-        
+
 #         # Data is distributed according to a Variance-Gamma distribution with parameters (notation as in Wikipedia):
 #         # mu = 0; alpha = 1/(1-rho**2); beta = rho/(1-rho**2); lambda = ell*nu/2
 #         # Its expectation value is ell*nu*rho
 #         z = data_*ell*nu
 #         absz = np.abs(z)
-#         # z = data 
+#         # z = data
 #         log_pdf = _lambda*np.log(_gamma2) + _lambda_minus_half*np.log(absz) + np.log(sp.kv(_lambda_minus_half, _alpha*absz)) + \
 #                   _beta*z - 0.5*np.log(np.pi) - np.log(sp.gamma(_lambda)) - _lambda_minus_half*np.log(2*_alpha)
@@ -686,7 +766,7 @@
 #         # Its expectation value is ell*rho
 #         z = data_*ell/rho
 #         absz = np.abs(z)
-#         # z = data 
+#         # z = data
 #         log_pdf = (ell / 2 - 1)*np.log(absz) - absz/2 - np.log(rho)
 
 #         res = np.sum(log_pdf)
@@ -704,7 +784,7 @@
 #         # Uniform prior
 #         if np.all((w>=-1)&(w<=1)):
 #             return 1
-#         else: 
+#         else:
 #             return -np.inf
 
 #     # The log-prior function
@@ -725,4 +805,4 @@
 #     # The log-posterior function
 #     def log_posterior_normal(self, w, omega, omega_fixed, data, nu=6, ell=3):
-#         return self.log_prior_offdiag(w) + self.log_likelihood_normal(w, omega, omega_fixed, data, nu, ell)
\ No newline at end of file
+#         return self.log_prior_offdiag(w) + self.log_likelihood_normal(w, omega, omega_fixed, data, nu, ell)
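
Usage sketch for reviewers. The module-level `scale_matrix` helper touched above builds, for every frequency, the scale matrix S = L^T L from spline values of its upper-triangular (Cholesky) entries. The snippet below is a minimal sketch of how to exercise it; it assumes `sportran.md.maxlike` is importable, and it uses scipy's `make_interp_spline` as an illustrative stand-in for the package's actual spline model (which this diff does not show) — any callable with the `model(omega_fixed, w)(omega)` interface used throughout the module should work.

```python
import numpy as np
from scipy.interpolate import make_interp_spline

from sportran.md.maxlike import scale_matrix

nu = 2                       # number of currents -> 2x2 scale matrices
n_triu = nu * (nu + 1) // 2  # independent upper-triangle (Cholesky) entries
omega_fixed = np.linspace(0.0, 10.0, 5)  # spline nodes
omega = np.linspace(0.0, 10.0, 101)      # evaluation grid
w = np.random.default_rng(0).normal(size=(len(omega_fixed), n_triu))


def model(omega_fixed, w):
    # Stand-in model: model(omega_fixed, w)(omega) must return a
    # (len(omega), n_triu) array of spline values, as scale_matrix expects.
    return make_interp_spline(omega_fixed, w, k=3)


S = scale_matrix(model, w, omega, omega_fixed, nu)
print(S.shape)  # (101, 2, 2); each S[k] = L.T @ L is symmetric positive semidefinite
```

One caveat on the new `maxlike` signature: `minimize_kwargs` defaults to `None` but is expanded with `**minimize_kwargs`, so as written callers must pass an explicit (possibly empty) dict, e.g. `maxlike(..., minimize_kwargs={})`, unless a `minimize_kwargs = minimize_kwargs or {}` guard is added.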