diff --git a/examples/plot_variable_importance_classif.py b/examples/plot_variable_importance_classif.py index bf5091c8..e7bcb5a1 100644 --- a/examples/plot_variable_importance_classif.py +++ b/examples/plot_variable_importance_classif.py @@ -29,7 +29,7 @@ from scipy.stats import ttest_1samp from sklearn.base import clone from sklearn.linear_model import RidgeCV -from sklearn.metrics import log_loss +from sklearn.metrics import hinge_loss from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold from sklearn.svm import SVC @@ -163,7 +163,7 @@ imputation_model=clone(imputation_model), n_permutations=50, n_jobs=5, - loss=log_loss, + loss=hinge_loss, random_state=seed, method="decision_function", ) @@ -177,7 +177,7 @@ imputation_model=clone(imputation_model), n_permutations=50, n_jobs=5, - loss=log_loss, + loss=hinge_loss, random_state=seed, method="decision_function", ) diff --git a/src/hidimstat/cpi.py b/src/hidimstat/cpi.py index a0a0224f..d6278f45 100644 --- a/src/hidimstat/cpi.py +++ b/src/hidimstat/cpi.py @@ -236,7 +236,9 @@ def score(self, X, y): y_pred = getattr(self.estimator, self.method)(X) - loss_reference = self.loss(y_true=y, y_pred=y_pred) + # In the sklearn API, y_true is the first argument. Not specifying `y_true=...` + # allows using other losses such as `hinge_loss`.
+ loss_reference = self.loss(y, y_pred) out_dict["loss_reference"] = loss_reference y_pred_perm = self.predict(X, y) @@ -245,7 +247,7 @@ def score(self, X, y): for j, y_pred_j in enumerate(y_pred_perm): list_loss_perm = [] for y_pred_perm in y_pred_j: - list_loss_perm.append(self.loss(y_true=y, y_pred=y_pred_perm)) + list_loss_perm.append(self.loss(y, y_pred_perm)) out_dict["loss_perm"][j] = np.array(list_loss_perm) out_dict["importance"] = np.array( diff --git a/src/hidimstat/loco.py b/src/hidimstat/loco.py index a3afd86c..a777d7c0 100644 --- a/src/hidimstat/loco.py +++ b/src/hidimstat/loco.py @@ -128,7 +128,7 @@ def predict(self, X, y): output_dict = dict() y_pred = getattr(self.estimator, self.method)(X) - loss_reference = self.loss(y_true=y, y_pred=y_pred) + loss_reference = self.loss(y, y_pred) output_dict["loss_reference"] = loss_reference output_dict["loss_loco"] = dict() @@ -183,13 +183,13 @@ def score(self, X, y): out_dict = dict() y_pred = getattr(self.estimator, self.method)(X) - loss_reference = self.loss(y_true=y, y_pred=y_pred) + loss_reference = self.loss(y, y_pred) out_dict["loss_reference"] = loss_reference y_pred_loco = self.predict(X, y) out_dict["loss_loco"] = np.array( - [self.loss(y_true=y, y_pred=y_pred_loco[j]) for j in range(self.n_groups)] + [self.loss(y, y_pred_loco[j]) for j in range(self.n_groups)] ) out_dict["importance"] = out_dict["loss_loco"] - loss_reference diff --git a/src/hidimstat/permutation_importance.py b/src/hidimstat/permutation_importance.py index 59920b6e..771fc30d 100644 --- a/src/hidimstat/permutation_importance.py +++ b/src/hidimstat/permutation_importance.py @@ -172,7 +172,7 @@ def score(self, X, y): output_dict = dict() y_pred = getattr(self.estimator, self.method)(X) - loss_reference = self.loss(y_true=y, y_pred=y_pred) + loss_reference = self.loss(y, y_pred) output_dict["loss_reference"] = loss_reference output_dict["loss_perm"] = dict() @@ -182,7 +182,7 @@ def score(self, X, y): for j, y_pred_j in 
enumerate(y_pred_perm): list_loss_perm = [] for y_pred_perm in y_pred_j: - list_loss_perm.append(self.loss(y_true=y, y_pred=y_pred_perm)) + list_loss_perm.append(self.loss(y, y_pred_perm)) output_dict["loss_perm"][j] = np.array(list_loss_perm) output_dict["importance"] = np.array(