diff --git a/src/cabinetry/fit/results_containers.py b/src/cabinetry/fit/results_containers.py index 9861413a..3a89df07 100644 --- a/src/cabinetry/fit/results_containers.py +++ b/src/cabinetry/fit/results_containers.py @@ -1,6 +1,6 @@ """Provides containers for inference results.""" -from typing import Dict, List, NamedTuple, Optional, Tuple +from typing import Dict, List, NamedTuple, Tuple import numpy as np @@ -24,7 +24,7 @@ class FitResults(NamedTuple): bestfit: np.ndarray uncertainty: np.ndarray labels: List[str] - types: List[Optional[str]] + types: List[List[str]] corr_mat: np.ndarray best_twice_nll: float goodness_of_fit: float = -1 diff --git a/src/cabinetry/model_utils.py b/src/cabinetry/model_utils.py index 66cadedd..e3d28dc6 100644 --- a/src/cabinetry/model_utils.py +++ b/src/cabinetry/model_utils.py @@ -518,7 +518,7 @@ def _filter_channels( def _labels_modifiers( model: pyhf.pdf.Model, -) -> Tuple[List[str], List[Optional[str]]]: +) -> Tuple[List[str], List[List[str]]]: """ """ labels = model.config.par_names() types = [] @@ -528,9 +528,9 @@ def _labels_modifiers( mod_type for par_name, mod_type in model.config.modifiers if par_name == parameter - ][:1] + ] ] * model.config.param_set(parameter).n_parameters - return labels, sum(types, []) # flatten types + return labels, types # list of modifier types per parameter def match_fit_results(model: pyhf.pdf.Model, fit_results: FitResults) -> FitResults: diff --git a/src/cabinetry/visualize/__init__.py b/src/cabinetry/visualize/__init__.py index 2e89712f..6b634b29 100644 --- a/src/cabinetry/visualize/__init__.py +++ b/src/cabinetry/visualize/__init__.py @@ -483,9 +483,7 @@ def pulls( # path is None if figure should not be saved figure_path = pathlib.Path(figure_folder) / "pulls.pdf" if save_figure else None labels_np = np.asarray(fit_results.labels) - numeric = np.array( - [True if ty in ["normfactor"] else False for ty in fit_results.types] - ) + numeric = np.array([bool(set(ty) & {"normfactor"}) for ty in fit_results.types]) 
exclude_set = _exclude_matching( fit_results, exclude=exclude, exclude_by_type=exclude_by_type diff --git a/src/cabinetry/visualize/utils.py b/src/cabinetry/visualize/utils.py index 9a451d0b..5df214ec 100644 --- a/src/cabinetry/visualize/utils.py +++ b/src/cabinetry/visualize/utils.py @@ -77,6 +77,10 @@ def _exclude_matching( exclude_by_type = ["staterror"] exclude_set.update( - [label for label, kind in zip(labels, types) if kind in exclude_by_type] + [ + label + for label, kinds in zip(labels, types) + if bool(set(kinds) & set(exclude_by_type)) + ] ) return exclude_set diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index 74a4c9db..5bf1db9d 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -98,7 +98,7 @@ def test_workspace(mock_validate, mock_build, cli_helpers, tmp_path): np.asarray([1.0]), np.asarray([0.1]), ["label"], - ["type"], + [["type"]], np.asarray([[1.0]]), 1.0, ), @@ -114,7 +114,7 @@ def test_fit(mock_utils, mock_fit, mock_pulls, mock_corrmat, tmp_path): bestfit = np.asarray([1.0]) uncertainty = np.asarray([0.1]) labels = ["label"] - types = ["type"] + types = [["type"]] corr_mat = np.asarray([[1.0]]) fit_results = fit.FitResults(bestfit, uncertainty, labels, types, corr_mat, 1.0) @@ -213,7 +213,7 @@ def test_fit(mock_utils, mock_fit, mock_pulls, mock_corrmat, tmp_path): np.asarray([1.0]), np.asarray([0.1]), ["label"], - ["type"], + [["type"]], np.asarray([[1.0]]), 1.0, ), @@ -229,7 +229,7 @@ def test_ranking(mock_utils, mock_fit, mock_rank, mock_vis, tmp_path): bestfit = np.asarray([1.0]) uncertainty = np.asarray([0.1]) labels = ["label"] - types = ["type"] + types = [["type"]] corr_mat = np.asarray([[1.0]]) fit_results = fit.FitResults(bestfit, uncertainty, labels, types, corr_mat, 1.0) @@ -482,7 +482,7 @@ def test_significance(mock_utils, mock_sig, tmp_path): np.asarray([1.0]), np.asarray([0.1]), ["label"], - ["type"], + [["type"]], np.asarray([[1.0]]), 1.0, ), @@ -528,7 +528,7 @@ def test_data_mc( np.asarray([1.0]), 
np.asarray([0.1]), ["label"], - ["type"], + [["type"]], np.asarray([[1.0]]), 1.0, ) diff --git a/tests/fit/test_fit.py b/tests/fit/test_fit.py index 63d15e57..b041b5f2 100644 --- a/tests/fit/test_fit.py +++ b/tests/fit/test_fit.py @@ -16,7 +16,7 @@ def test_print_results(caplog): bestfit = np.asarray([1.0, 2.0]) uncertainty = np.asarray([0.1, 0.3]) labels = ["param_A", "param_B"] - types = ["normsys", "shapesys"] + types = [["normsys"], ["shapesys"]] fit_results = fit.FitResults(bestfit, uncertainty, labels, types, np.empty(0), 0.0) fit.print_results(fit_results) @@ -144,13 +144,13 @@ def test__fit_model_custom(mock_minos, example_spec, example_spec_multibin): @mock.patch( "cabinetry.fit._fit_model_custom", return_value=fit.FitResults( - np.asarray([1.2]), np.asarray([0.2]), ["par"], ["normsys"], np.empty(0), 2.0 + np.asarray([1.2]), np.asarray([0.2]), ["par"], [["normsys"]], np.empty(0), 2.0 ), ) @mock.patch( "cabinetry.fit._fit_model_pyhf", return_value=fit.FitResults( - np.asarray([1.1]), np.asarray([0.2]), ["par"], ["normsys"], np.empty(0), 2.0 + np.asarray([1.1]), np.asarray([0.2]), ["par"], [["normsys"]], np.empty(0), 2.0 ), ) def test__fit_model(mock_pyhf, mock_custom, example_spec): @@ -296,7 +296,7 @@ def test__goodness_of_fit( @mock.patch( "cabinetry.fit._fit_model", return_value=fit.FitResults( - np.asarray([1.0]), np.asarray([0.1]), ["par"], ["normsys"], np.empty(0), 2.0 + np.asarray([1.0]), np.asarray([0.1]), ["par"], [["normsys"]], np.empty(0), 2.0 ), ) def test_fit(mock_fit, mock_print, mock_gof): @@ -386,7 +386,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 1.3]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -394,7 +394,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 0.7]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -402,7 +402,7 @@ def test_fit(mock_fit, mock_print, 
mock_gof): np.asarray([0.9, 1.2]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -410,7 +410,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 0.8]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -419,7 +419,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 1.2]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -427,7 +427,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 0.8]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -436,7 +436,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 1.0]), np.asarray([0.3, 0.3]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -444,7 +444,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 1.3]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -452,7 +452,7 @@ def test_fit(mock_fit, mock_print, mock_gof): np.asarray([0.9, 0.7]), np.asarray([0.1, 0.1]), ["a", "b"], - ["normsys", "normsys"], + [["normsys"], ["normsys"]], np.empty(0), 0.0, ), @@ -463,7 +463,7 @@ def test_ranking(mock_fit, example_spec): bestfit = np.asarray([0.9, 1.0]) uncertainty = np.asarray([0.02, 0.1]) labels = ["staterror", "mu"] - types = ["staterror", "normfactor"] + types = [["staterror"], ["normfactor"]] fit_results = fit.FitResults(bestfit, uncertainty, labels, types, np.empty(0), 0.0) model, data = model_utils.model_and_data(example_spec) ranking_results = fit.ranking(model, data, fit_results=fit_results) diff --git a/tests/fit/test_fit_results_containers.py b/tests/fit/test_fit_results_containers.py index c9746849..d0c8feb6 100644 --- a/tests/fit/test_fit_results_containers.py +++ 
b/tests/fit/test_fit_results_containers.py @@ -9,7 +9,7 @@ def test_FitResults(): bestfit = np.asarray([1.0]) uncertainty = np.asarray([0.1]) labels = ["par_a"] - types = [None] + types = [[]] corr_mat = np.asarray([[1.0]]) best_twice_nll = 2.0 fit_results = fit.FitResults( @@ -109,7 +109,7 @@ def test_print_results(caplog): bestfit = np.asarray([1.0, 2.0]) uncertainty = np.asarray([0.1, 0.3]) labels = ["param_A", "param_B"] - types = [None, None] + types = [[], []] fit_results = fit.FitResults(bestfit, uncertainty, labels, types, np.empty(0), 0.0) fit.print_results(fit_results) diff --git a/tests/test_model_utils.py b/tests/test_model_utils.py index 9624f2f2..37963327 100644 --- a/tests/test_model_utils.py +++ b/tests/test_model_utils.py @@ -265,7 +265,7 @@ def test_prediction( np.asarray([1.01, 1.1]), np.asarray([0.03, 0.1]), ["staterror_Signal-Region[0]", "Signal strength"], - ["staterror", "normfactor"], + [["staterror"], ["normfactor"]], np.asarray([[1.0, 0.2], [0.2, 1.0]]), 0.0, ) @@ -299,7 +299,7 @@ def test_prediction( np.asarray([1.01, 1.1]), np.asarray([0.03, 0.1]), ["a", "b"], - ["staterror", "normfactor"], + [["staterror"], ["normfactor"]], np.asarray([[1.0, 0.2], [0.2, 1.0]]), 0.0, ) @@ -396,7 +396,7 @@ def test_match_fit_results(mock_pars, mock_uncs): np.asarray([1.0, 2.0, 3.0]), np.asarray([0.1, 0.2, 0.3]), ["par_a", "par_b", "par_c"], - [None, None, None], + [[], [], []], np.asarray([[1.0, 0.2, 0.5], [0.2, 1.0, 0.1], [0.5, 0.1, 1.0]]), 5.0, 0.1, diff --git a/tests/visualize/test_visualize.py b/tests/visualize/test_visualize.py index 714fef87..b938db38 100644 --- a/tests/visualize/test_visualize.py +++ b/tests/visualize/test_visualize.py @@ -360,7 +360,7 @@ def test_correlation_matrix(mock_draw): corr_mat = np.asarray([[1.0, 0.2, 0.1], [0.2, 1.0, 0.1], [0.1, 0.1, 1.0]]) corr_mat_pruned = np.asarray([[1.0, 0.2], [0.2, 1.0]]) labels = ["a", "b", "c"] - types = [None, None, None] + types = [[], [], []] labels_pruned = ["a", "b"] folder_path = "tmp" 
figure_path = pathlib.Path(folder_path) / "correlation_matrix.pdf" @@ -408,7 +408,7 @@ def test_pulls(mock_draw): bestfit = np.asarray([0.8, 1.0, 1.05, 1.1]) uncertainty = np.asarray([0.9, 1.0, 0.03, 0.7]) labels = ["a", "b", "staterror_region[0]", "c"] - types = [None, None, "staterror", None] + types = [[], [], ["staterror"], []] exclude = ["a"] folder_path = "tmp" fit_results = fit.FitResults(bestfit, uncertainty, labels, types, np.empty(0), 1.0)